diff --git a/OracleAccessManagement/kubernetes/3.0.1/ingress-per-domain/README.md b/OracleAccessManagement/kubernetes/3.0.1/ingress-per-domain/README.md deleted file mode 100755 index eb761e87e..000000000 --- a/OracleAccessManagement/kubernetes/3.0.1/ingress-per-domain/README.md +++ /dev/null @@ -1,73 +0,0 @@ -# An Ingress per domain chart -This chart is for deploying an Ingress resource in front of a WebLogic domain cluster. We support two Ingress types: Traeafik and Voyager. - -## Prerequisites -- Have Docker and a Kubernetes cluster running and have `kubectl` installed and configured. -- Have Helm installed. -- The corresponding Ingress controller, Traefik or Voyager, is installed in the Kubernetes cluster. -- A WebLogic domain cluster deployed by `weblogic-operator` is running in the Kubernetes cluster. - -## Installing the chart - -To install the chart with the release name, `my-ingress`, with the given `values.yaml`: -``` -# Change directory to the cloned git weblogic-kubernetes-operator repo. -$ cd kubernetes/samples/charts - -# Use helm to install the chart. Use `--namespace` to specify the name of the WebLogic domain's namespace. -$ helm install ingress-per-domain --name my-ingress --namespace my-domain-namespace --values values.yaml -``` -The Ingress resource will be created in the same namespace as the WebLogic domain cluster. 
- -Sample `values.yaml` for the Traefik Ingress: -``` -type: TRAEFIK - -# WLS domain as backend to the load balancer -wlsDomain: - domainUID: domain1 - clusterName: cluster1 - managedServerPort: 8001 - -# Traefik specific values -traefik: - # hostname used by host-routing - hostname: domain1.org -``` - -Sample `values.yaml` for the Voyager Ingress: -``` -type: VOYAGER - -# WLS domain as backend to the load balancer -wlsDomain: - domainUID: domain1 - clusterName: cluster1 - managedServerPort: 8001 - -# Voyager specific values -voyager: - # web port - webPort: 30305 - # stats port - statsPort: 30315 -``` -## Uninstalling the chart -To uninstall and delete the `my-ingress` deployment: -``` -$ helm delete --purge my-ingress -``` -## Configuration -The following table lists the configurable parameters of this chart and their default values. - -| Parameter | Description | Default | -| --- | --- | --- | -| `type` | Type of Ingress controller. Legal values are `TRAEFIK` or `VOYAGER`. | `TRAEFIK` | -| `wlsDomain.domainUID` | DomainUID of the WLS domain. | `domain1` | -| `wlsDomain.clusterName` | Cluster name in the WLS domain. | `cluster-1` | -| `wlsDomain.managedServerPort` | Port number of the managed servers in the WLS domain cluster. | `8001` | -| `traefik.hostname` | Hostname to route to the WLS domain cluster. | `domain1.org` | -| `voyager.webPort` | Web port to access the Voyager load balancer. | `30305` | -| `voyager.statsPort` | Port to access the Voyager/HAProxy stats page. | `30315` | - -**Note:** The input values `domainUID` and `clusterName` will be used to generate the Kubernetes `serviceName` of the WLS cluster with the format `domainUID-cluster-clusterName`. 
diff --git a/OracleAccessManagement/kubernetes/3.0.1/ingress-per-domain/nginx-ingress.yaml b/OracleAccessManagement/kubernetes/3.0.1/ingress-per-domain/nginx-ingress.yaml deleted file mode 100755 index fe1cfcdf7..000000000 --- a/OracleAccessManagement/kubernetes/3.0.1/ingress-per-domain/nginx-ingress.yaml +++ /dev/null @@ -1,69 +0,0 @@ -# Copyright (c) 2020, Oracle Corporation and/or its affiliates. -# Licensed under the Universal Permissive License v 1.0 as shown at https://oss.oracle.com/licenses/upl. - -apiVersion: networking.k8s.io/v1beta1 -kind: Ingress -metadata: - name: access-ingress - namespace: accessns - annotations: - nginx.ingress.kubernetes.io/proxy-buffer-size: "2000k" - kubernetes.io/ingress.class: nginx - nginx.ingress.kubernetes.io/enable-access-log: "false" -spec: - rules: - - host: xxxxx.xxx.xxxxx.xxx - http: - paths: - - path: /console - backend: - serviceName: accessinfra-adminserver - servicePort: 7001 - - path: /rreg/rreg - backend: - serviceName: accessinfra-adminserver - servicePort: 7001 - - path: /em - backend: - serviceName: accessinfra-adminserver - servicePort: 7001 - - path: /oamconsole - backend: - serviceName: accessinfra-adminserver - servicePort: 7001 - - path: /dms - backend: - serviceName: accessinfra-adminserver - servicePort: 7001 - - path: /oam/services/rest - backend: - serviceName: accessinfra-adminserver - servicePort: 7001 - - path: /iam/admin/config - backend: - serviceName: accessinfra-adminserver - servicePort: 7001 - - path: /iam/admin/diag - backend: - serviceName: accessinfra-adminserver - servicePort: 7001 - - path: /iam/access - backend: - serviceName: accessinfra-cluster-oam-cluster - servicePort: 14100 - - path: /oam/admin/api - backend: - serviceName: accessinfra-adminserver - servicePort: 7001 - - path: /oam/services/rest/access/api - backend: - serviceName: accessinfra-cluster-oam-cluster - servicePort: 14100 - - path: /access - backend: - serviceName: accessinfra-cluster-policy-cluster - servicePort: 15100 
- - path: / - backend: - serviceName: accessinfra-cluster-oam-cluster - servicePort: 14100 diff --git a/OracleAccessManagement/kubernetes/3.0.1/ingress-per-domain/ssl-nginx-ingress.yaml b/OracleAccessManagement/kubernetes/3.0.1/ingress-per-domain/ssl-nginx-ingress.yaml deleted file mode 100755 index aba781d04..000000000 --- a/OracleAccessManagement/kubernetes/3.0.1/ingress-per-domain/ssl-nginx-ingress.yaml +++ /dev/null @@ -1,72 +0,0 @@ -# Copyright (c) 2020, Oracle Corporation and/or its affiliates. -# Licensed under the Universal Permissive License v 1.0 as shown at https://oss.oracle.com/licenses/upl. - -apiVersion: extensions/v1beta1 -kind: Ingress -metadata: - name: access-ingress - namespace: accessns - annotations: - nginx.ingress.kubernetes.io/proxy-buffer-size: "2000k" - kubernetes.io/ingress.class: nginx - nginx.ingress.kubernetes.io/enable-access-log: "false" - nginx.ingress.kubernetes.io/configuration-snippet: | - more_set_input_headers "X-Forwarded-Proto: https"; - more_set_input_headers "WL-Proxy-SSL: true"; - nginx.ingress.kubernetes.io/ingress.allow-http: "false" -spec: - rules: - - http: - paths: - - path: /console - backend: - serviceName: accessinfra-adminserver - servicePort: 7001 - - path: /rreg/rreg - backend: - serviceName: accessinfra-adminserver - servicePort: 7001 - - path: /em - backend: - serviceName: accessinfra-adminserver - servicePort: 7001 - - path: /oamconsole - backend: - serviceName: accessinfra-adminserver - servicePort: 7001 - - path: /dms - backend: - serviceName: accessinfra-adminserver - servicePort: 7001 - - path: /oam/services/rest - backend: - serviceName: accessinfra-adminserver - servicePort: 7001 - - path: /iam/admin/config - backend: - serviceName: accessinfra-adminserver - servicePort: 7001 - - path: /oam/admin/api - backend: - serviceName: accessinfra-adminserver - servicePort: 7001 - - path: /iam/admin/diag - backend: - serviceName: accessinfra-adminserver - servicePort: 7001 - - path: /iam/access - backend: - 
serviceName: accessinfra-cluster-oam-cluster - servicePort: 14100 - - path: /oam/services/rest/access/api - backend: - serviceName: accessinfra-cluster-oam-cluster - servicePort: 14100 - - path: /access - backend: - serviceName: accessinfra-cluster-policy-cluster - servicePort: 15100 - - path: / - backend: - serviceName: accessinfra-cluster-oam-cluster - servicePort: 14100 diff --git a/OracleAccessManagement/kubernetes/3.0.1/ingress-per-domain/templates/nginx-ingress.yaml b/OracleAccessManagement/kubernetes/3.0.1/ingress-per-domain/templates/nginx-ingress.yaml deleted file mode 100755 index 93e187351..000000000 --- a/OracleAccessManagement/kubernetes/3.0.1/ingress-per-domain/templates/nginx-ingress.yaml +++ /dev/null @@ -1,79 +0,0 @@ -# Copyright (c) 2020, Oracle Corporation and/or its affiliates. -# Licensed under the Universal Permissive License v 1.0 as shown at https://oss.oracle.com/licenses/upl. - -{{- if eq .Values.type "NGINX" }} ---- -apiVersion: extensions/v1beta1 -kind: Ingress -metadata: - name: access-ingress - namespace: {{ .Release.Namespace }} - labels: - weblogic.resourceVersion: domain-v2 -{{- if eq .Values.tls "SSL" }} - annotations: - nginx.ingress.kubernetes.io/proxy-buffer-size: "2000k" - kubernetes.io/ingress.class: nginx - nginx.ingress.kubernetes.io/enable-access-log: "false" - nginx.ingress.kubernetes.io/configuration-snippet: | - more_set_input_headers "X-Forwarded-Proto: https"; - more_set_input_headers "WL-Proxy-SSL: true"; - nginx.ingress.kubernetes.io/ingress.allow-http: "false" -{{- end }} -spec: - rules: - - http: - paths: - - path: /console - backend: - serviceName: '{{ .Values.wlsDomain.domainUID }}-{{ .Values.wlsDomain.adminServerName | lower | replace "_" "-" }}' - servicePort: {{ .Values.wlsDomain.adminServerPort }} - - path: /rreg/rreg - backend: - serviceName: '{{ .Values.wlsDomain.domainUID }}-{{ .Values.wlsDomain.adminServerName | lower | replace "_" "-" }}' - servicePort: {{ .Values.wlsDomain.adminServerPort }} - - path: 
/em - backend: - serviceName: '{{ .Values.wlsDomain.domainUID }}-{{ .Values.wlsDomain.adminServerName | lower | replace "_" "-" }}' - servicePort: {{ .Values.wlsDomain.adminServerPort }} - - path: /oamconsole - backend: - serviceName: '{{ .Values.wlsDomain.domainUID }}-{{ .Values.wlsDomain.adminServerName | lower | replace "_" "-" }}' - servicePort: {{ .Values.wlsDomain.adminServerPort }} - - path: /dms - backend: - serviceName: '{{ .Values.wlsDomain.domainUID }}-{{ .Values.wlsDomain.adminServerName | lower | replace "_" "-" }}' - servicePort: {{ .Values.wlsDomain.adminServerPort }} - - path: /oam/services/rest - backend: - serviceName: '{{ .Values.wlsDomain.domainUID }}-{{ .Values.wlsDomain.adminServerName | lower | replace "_" "-" }}' - servicePort: {{ .Values.wlsDomain.adminServerPort }} - - path: /iam/admin/config - backend: - serviceName: '{{ .Values.wlsDomain.domainUID }}-{{ .Values.wlsDomain.adminServerName | lower | replace "_" "-" }}' - servicePort: {{ .Values.wlsDomain.adminServerPort }} - - path: /iam/admin/diag - backend: - serviceName: '{{ .Values.wlsDomain.domainUID }}-{{ .Values.wlsDomain.adminServerName | lower | replace "_" "-" }}' - servicePort: {{ .Values.wlsDomain.adminServerPort }} - - path: /iam/access - backend: - serviceName: '{{ .Values.wlsDomain.domainUID }}-cluster-{{ .Values.wlsDomain.oamClusterName | lower | replace "_" "-" }}' - servicePort: {{ .Values.wlsDomain.oamManagedServerPort }} - - path: /oam/admin/api - backend: - serviceName: '{{ .Values.wlsDomain.domainUID }}-{{ .Values.wlsDomain.adminServerName | lower | replace "_" "-" }}' - servicePort: {{ .Values.wlsDomain.adminServerPort }} - - path: /oam/services/rest/access/api - backend: - serviceName: '{{ .Values.wlsDomain.domainUID }}-cluster-{{ .Values.wlsDomain.oamClusterName | lower | replace "_" "-" }}' - servicePort: {{ .Values.wlsDomain.oamManagedServerPort }} - - path: /access - backend: - serviceName: '{{ .Values.wlsDomain.domainUID }}-cluster-{{ 
.Values.wlsDomain.policyClusterName | lower | replace "_" "-" }}' - servicePort: {{ .Values.wlsDomain.policyServerPort }} - - path: / - backend: - serviceName: '{{ .Values.wlsDomain.domainUID }}-cluster-{{ .Values.wlsDomain.oamClusterName | lower | replace "_" "-" }}' - servicePort: {{ .Values.wlsDomain.oamManagedServerPort }} -{{- end }} diff --git a/OracleAccessManagement/kubernetes/3.0.1/ingress-per-domain/templates/traefik-ingress.yaml b/OracleAccessManagement/kubernetes/3.0.1/ingress-per-domain/templates/traefik-ingress.yaml deleted file mode 100755 index d2acf27ed..000000000 --- a/OracleAccessManagement/kubernetes/3.0.1/ingress-per-domain/templates/traefik-ingress.yaml +++ /dev/null @@ -1,73 +0,0 @@ -# Copyright (c) 2020, Oracle Corporation and/or its affiliates. -# Licensed under the Universal Permissive License v 1.0 as shown at https://oss.oracle.com/licenses/upl. - -{{- if eq .Values.type "TRAEFIK" }} ---- -apiVersion: extensions/v1beta1 -kind: Ingress -metadata: - name: {{ .Values.wlsDomain.domainUID }}-traefik - namespace: {{ .Release.Namespace }} - labels: - weblogic.resourceVersion: domain-v2 - annotations: - kubernetes.io/ingress.class: traefik -spec: - rules: - - host: '{{ .Values.traefik.hostname }}' - http: - paths: - - path: /console - backend: - serviceName: '{{ .Values.wlsDomain.domainUID }}-{{ .Values.wlsDomain.adminServerName | lower | replace "_" "-" }}' - servicePort: {{ .Values.wlsDomain.adminServerPort }} - - path: /rreg/rreg - backend: - serviceName: '{{ .Values.wlsDomain.domainUID }}-{{ .Values.wlsDomain.adminServerName | lower | replace "_" "-" }}' - servicePort: {{ .Values.wlsDomain.adminServerPort }} - - path: /dms - backend: - serviceName: '{{ .Values.wlsDomain.domainUID }}-{{ .Values.wlsDomain.adminServerName | lower | replace "_" "-" }}' - servicePort: {{ .Values.wlsDomain.adminServerPort }} - - path: /oamconsole - backend: - serviceName: '{{ .Values.wlsDomain.domainUID }}-{{ .Values.wlsDomain.adminServerName | lower | replace "_" 
"-" }}' - servicePort: {{ .Values.wlsDomain.adminServerPort }} - - path: /oam/services/rest - backend: - serviceName: '{{ .Values.wlsDomain.domainUID }}-{{ .Values.wlsDomain.adminServerName | lower | replace "_" "-" }}' - servicePort: {{ .Values.wlsDomain.adminServerPort }} - - path: /iam/admin/config - backend: - serviceName: '{{ .Values.wlsDomain.domainUID }}-{{ .Values.wlsDomain.adminServerName | lower | replace "_" "-" }}' - servicePort: {{ .Values.wlsDomain.adminServerPort }} - - path: /oam/admin/api - backend: - serviceName: '{{ .Values.wlsDomain.domainUID }}-{{ .Values.wlsDomain.adminServerName | lower | replace "_" "-" }}' - servicePort: {{ .Values.wlsDomain.adminServerPort }} - - path: /iam/admin/diag - backend: - serviceName: '{{ .Values.wlsDomain.domainUID }}-{{ .Values.wlsDomain.adminServerName | lower | replace "_" "-" }}' - servicePort: {{ .Values.wlsDomain.adminServerPort }} - - path: /iam/access - backend: - serviceName: '{{ .Values.wlsDomain.domainUID }}-cluster-{{ .Values.wlsDomain.oamClusterName | lower | replace "_" "-" }}' - servicePort: {{ .Values.wlsDomain.oamManagedServerPort }} - - path: /em - backend: - serviceName: '{{ .Values.wlsDomain.domainUID }}-{{ .Values.wlsDomain.adminServerName | lower | replace "_" "-" }}' - servicePort: {{ .Values.wlsDomain.adminServerPort }} - - path: /access - backend: - serviceName: '{{ .Values.wlsDomain.domainUID }}-cluster-{{ .Values.wlsDomain.policyClusterName | lower | replace "_" "-" }}' - servicePort: {{ .Values.wlsDomain.policyServerPort }} - - path: /oam/services/rest/access/api - backend: - serviceName: '{{ .Values.wlsDomain.domainUID }}-cluster-{{ .Values.wlsDomain.oamClusterName | lower | replace "_" "-" }}' - servicePort: {{ .Values.wlsDomain.oamManagedServerPort }} - - path: / - backend: - serviceName: '{{ .Values.wlsDomain.domainUID }}-cluster-{{ .Values.wlsDomain.oamClusterName | lower | replace "_" "-" }}' - servicePort: {{ .Values.wlsDomain.oamManagedServerPort }} -{{- end }} - diff --git 
a/OracleAccessManagement/kubernetes/3.0.1/ingress-per-domain/templates/voyager-ingress.yaml b/OracleAccessManagement/kubernetes/3.0.1/ingress-per-domain/templates/voyager-ingress.yaml deleted file mode 100755 index 405b55544..000000000 --- a/OracleAccessManagement/kubernetes/3.0.1/ingress-per-domain/templates/voyager-ingress.yaml +++ /dev/null @@ -1,106 +0,0 @@ -# Copyright (c) 2020, Oracle Corporation and/or its affiliates. -# Licensed under the Universal Permissive License v 1.0 as shown at https://oss.oracle.com/licenses/upl. - -{{- if eq .Values.type "VOYAGER" }} ---- -apiVersion: voyager.appscode.com/v1beta1 -kind: Ingress -metadata: - name: {{ .Values.wlsDomain.domainUID }}-voyager - namespace: {{ .Release.Namespace }} - annotations: - ingress.appscode.com/type: 'NodePort' - kubernetes.io/ingress.class: 'voyager' - ingress.appscode.com/stats: 'true' - ingress.appscode.com/default-timeout: '{"connect": "1800s", "server": "1800s"}' - ingress.appscode.com/proxy-body-size: "2000000" - labels: - weblogic.resourceVersion: domain-v2 -spec: -{{- if eq .Values.tls "SSL" }} - frontendRules: - - port: 443 - rules: - - http-request set-header WL-Proxy-SSL true - tls: - - secretName: domain1-tls-cert - hosts: - - '*' -{{- end }} - rules: - - host: '*' - http: - nodePort: '{{ .Values.voyager.webPort }}' - paths: - - path: /console - backend: - serviceName: '{{ .Values.wlsDomain.domainUID }}-{{ .Values.wlsDomain.adminServerName | lower | replace "_" "-" }}' - servicePort: {{ .Values.wlsDomain.adminServerPort }} - - path: /rreg/rreg - backend: - serviceName: '{{ .Values.wlsDomain.domainUID }}-{{ .Values.wlsDomain.adminServerName | lower | replace "_" "-" }}' - servicePort: {{ .Values.wlsDomain.adminServerPort }} - - path: /em - backend: - serviceName: '{{ .Values.wlsDomain.domainUID }}-{{ .Values.wlsDomain.adminServerName | lower | replace "_" "-" }}' - servicePort: {{ .Values.wlsDomain.adminServerPort }} - - path: /oamconsole - backend: - serviceName: '{{ 
.Values.wlsDomain.domainUID }}-{{ .Values.wlsDomain.adminServerName | lower | replace "_" "-" }}' - servicePort: {{ .Values.wlsDomain.adminServerPort }} - - path: /dms - backend: - serviceName: '{{ .Values.wlsDomain.domainUID }}-{{ .Values.wlsDomain.adminServerName | lower | replace "_" "-" }}' - servicePort: {{ .Values.wlsDomain.adminServerPort }} - - path: /oam/services/rest - backend: - serviceName: '{{ .Values.wlsDomain.domainUID }}-{{ .Values.wlsDomain.adminServerName | lower | replace "_" "-" }}' - servicePort: {{ .Values.wlsDomain.adminServerPort }} - - path: /iam/admin/config - backend: - serviceName: '{{ .Values.wlsDomain.domainUID }}-{{ .Values.wlsDomain.adminServerName | lower | replace "_" "-" }}' - servicePort: {{ .Values.wlsDomain.adminServerPort }} - - path: /iam/admin/diag - backend: - serviceName: '{{ .Values.wlsDomain.domainUID }}-{{ .Values.wlsDomain.adminServerName | lower | replace "_" "-" }}' - servicePort: {{ .Values.wlsDomain.adminServerPort }} - - path: /iam/access - backend: - serviceName: '{{ .Values.wlsDomain.domainUID }}-cluster-{{ .Values.wlsDomain.oamClusterName | lower | replace "_" "-" }}' - servicePort: {{ .Values.wlsDomain.oamManagedServerPort }} - - path: /oam/admin/api - backend: - serviceName: '{{ .Values.wlsDomain.domainUID }}-{{ .Values.wlsDomain.adminServerName | lower | replace "_" "-" }}' - servicePort: {{ .Values.wlsDomain.adminServerPort }} - - path: /oam/services/rest/access/api - backend: - serviceName: '{{ .Values.wlsDomain.domainUID }}-cluster-{{ .Values.wlsDomain.oamClusterName | lower | replace "_" "-" }}' - servicePort: {{ .Values.wlsDomain.oamManagedServerPort }} - - path: /access - backend: - serviceName: '{{ .Values.wlsDomain.domainUID }}-cluster-{{ .Values.wlsDomain.policyClusterName | lower | replace "_" "-" }}' - servicePort: {{ .Values.wlsDomain.policyServerPort }} - - path: / - backend: - serviceName: '{{ .Values.wlsDomain.domainUID }}-cluster-{{ .Values.wlsDomain.oamClusterName | lower | replace "_" "-" 
}}' - servicePort: {{ .Values.wlsDomain.oamManagedServerPort }} ---- -apiVersion: v1 -kind: Service -metadata: - name: {{ .Values.wlsDomain.domainUID }}-voyager-stats - namespace: {{ .Release.Namespace }} - labels: - app: voyager-stats -spec: - type: NodePort - ports: - - name: client - protocol: TCP - port: 56789 - targetPort: 56789 - nodePort: {{ .Values.voyager.statsPort }} - selector: - origin: voyager - origin-name: {{ .Values.wlsDomain.domainUID }}-voyager -{{- end }} diff --git a/OracleAccessManagement/kubernetes/3.0.1/ingress-per-domain/values.yaml b/OracleAccessManagement/kubernetes/3.0.1/ingress-per-domain/values.yaml deleted file mode 100755 index bc79f0d2d..000000000 --- a/OracleAccessManagement/kubernetes/3.0.1/ingress-per-domain/values.yaml +++ /dev/null @@ -1,39 +0,0 @@ -# Copyright (c) 2020, Oracle Corporation and/or its affiliates. -# Licensed under the Universal Permissive License v 1.0 as shown at https://oss.oracle.com/licenses/upl. - -# Default values for ingress-per-domain. -# This is a YAML-formatted file. -# Declare variables to be passed into your templates. -# Load balancer type. 
Supported values are: NGINX, VOYAGER - -type: VOYAGER -# Type of Configuration Supported Values are : NONSSL,SSL -# tls: NONSSL -tls: SSL -# TLS secret name if the mode is SSL -secretName: domain1-tls-cert - - -# WLS domain as backend to the load balancer -wlsDomain: - domainUID: accessinfra - oamClusterName: oam_cluster - policyClusterName: policy_cluster - oamManagedServerPort: 14100 - policyServerPort: 15100 - adminServerName: adminserver - adminServerPort: 7001 - Namespace: accessns - -# Traefik specific values -#traefik: - # hostname used by host-routing - #hostname: xxxx.example.com - -# Voyager specific values -voyager: - # web port - webPort: 30305 - # stats port - statsPort: 30315 - diff --git a/OracleAccessManagement/kubernetes/charts/apache-samples/README.md b/OracleAccessManagement/kubernetes/charts/apache-samples/README.md new file mode 100755 index 000000000..dcaab57c7 --- /dev/null +++ b/OracleAccessManagement/kubernetes/charts/apache-samples/README.md @@ -0,0 +1,8 @@ +# Apache load balancer samples + +The sample package contains two samples that use the [Apache Helm chart](../apache-webtier/README.md). The samples use the Docker image for the Apache HTTP Server with the 12.2.1.3.0 and 12.2.1.4.0 Oracle WebLogic Server Proxy Plugin. See the details in [Apache HTTP Server with Oracle WebLogic Server Proxy Plugin on Docker](https://github.com/oracle/docker-images/tree/master/OracleWebLogic/samples/12213-webtier-apache). + +* [The default sample](default-sample/README.md) uses the built-in configuration in the Docker image. + +* [The custom sample](custom-sample/README.md) demonstrates how to customize the configuration of the Apache HTTP Server with the 12.2.1.3.0 and 12.2.1.4.0 Oracle WebLogic Server Proxy Plugins. 
+ diff --git a/OracleAccessManagement/kubernetes/charts/apache-samples/custom-sample/README.md b/OracleAccessManagement/kubernetes/charts/apache-samples/custom-sample/README.md new file mode 100755 index 000000000..c35d2d700 --- /dev/null +++ b/OracleAccessManagement/kubernetes/charts/apache-samples/custom-sample/README.md @@ -0,0 +1,149 @@ +# Apache load balancer custom sample +In this sample, we will configure the Apache webtier as a load balancer for multiple WebLogic domains using a custom configuration. We will demonstrate how to use the Apache webtier to handle traffic to multiple backend WebLogic domains. + +## 1. Create a namespace +In this sample, both the Apache webtier and WebLogic domain instances are located in the namespace `apache-sample`. +```shell +$ kubectl create namespace apache-sample +``` + +## 2. Create WebLogic domains +We need to prepare some backend domains for load balancing by the Apache webtier. Refer to the [sample](/kubernetes/samples/scripts/create-weblogic-domain/domain-home-on-pv/README.md), to create two WebLogic domains under the namespace `apache-sample`. + +The first domain uses the following custom configuration parameters: +- namespace: `apache-sample` +- domainUID: `domain1` +- clusterName: `cluster-1` +- adminServerName: `admin-server` +- adminPort: `7001` +- adminNodePort: `30701` +- managedServerPort: `8001` + +The second domain uses the following custom configuration parameters: +- namespace: `apache-sample` +- domainUID: `domain2` +- clusterName: `cluster-1` +- adminServerName: `admin-server` +- adminPort: `7011` +- adminNodePort: `30702` +- managedServerPort: `8021` + +After the domains are successfully created, deploy the sample web application, `testwebapp.war`, on each domain cluster using the WLS Administration Console. The sample web application is located in the `kubernetes/samples/charts/application` directory. + +## 3. 
Build the Apache webtier Docker image +Refer to the [sample](https://github.com/oracle/docker-images/tree/master/OracleWebLogic/samples/12213-webtier-apache), to build the Apache webtier Docker image. + +## 4. Provide the custom Apache plugin configuration +In this sample, we will provide a custom Apache plugin configuration to fine tune the behavior of Apache. + +* Create a custom Apache plugin configuration file named `custom_mod_wl_apache.conf`. The file content is similar to below. + +``` +# Copyright (c) 2018, 2021, Oracle and/or its affiliates. +# Licensed under the Universal Permissive License v 1.0 as shown at https://oss.oracle.com/licenses/upl. + + +WebLogicHost ${WEBLOGIC_HOST} +WebLogicPort ${WEBLOGIC_PORT} + + +# Directive for weblogic admin Console deployed on Weblogic Admin Server + +SetHandler weblogic-handler +WebLogicHost domain1-admin-server +WebLogicPort ${WEBLOGIC_PORT} + + +# Directive for all application deployed on weblogic cluster with a prepath defined by LOCATION variable +# For example, if the LOCAITON is set to '/weblogic', all applications deployed on the cluster can be accessed via +# http://myhost:myport/weblogic/application_end_url +# where 'myhost' is the IP of the machine that runs the Apache web tier, and +# 'myport' is the port that the Apache web tier is publicly exposed to. +# Note that LOCATION cannot be set to '/' unless this is the only Location module configured. + +WLSRequest On +WebLogicCluster domain1-cluster-cluster-1:8001 +PathTrim /weblogic1 + + +# Directive for all application deployed on weblogic cluster with a prepath defined by LOCATION2 variable +# For example, if the LOCAITON2 is set to '/weblogic2', all applications deployed on the cluster can be accessed via +# http://myhost:myport/weblogic2/application_end_url +# where 'myhost' is the IP of the machine that runs the Apache web tier, and +# 'myport' is the port that the Apache webt ier is publicly exposed to. 
+ +WLSRequest On +WebLogicCluster domain2-cluster-cluster-1:8021 +PathTrim /weblogic2 + +``` + +* Create a PV / PVC (pv-claim-name) that can be used to store the `custom_mod_wl_apache.conf`. Refer to the [Sample for creating a PV or PVC](/kubernetes/samples/scripts/create-weblogic-domain-pv-pvc/README.md). + +## 5. Prepare your own certificate and private key +In production, Oracle strongly recommends that you provide your own certificates. Run the following commands to generate your own certificate and private key using `openssl`. + +```shell +$ cd kubernetes/samples/charts/apache-samples/custom-sample +$ export VIRTUAL_HOST_NAME=apache-sample-host +$ export SSL_CERT_FILE=apache-sample.crt +$ export SSL_CERT_KEY_FILE=apache-sample.key +$ sh certgen.sh +``` + +## 6. Prepare the input values for the Apache webtier Helm chart +Run the following commands to prepare the input value file for the Apache webtier Helm chart. + +```shell +$ base64 -i ${SSL_CERT_FILE} | tr -d '\n' +$ base64 -i ${SSL_CERT_KEY_FILE} | tr -d '\n' +$ touch input.yaml +``` +Edit the input parameters file, `input.yaml`. The file content is similar to below. + +```yaml +# Copyright (c) 2018, 2021, Oracle and/or its affiliates. +# Licensed under the Universal Permissive License v 1.0 as shown at https://oss.oracle.com/licenses/upl. + +# Use this to provide your own Apache webtier configuration as needed; simply define this +# Persistence Volume which contains your own custom_mod_wl_apache.conf file. +persistentVolumeClaimName: + +# The VirtualHostName of the Apache HTTP server. It is used to enable custom SSL configuration. +virtualHostName: apache-sample-host + +# The customer supplied certificate to use for Apache webtier SSL configuration. +# The value must be a string containing a base64 encoded certificate. Run following command to get it. +# base64 -i ${SSL_CERT_FILE} | tr -d '\n' +customCert: + +# The customer supplied private key to use for Apache webtier SSL configuration. 
+# The value must be a string containing a base64 encoded key. Run following command to get it. +# base64 -i ${SSL_KEY_FILE} | tr -d '\n' +customKey: +``` + +## 7. Install the Apache webtier Helm chart +The Apache webtier Helm chart is located in the `kubernetes/samples/charts/apache-webtier` directory. Install the Apache webtier Helm chart to the `apache-sample` namespace with the specified input parameters: + +```shell +$ cd kubernetes/samples/charts +$ helm install my-release --values apache-samples/custom-sample/input.yaml --namespace apache-sample apache-webtier +``` + +## 8. Run the sample application +Now you can send requests to different WebLogic domains with the unique entry point of Apache with different paths. Alternatively, you can access the URLs in a web browser. +```shell +$ curl --silent http://${HOSTNAME}:30305/weblogic1/testwebapp/ +$ curl --silent http://${HOSTNAME}:30305/weblogic2/testwebapp/ +``` +Also, you can use SSL URLs to send requests to different WebLogic domains. Access the SSL URL via the `curl` command or a web browser. +```shell +$ curl -k --silent https://${HOSTNAME}:30443/weblogic1/testwebapp/ +$ curl -k --silent https://${HOSTNAME}:30443/weblogic2/testwebapp/ +``` + +## 9. Uninstall the Apache webtier +```shell +$ helm uninstall my-release --namespace apache-sample +``` diff --git a/OracleAccessManagement/kubernetes/charts/apache-samples/custom-sample/certgen.sh b/OracleAccessManagement/kubernetes/charts/apache-samples/custom-sample/certgen.sh new file mode 100755 index 000000000..20dd9fa51 --- /dev/null +++ b/OracleAccessManagement/kubernetes/charts/apache-samples/custom-sample/certgen.sh @@ -0,0 +1,51 @@ +#!/bin/sh +# Copyright (c) 2018, 2021, Oracle and/or its affiliates. +# Licensed under the Universal Permissive License v 1.0 as shown at https://oss.oracle.com/licenses/upl. 
+# +# Since: June, 2018 +# Author: dongbo.xiao@oracle.com +# Description: script to start Apache HTTP Server +# +# DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS HEADER. + +# Generated configuration file +CONFIG_FILE="config.txt" + +cat > $CONFIG_FILE <<-EOF +[req] +default_bits = 2048 +prompt = no +default_md = sha256 +req_extensions=v3_req +extensions=v3_req +distinguished_name = dn + +[dn] +C = US +ST = CA +L = Redwood Shores +O = Oracle Corporation +OU = Apache HTTP Server With Plugin +CN = $VIRTUAL_HOST_NAME + +[v3_req] +subjectAltName = @alt_names +[alt_names] +DNS.1 = $VIRTUAL_HOST_NAME +DNS.2 = $VIRTUAL_HOST_NAME.cloud.oracle.com +DNS.3 = *.$VIRTUAL_HOST_NAME +DNS.4 = localhost +EOF + +echo "Generating certs for $VIRTUAL_HOST_NAME" + +# Generate our Private Key, CSR and Certificate +# Use SHA-2 as SHA-1 is unsupported from Jan 1, 2017 + +openssl req -x509 -newkey rsa:2048 -sha256 -nodes -keyout "$SSL_CERT_KEY_FILE" -days 3650 -out "$SSL_CERT_FILE" -config "$CONFIG_FILE" + +# OPTIONAL - write an info to see the details of the generated crt +openssl x509 -noout -fingerprint -text < "$SSL_CERT_FILE" > "$SSL_CERT_FILE.info" +# Protect the key +chmod 400 "$SSL_CERT_KEY_FILE" +chmod 400 "$SSL_CERT_FILE.info" diff --git a/OracleAccessManagement/kubernetes/charts/apache-samples/custom-sample/custom_mod_wl_apache.conf b/OracleAccessManagement/kubernetes/charts/apache-samples/custom-sample/custom_mod_wl_apache.conf new file mode 100755 index 000000000..8a2d05f0d --- /dev/null +++ b/OracleAccessManagement/kubernetes/charts/apache-samples/custom-sample/custom_mod_wl_apache.conf @@ -0,0 +1,37 @@ +# Copyright (c) 2018, 2021, Oracle and/or its affiliates. +# Licensed under the Universal Permissive License v 1.0 as shown at https://oss.oracle.com/licenses/upl. 
+ + +WebLogicHost ${WEBLOGIC_HOST} +WebLogicPort ${WEBLOGIC_PORT} + + +# Directive for weblogic admin Console deployed on Weblogic Admin Server + +SetHandler weblogic-handler +WebLogicHost domain1-admin-server +WebLogicPort ${WEBLOGIC_PORT} + + +# Directive for all application deployed on weblogic cluster with a prepath defined by LOCATION variable +# For example, if the LOCAITON is set to '/weblogic', all applications deployed on the cluster can be accessed via +# http://myhost:myport/weblogic/application_end_url +# where 'myhost' is the IP of the machine that runs the Apache web tier, and +# 'myport' is the port that the Apache web tier is publicly exposed to. +# Note that LOCATION cannot be set to '/' unless this is the only Location module configured. + +WLSRequest On +WebLogicCluster domain1-cluster-cluster-1:8001 +PathTrim /weblogic1 + + +# Directive for all application deployed on weblogic cluster with a prepath defined by LOCATION2 variable +# For example, if the LOCAITON2 is set to '/weblogic2', all applications deployed on the cluster can be accessed via +# http://myhost:myport/weblogic2/application_end_url +# where 'myhost' is the IP of the machine that runs the Apache web tier, and +# 'myport' is the port that the Apache webt ier is publicly exposed to. + +WLSRequest On +WebLogicCluster domain2-cluster-cluster-1:8021 +PathTrim /weblogic2 + diff --git a/OracleAccessManagement/kubernetes/charts/apache-samples/custom-sample/input.yaml b/OracleAccessManagement/kubernetes/charts/apache-samples/custom-sample/input.yaml new file mode 100755 index 000000000..95eaec6e9 --- /dev/null +++ b/OracleAccessManagement/kubernetes/charts/apache-samples/custom-sample/input.yaml @@ -0,0 +1,28 @@ +# Copyright (c) 2018, 2021, Oracle and/or its affiliates. +# Licensed under the Universal Permissive License v 1.0 as shown at https://oss.oracle.com/licenses/upl. 
+ +# Use this to provide your own Apache webtier configuration as needed; simply define the +# Persistence Volume which contains your own custom_mod_wl_apache.conf file and provide the Persistence Volume Claim Name +persistentVolumeClaimName: + +# imagePullSecrets contains an optional list of Kubernetes secrets, that are needed +# to access the registry containing the apache webtier image. +# If no secrets are required, then omit this property. +# +# Example : a secret is needed, and has been stored in 'my-apache-webtier-secret' +# +# imagePullSecrets: +# - name: my-apache-webtier-secret + +# The VirtualHostName of the Apache HTTP server. It is used to enable custom SSL configuration. +virtualHostName: apache-sample-host + +# The customer supplied certificate to use for Apache webtier SSL configuration. +# The value must be a string containing a base64 encoded certificate. Run following command to get it. +# base64 -i ${SSL_CERT_FILE} | tr -d '\n' +customCert: LS0tLS1CRUdJTiBDRVJUSUZJQ0FURS0tLS0tCk1JSURxakNDQXBJQ0NRQ0w2Q2JwRWZ6QnB6QU5CZ2txaGtpRzl3MEJBUXNGQURDQmxqRUxNQWtHQTFVRUJoTUMKVlZNeEN6QUpCZ05WQkFnTUFrTkJNUmN3RlFZRFZRUUhEQTVTWldSM2IyOWtJRk5vYjNKbGN6RWJNQmtHQTFVRQpDZ3dTVDNKaFkyeGxJRU52Y25CdmNtRjBhVzl1TVNjd0pRWURWUVFMREI1QmNHRmphR1VnU0ZSVVVDQlRaWEoyClpYSWdWMmwwYUNCUWJIVm5hVzR4R3pBWkJnTlZCQU1NRW1Gd1lXTm9aUzF6WVcxd2JHVXRhRzl6ZERBZUZ3MHgKT0RFeE1UUXhOVEF3TURGYUZ3MHlPREV4TVRFeE5UQXdNREZhTUlHV01Rc3dDUVlEVlFRR0V3SlZVekVMTUFrRwpBMVVFQ0F3Q1EwRXhGekFWQmdOVkJBY01EbEpsWkhkdmIyUWdVMmh2Y21Wek1Sc3dHUVlEVlFRS0RCSlBjbUZqCmJHVWdRMjl5Y0c5eVlYUnBiMjR4SnpBbEJnTlZCQXNNSGtGd1lXTm9aU0JJVkZSUUlGTmxjblpsY2lCWGFYUm8KSUZCc2RXZHBiakViTUJrR0ExVUVBd3dTWVhCaFkyaGxMWE5oYlhCc1pTMW9iM04wTUlJQklqQU5CZ2txaGtpRwo5dzBCQVFFRkFBT0NBUThBTUlJQkNnS0NBUUVBeXBVWjEzV3ltcUVnSUZOVTZDa2E0SkFqMXFNemZ4T2FjTklNClVKRE9zZUtqdjNOYmpJb0szQTArcE9lRDNPOXBNcUVxM3F5ZWlCTUtnVEQwREhZNS9HQldjeEUvdUJyWk0rQzgKcnl3RVk5QTl5Y1drZ3h4NUFqSFM1ZnRLMFhpQU9OZWdnUnV0RTBTTnRmbmY3T0FwaStzU0k1RlBzT2V2ZWZGVgoybjJHUDg0bHNDTTZ3Y3FLcXRKeStwOC94V
EJKdW1MY2RoL1daYktGTDd5YzFGSzdUNXdPVTB3eS9nZ1lVOUVvCk9tT3M3MENQWmloSkNrc1hrd1d0Q0JISEEwWGJPMXpYM1VZdnRpeGMwb2U3aFltd29zZnlQWU1raC9hL2pWYzEKWkhac25wQXZiWTZrVEoyY1dBa1hyS0srVmc5ZGJrWGVPY0FFTnNHazIvcXFxVGNOV1FJREFRQUJNQTBHQ1NxRwpTSWIzRFFFQkN3VUFBNElCQVFDQXZZNzBHVzBTM1V4d01mUHJGYTZvOFJxS3FNSDlCRE9lZ29zZGc5Nm9QakZnClgzRGJjblU5U0QxTzAyZUhNb0RTRldiNFlsK3dwZk9zUDFKekdQTERQcXV0RWRuVjRsbUJlbG15Q09xb0F4R0gKRW1vZGNUSWVxQXBnVDNEaHR1NW90UW4zZTdGaGNRRHhDelN6SldkUTRJTFh4SExsTVBkeHpRN1NwTzVySERGeAo0eEd6dkNHRkMwSlhBZ2w4dFhvR3dUYkpDR1hxYWV2cUIrNXVLY1NpSUo2M2dhQk1USytjUmF5MkR4L1dwcEdBClZWTnJsTWs4TEVQT1VSN2RZMm0xT3RaU1hCckdib3QwQjNEUG9yRkNpeVF5Q20vd0FYMFk0Z0hiMlNmcitOeFoKQkppb2VXajZ6ZGFvU3dPZkwxd2taWlJjVGtlZlZyZXdVRjZRQ3BCcAotLS0tLUVORCBDRVJUSUZJQ0FURS0tLS0tCg== + +# The customer supplied private key to use for Apache webtier SSL configuration. +# The value must be a string containing a base64 encoded key. Run following command to get it. +# base64 -i ${SSL_KEY_FILE} | tr -d '\n' +customKey: LS0tLS1CRUdJTiBQUklWQVRFIEtFWS0tLS0tCk1JSUV2Z0lCQURBTkJna3Foa2lHOXcwQkFRRUZBQVNDQktnd2dnU2tBZ0VBQW9JQkFRREtsUm5YZGJLYW9TQWcKVTFUb0tScmdrQ1BXb3pOL0U1cHcwZ3hRa002eDRxTy9jMXVNaWdyY0RUNms1NFBjNzJreW9TcmVySjZJRXdxQgpNUFFNZGpuOFlGWnpFVCs0R3RrejRMeXZMQVJqMEQzSnhhU0RISGtDTWRMbCswclJlSUE0MTZDQkc2MFRSSTIxCitkL3M0Q21MNnhJamtVK3c1Njk1OFZYYWZZWS96aVd3SXpyQnlvcXEwbkw2bnovRk1FbTZZdHgySDlabHNvVXYKdkp6VVVydFBuQTVUVERMK0NCaFQwU2c2WTZ6dlFJOW1LRWtLU3hlVEJhMElFY2NEUmRzN1hOZmRSaSsyTEZ6UwpoN3VGaWJDaXgvSTlneVNIOXIrTlZ6VmtkbXlla0M5dGpxUk1uWnhZQ1Jlc29yNVdEMTF1UmQ0NXdBUTJ3YVRiCitxcXBOdzFaQWdNQkFBRUNnZ0VCQUtPKzR4VnFHRVN1aWxZMnBVSEd2K2ZWK25IcWxweFh6eFQwWTJuWHNvck0KZzhralNGT1AzUGxEWjJoSmppZE9DUDBZa3B0TWNoUFJPRU4ydXowN2J1RlZTV3RXL09jbUpIeXZZalJCWXdiKwo4b0tlVTd4NmprRTgzcGh3aDJoTGUzRDJzZERKK3hyQTViNjZ5OG9lNHRZcTJ3Mk96aGhUSFY1MnVRdVRQS2xpCjJpSHNYQzIwT1dMSmRuMGU1a0IycTJhV3JJaUJBVzI1Y0JyRDQ5MWFyTDh0emJQOWM4eUUyWUdNM1FKaUFtbkYKNUxZUElzZFdVczJYNEhscWtUM0d6ZEVxNUtzV0pzdjN5QUkxOVJ4eXAwZXd1ditTN3hsRjdIZGlhbnR6ZUp4WAp3MnRWbHpjb1BVQVhoVHIxS0
N1UDNCT3BQVXNvMG9oaDNzRFVXamVVWUNVQ2dZRUE3L25QYTE5ckpKUExJOFZiCllhQ2pEKzhTR0FvVWZwSDdRTVFyT2RzR0RkcWRKa2VlNEJ0RDBITUEzL1lLVGFUK0JvRVZmQ2czSWpZVWpmeGcKSkp0VWlJVlcya0RsMU5NY0xXaldINExPaFErQlRGbWcvbFlkc2puMW9FbUJ1Rk1NYWF0ejNGdmZscFRCekg4cwpwMHFyL0hJYTFTbllBckVTUXZUVk9MMVhtcThDZ1lFQTJCd1V6NmpQdVVGR3ZKS3RxWTZVbE9yYm05WXFyYVdDCjlhQ3ZBTDFHZ0Q1U1FEcGRVZnl3MVlWdm9hUU9DWHBOL0Z5UHZCdFF2TzYrbHp0MjVTcmMwZk0weHI3d3ZHRmEKSW5FcmlSOXAvMXdXU01yaWFXZitKaE81NENneFZ0alBXZm1pOVNhc0pqOE1jZVk0cUNCNUVJLzM1cjVaa3lFRQozeEhzcEUxVnVuY0NnWUJLYXBveXZzVTM4NGprRDloMW50M1NIQjN0VEhyc2dSSjhGQmtmZU5jWXhybEMzS1RjCjlEZUVWWlZvM2lCMTBYdGd3dmpKcHFMcVBnRUR3c2FCczVWMFBIMGhjMHlTUWVFVUI5V1dzZmFlOXA3dThVQm0KZm9mNDg5WkNuV2pYb3hGUFYzYTNWOW92RlBSQUdSUGMwT0FpaWJQZWRIcGk0MHc1YlRrTnZsR0RTd0tCZ1FESApubWk2eUR2WDZ5dmowN2tGL2VYUkNIK0NHdm1oSEZremVoRXNwYWtSbkg5dFJId2UxMEtnZUhqODNnVDVURGZzCis3THBGbklsa29JS1A2czdVN1JWV2tsTnErSENvRW9adGw5NGNjUC9WSmhnOU1iZWhtaUQwNFRHUVZvUjFvTHgKb1YyZEJQUFBBRDRHbDVtTjh6RGcwNXN4VUhKOUxPckxBa3VNR01NdlVRS0JnQ2RUUGgwVHRwODNUUVZFZnR3bwpuSGVuSEQzMkhrZkR0MTV4Wk84NVZGcTlONVg2MjB2amZKNkNyVnloS1RISllUREs1N2owQ3Z2STBFTksxNytpCi9yaXgwVlFNMTBIMFFuTkZlb0pmS0VITHhXb2czSHVBSVZxTEg4NmJwcytmb25nOCtuMGgvbk5NZUZNYjdSNUMKdmFHNEVkc0VHV0hZS2FiL2lzRlowUVU0Ci0tLS0tRU5EIFBSSVZBVEUgS0VZLS0tLS0K diff --git a/OracleAccessManagement/kubernetes/charts/apache-samples/default-sample/README.md b/OracleAccessManagement/kubernetes/charts/apache-samples/default-sample/README.md new file mode 100755 index 000000000..806bab5c9 --- /dev/null +++ b/OracleAccessManagement/kubernetes/charts/apache-samples/default-sample/README.md @@ -0,0 +1,39 @@ +# Apache load balancer default sample +In this sample, we will configure the Apache webtier as a load balancer for a WebLogic domain using the default configuration. We will demonstrate how to use the Apache webtier to handle traffic to a backend WebLogic domain. + +## 1. Create a WebLogic domain +We need to prepare a backend domain for load balancing by the Apache webtier. 
Refer to the [sample](/kubernetes/samples/scripts/create-weblogic-domain/domain-home-on-pv/README.md), to create a WebLogic domain. Keep the default values for the following configuration parameters: +- namespace: `default` +- domainUID: `domain1` +- clusterName: `cluster-1` +- adminServerName: `admin-server` +- adminPort: `7001` +- managedServerPort: `8001` + +After the domain is successfully created, deploy the sample web application, `testwebapp.war`, on the domain cluster using the WLS Administration Console. The sample web application is located in the `kubernetes/samples/charts/application` directory. + +## 2. Build the Apache webtier Docker image +Refer to the [sample](https://github.com/oracle/docker-images/tree/master/OracleWebLogic/samples/12213-webtier-apache), to build the Apache webtier Docker image. + +## 3. Install the Apache webtier with a Helm chart +The Apache webtier Helm chart [is located here](../../apache-webtier/README.md). +Install the Apache webtier Helm chart into the default namespace with the default settings: +```shell +$ cd kubernetes/samples/charts +$ helm install my-release apache-webtier +``` + +## 4. Run the sample application +Now you can send request to the WebLogic domain with the unique entry point of Apache. Alternatively, you can access the URL in a web browser. +```shell +$ curl --silent http://${HOSTNAME}:30305/weblogic/testwebapp/ +``` +You can also use an SSL URL to send requests to the WebLogic domain. Access the SSL URL via the `curl` command or a web browser. +```shell +$ curl -k --silent https://${HOSTNAME}:30443/weblogic/testwebapp/ +``` + +## 5. 
Uninstall the Apache webtier +```shell +$ helm uninstall my-release +``` diff --git a/OracleAccessManagement/kubernetes/charts/apache-webtier/Chart.yaml b/OracleAccessManagement/kubernetes/charts/apache-webtier/Chart.yaml new file mode 100755 index 000000000..413b8ba2d --- /dev/null +++ b/OracleAccessManagement/kubernetes/charts/apache-webtier/Chart.yaml @@ -0,0 +1,20 @@ +# Copyright (c) 2018, 2021, Oracle and/or its affiliates. +# Licensed under the Universal Permissive License v 1.0 as shown at https://oss.oracle.com/licenses/upl. + +apiVersion: v1 +name: apache-webtier +version: 1.0.0 +appVersion: 12.2.1.3 +description: Chart for Apache HTTP Server +keywords: +- apache +- http +- https +- load balance +- proxy +home: https://httpd.apache.org +sources: +- https://github.com/oracle/weblogic-kubernetes-operator/tree/master/kubernetes/samples/charts/apache-webtier +maintainers: +- name: Oracle +engine: gotpl diff --git a/OracleAccessManagement/kubernetes/charts/apache-webtier/README.md b/OracleAccessManagement/kubernetes/charts/apache-webtier/README.md new file mode 100755 index 000000000..2be875dd3 --- /dev/null +++ b/OracleAccessManagement/kubernetes/charts/apache-webtier/README.md @@ -0,0 +1,92 @@ +# Apache webtier Helm chart + +This Helm chart bootstraps an Apache HTTP Server deployment on a [Kubernetes](http://kubernetes.io) cluster using the [Helm](https://helm.sh) package manager. + +The chart depends on the Docker image for the Apache HTTP Server with Oracle WebLogic Server Proxy Plugin (supported versions 12.2.1.3.0 and 12.2.1.4.0). See the details in [Apache HTTP Server with Oracle WebLogic Server Proxy Plugin on Docker](https://github.com/oracle/docker-images/tree/master/OracleWebLogic/samples/12213-webtier-apache). 
+ +## Prerequisites + +You will need to build a Docker image with the Apache webtier in it using the sample provided [here](https://github.com/oracle/docker-images/tree/master/OracleWebLogic/samples/12213-webtier-apache) +in order to use this load balancer. + +## Installing the Chart +To install the chart with the release name `my-release`: +```shell +$ helm install my-release apache-webtier +``` +The command deploys the Apache HTTP Server on the Kubernetes cluster with the default configuration. The [configuration](#configuration) section lists the parameters that can be configured during installation. + +> **Tip**: List all releases using `helm list` + +## Uninstalling the Chart + +To uninstall/delete `my-release`: + +```shell +$ helm uninstall my-release +``` + +The command removes all the Kubernetes components associated with the chart and deletes the release. + +## Configuration + +The following table lists the configurable parameters of the Apache webtier chart and their default values. 
+ + +| Parameter | Description | Default | +| -----------------------------------| ------------------------------------------------------------- | ----------------------| +| `image` | Apache webtier Docker image | `oracle/apache:12.2.1.3` | +| `imagePullPolicy` | Image pull policy for the Apache webtier Docker image | `IfNotPresent` | +| `imagePullSecrets` | Image pull Secrets required to access the registry containing the Apache webtier Docker image| ``| +| `persistentVolumeClaimName` | Persistence Volume Claim name Apache webtier | `` | +| `createRBAC` | Boolean indicating if RBAC resources should be created | `true` | +| `httpNodePort` | Node port to expose for HTTP access | `30305` | +| `httpsNodePort` | Node port to expose for HTTPS access | `30443` | +| `virtualHostName` | The `VirtualHostName` of the Apache HTTP Server | `` | +| `customCert` | The customer supplied certificate | `` | +| `customKey` | The customer supplied private key | `` | +| `domainUID` | Unique ID identifying a domain | `domain1` | +| `clusterName` | Cluster name | `cluster-1` | +| `adminServerName` | Name of the Administration Server | `admin-server` | +| `adminPort` | Port number for Administration Server | `7001` | +| `managedServerPort` | Port number for each Managed Server | `8001` | +| `location` | Prepath for all applications deployed on the WebLogic cluster | `/weblogic` | +| `useNonPriviledgedPorts` | Configuration of Apache webtier on NonPriviledgedPort | `false` | + + +Specify each parameter using the `--set key=value[,key=value]` argument to `helm install`. For example: + +```shell +$ helm install my-release --set persistentVolumeClaimName=webtier-apache-pvc apache-webtier +``` + +Alternatively, a YAML file that specifies the values for the parameters can be provided while +installing the chart. 
For example: + +```shell +$ helm install my-release --values values.yaml apache-webtier +``` +## useNonPriviledgedPorts +By default, the chart will install the Apache webtier on PriviledgedPort (port 80). Set the flag `useNonPriviledgedPorts=true` to enable the Apache webtier to listen on port `8080` + + +## RBAC +By default, the chart will install the recommended RBAC roles and role bindings. + +Set the flag `--authorization-mode=RBAC` on the API server. See the following document for how to enable [RBAC](https://kubernetes.io/docs/reference/access-authn-authz/rbac/). + +To determine if your cluster supports RBAC, run the following command: + +```shell +$ kubectl api-versions | grep rbac +``` + +If the output contains "beta", you may install the chart with RBAC enabled. + +### Disable RBAC role/rolebinding creation + +To disable the creation of RBAC resources (on clusters with RBAC). Do the following: + +```shell +$ helm install my-release apache-webtier --set createRBAC=false +``` diff --git a/OracleAccessManagement/kubernetes/charts/apache-webtier/templates/_helpers.tpl b/OracleAccessManagement/kubernetes/charts/apache-webtier/templates/_helpers.tpl new file mode 100755 index 000000000..c7999d287 --- /dev/null +++ b/OracleAccessManagement/kubernetes/charts/apache-webtier/templates/_helpers.tpl @@ -0,0 +1,25 @@ +# Copyright (c) 2018, 2021, Oracle and/or its affiliates. +# Licensed under the Universal Permissive License v 1.0 as shown at https://oss.oracle.com/licenses/upl. + +{/* vim: set filetype=mustache: */}} +{{/* +Expand the name of the chart. +*/}} +{{- define "apache.name" -}} +{{- default .Chart.Name .Values.nameOverride | trunc 63 | trimSuffix "-" -}} +{{- end -}} + +{{/* +Create a default fully qualified app name. 
+*/}} +{{- define "apache.fullname" -}} +{{- $name := default .Chart.Name .Values.nameOverride -}} +{{- printf "%s-%s" .Release.Name $name | trunc 63 -}} +{{- end -}} + +{{/* +Create the name of the service account to use +*/}} +{{- define "apache.serviceAccountName" -}} +{{- printf "%s-%s" .Release.Name .Chart.Name | trunc 63 -}} +{{- end -}} diff --git a/OracleAccessManagement/kubernetes/charts/apache-webtier/templates/cluster-role-binding.yaml b/OracleAccessManagement/kubernetes/charts/apache-webtier/templates/cluster-role-binding.yaml new file mode 100755 index 000000000..188e54d1a --- /dev/null +++ b/OracleAccessManagement/kubernetes/charts/apache-webtier/templates/cluster-role-binding.yaml @@ -0,0 +1,17 @@ +# Copyright (c) 2018, 2021, Oracle and/or its affiliates. +# Licensed under the Universal Permissive License v 1.0 as shown at https://oss.oracle.com/licenses/upl. + +{{ if .Values.createRBAC }} +kind: ClusterRoleBinding +apiVersion: rbac.authorization.k8s.io/v1 +metadata: + name: {{ template "apache.fullname" . }} +roleRef: + apiGroup: rbac.authorization.k8s.io + kind: ClusterRole + name: {{ template "apache.fullname" . }} +subjects: +- kind: ServiceAccount + name: {{ template "apache.serviceAccountName" . }} + namespace: {{ .Release.Namespace | quote }} +{{ end }} diff --git a/OracleAccessManagement/kubernetes/charts/apache-webtier/templates/cluster-role.yaml b/OracleAccessManagement/kubernetes/charts/apache-webtier/templates/cluster-role.yaml new file mode 100755 index 000000000..449a87664 --- /dev/null +++ b/OracleAccessManagement/kubernetes/charts/apache-webtier/templates/cluster-role.yaml @@ -0,0 +1,29 @@ +# Copyright (c) 2018, 2021, Oracle and/or its affiliates. +# Licensed under the Universal Permissive License v 1.0 as shown at https://oss.oracle.com/licenses/upl. + +{{ if .Values.createRBAC }} +kind: ClusterRole +apiVersion: rbac.authorization.k8s.io/v1 +metadata: + name: {{ template "apache.fullname" . 
}} +rules: + - apiGroups: + - "" + resources: + - pods + - services + - endpoints + - secrets + verbs: + - get + - list + - watch + - apiGroups: + - extensions + resources: + - ingresses + verbs: + - get + - list + - watch +{{ end }} diff --git a/OracleAccessManagement/kubernetes/charts/apache-webtier/templates/deployment.yaml b/OracleAccessManagement/kubernetes/charts/apache-webtier/templates/deployment.yaml new file mode 100755 index 000000000..cd7b07ad3 --- /dev/null +++ b/OracleAccessManagement/kubernetes/charts/apache-webtier/templates/deployment.yaml @@ -0,0 +1,106 @@ +# Copyright (c) 2018, 2021, Oracle and/or its affiliates. +# Licensed under the Universal Permissive License v 1.0 as shown at https://oss.oracle.com/licenses/upl. + +kind: Deployment +apiVersion: apps/v1 +metadata: + name: {{ template "apache.fullname" . }} + namespace: {{ .Release.Namespace }} + labels: + app: {{ template "apache.fullname" . }} +spec: + replicas: 1 + selector: + matchLabels: + app: {{ template "apache.fullname" . }} + template: + metadata: + labels: + app: {{ template "apache.fullname" . }} + spec: + serviceAccountName: {{ template "apache.serviceAccountName" . }} + terminationGracePeriodSeconds: 60 +{{- if or (and (.Values.virtualHostName) (.Values.customCert)) (.Values.persistentVolumeClaimName) }} + volumes: +{{- end }} +{{- if and (.Values.virtualHostName) (.Values.customCert) }} + - name: serving-cert + secret: + defaultMode: 420 + secretName: {{ template "apache.fullname" . }}-cert +{{- end }} +{{- if .Values.persistentVolumeClaimName }} + - name: {{ template "apache.fullname" . }} + persistentVolumeClaim: + claimName: {{ .Values.persistentVolumeClaimName | quote }} +{{- end }} + {{- if .Values.imagePullSecrets }} + imagePullSecrets: + {{ .Values.imagePullSecrets | toYaml }} + {{- end }} + containers: + - name: {{ template "apache.fullname" . 
}} + image: {{ .Values.image | quote }} + imagePullPolicy: {{ .Values.imagePullPolicy | quote }} +{{- if or (and (.Values.virtualHostName) (.Values.customCert)) (.Values.persistentVolumeClaimName) }} + volumeMounts: +{{- end }} +{{- if and (.Values.virtualHostName) (.Values.customCert) }} + - name: serving-cert + mountPath: "/var/serving-cert" +{{- end }} +{{- if .Values.persistentVolumeClaimName }} + - name: {{ template "apache.fullname" . }} + mountPath: "/config" +{{- end }} +{{- if or (not (.Values.persistentVolumeClaimName)) (.Values.virtualHostName) }} + env: +{{- end }} +{{- if .Values.useNonPriviledgedPorts }} + - name: NonPriviledgedPorts + value: "true" +{{- end }} +{{- if not (.Values.persistentVolumeClaimName) }} + - name: WEBLOGIC_CLUSTER + value: "{{ .Values.domainUID | replace "_" "-" | lower }}-cluster-{{ .Values.clusterName | replace "_" "-" | lower }}:{{ .Values.managedServerPort }}" + - name: LOCATION + value: {{ .Values.location | quote }} + - name: WEBLOGIC_HOST + value: "{{ .Values.domainUID | replace "_" "-" | lower }}-{{ .Values.adminServerName | replace "_" "-" | lower }}" + - name: WEBLOGIC_PORT + value: {{ .Values.adminPort | quote }} +{{- end }} +{{- if .Values.virtualHostName }} + - name: VIRTUAL_HOST_NAME + value: {{ .Values.virtualHostName | quote }} +{{- if .Values.customCert }} + - name: SSL_CERT_FILE + value: "/var/serving-cert/tls.crt" + - name: SSL_CERT_KEY_FILE + value: "/var/serving-cert/tls.key" +{{- end }} +{{- end }} + readinessProbe: + tcpSocket: +{{- if .Values.useNonPriviledgedPorts }} + port: 8080 +{{- else }} + port: 80 +{{- end }} + failureThreshold: 1 + initialDelaySeconds: 10 + periodSeconds: 10 + successThreshold: 1 + timeoutSeconds: 2 + livenessProbe: + tcpSocket: +{{- if .Values.useNonPriviledgedPorts }} + port: 8080 +{{- else }} + port: 80 +{{- end }} + failureThreshold: 3 + initialDelaySeconds: 10 + periodSeconds: 10 + successThreshold: 1 + timeoutSeconds: 2 diff --git 
a/OracleAccessManagement/kubernetes/charts/apache-webtier/templates/secret.yaml b/OracleAccessManagement/kubernetes/charts/apache-webtier/templates/secret.yaml new file mode 100755 index 000000000..bb716f50b --- /dev/null +++ b/OracleAccessManagement/kubernetes/charts/apache-webtier/templates/secret.yaml @@ -0,0 +1,14 @@ +# Copyright (c) 2018, 2021, Oracle and/or its affiliates. +# Licensed under the Universal Permissive License v 1.0 as shown at https://oss.oracle.com/licenses/upl. + +{{ if .Values.customCert }} +apiVersion: v1 +kind: Secret +metadata: + name: {{ template "apache.fullname" . }}-cert + namespace: {{ .Release.Namespace | quote }} +type: Opaque +data: + tls.crt: {{ .Values.customCert | quote }} + tls.key: {{ .Values.customKey | quote }} +{{ end }} diff --git a/OracleAccessManagement/kubernetes/charts/apache-webtier/templates/service-account.yaml b/OracleAccessManagement/kubernetes/charts/apache-webtier/templates/service-account.yaml new file mode 100755 index 000000000..f76d46aec --- /dev/null +++ b/OracleAccessManagement/kubernetes/charts/apache-webtier/templates/service-account.yaml @@ -0,0 +1,8 @@ +# Copyright (c) 2018, 2021, Oracle and/or its affiliates. +# Licensed under the Universal Permissive License v 1.0 as shown at https://oss.oracle.com/licenses/upl. + +apiVersion: v1 +kind: ServiceAccount +metadata: + name: {{ template "apache.serviceAccountName" . }} + namespace: {{ .Release.Namespace | quote }} diff --git a/OracleAccessManagement/kubernetes/charts/apache-webtier/templates/service.yaml b/OracleAccessManagement/kubernetes/charts/apache-webtier/templates/service.yaml new file mode 100755 index 000000000..c8b8089eb --- /dev/null +++ b/OracleAccessManagement/kubernetes/charts/apache-webtier/templates/service.yaml @@ -0,0 +1,28 @@ +# Copyright (c) 2018, 2021, Oracle and/or its affiliates. +# Licensed under the Universal Permissive License v 1.0 as shown at https://oss.oracle.com/licenses/upl. 
+ +apiVersion: v1 +kind: Service +metadata: + name: {{ template "apache.fullname" . }} + namespace: {{ .Release.Namespace | quote }} +spec: + type: NodePort + selector: + app: {{ template "apache.fullname" . }} + ports: +{{- if .Values.useNonPriviledgedPorts }} + - port: 8080 +{{- else}} + - port: 80 +{{- end }} + nodePort: {{ .Values.httpNodePort }} + name: http +{{- if .Values.virtualHostName }} + - port: 4433 +{{- else }} + - port: 443 +{{- end }} + nodePort: {{ .Values.httpsNodePort }} + name: https + diff --git a/OracleAccessManagement/kubernetes/charts/apache-webtier/values.yaml b/OracleAccessManagement/kubernetes/charts/apache-webtier/values.yaml new file mode 100755 index 000000000..ee0a8a815 --- /dev/null +++ b/OracleAccessManagement/kubernetes/charts/apache-webtier/values.yaml @@ -0,0 +1,79 @@ +# Copyright (c) 2018, 2021, Oracle and/or its affiliates. +# Licensed under the Universal Permissive License v 1.0 as shown at https://oss.oracle.com/licenses/upl. + +# Apache webtier docker image +image: "oracle/apache:12.2.1.3" + +# imagePullPolicy specifies the image pull policy for the apache webiter docker image +imagePullPolicy: "IfNotPresent" + +# imagePullSecrets contains an optional list of Kubernetes secrets, that are needed +# to access the registry containing the apache webtier image. +# If no secrets are required, then omit this property. +# +# Example : a secret is needed, and has been stored in 'my-apache-webtier-secret' +# +# imagePullSecrets: +# - name: my-apache-webtier-secret +# +# imagePullSecrets: +# - name: + +# Volume path for Apache webtier. By default, it is empty, which causes the volume +# mount be disabled and, therefore, the built-in Apache plugin config be used. +# Use this to provide your own Apache webtier configuration as needed; simply define this +# path and put your own custom_mod_wl_apache.conf file under this path. 
+persistentVolumeClaimName: + +# Boolean indicating if RBAC resources should be created +createRBAC: true + +# NodePort to expose for http access +httpNodePort: 30305 + +# NodePort to expose for https access +httpsNodePort: 30443 + +# The VirtualHostName of the Apache HTTP server. It is used to enable custom SSL configuration. +# If it is set, the Apache HTTP Server is configured to listen to port 4433 for SSL traffic. +virtualHostName: + +# The customer supplied certificate to use for Apache webtier SSL configuration. +# The value must be a string containing a base64 encoded certificate. +# If 'virtualHostName' is set, the custom certificate and private key are not provided, +# the default built-in auto-generated sample certificate and private key in the apache image will be used. +# This parameter is ignored if 'virtualHostName' is not set. +customCert: + +# The customer supplied private key to use for Apache webtier SSL configuration. +# The value must be a string containing a base64 encoded key. +# If 'virtualHostName' is set, the custom certificate and private key are not provided, +# the default built-in auto-generated sample certificate and private key in the apache image will be used. +# This parameter is ignored if 'virtualHostName' is not set. +customKey: + +# Unique ID identifying a domain. +# This ID must not contain an underscore ("_"), and must be lowercase and unique across all domains in a Kubernetes cluster. +domainUID: "domain1" + +# Cluster name +clusterName: "cluster-1" + +# Name of the admin server +adminServerName: "admin-server" + +# Port number for admin server +adminPort: 7001 + +# Port number for each managed server +managedServerPort: 8001 + +# Prepath for all application deployed on WebLogic cluster. 
+# For example, if it is set to '/weblogic', all applications deployed on the cluster can be accessed via +# http://myhost:myport/weblogic/application_end_url +# where 'myhost' is the IP of the machine that runs the Apache web tier, and +# 'myport' is the port that the Apache web tier is publicly exposed to. +location: "/weblogic" + +# Use non privileged port 8080 to listen. If set to false, default privileged port 80 will be used. +useNonPriviledgedPorts: false diff --git a/OracleAccessManagement/kubernetes/3.0.1/ingress-per-domain/Chart.yaml b/OracleAccessManagement/kubernetes/charts/ingress-per-domain/Chart.yaml similarity index 78% rename from OracleAccessManagement/kubernetes/3.0.1/ingress-per-domain/Chart.yaml rename to OracleAccessManagement/kubernetes/charts/ingress-per-domain/Chart.yaml index a65e7bb2e..dc3981291 100755 --- a/OracleAccessManagement/kubernetes/3.0.1/ingress-per-domain/Chart.yaml +++ b/OracleAccessManagement/kubernetes/charts/ingress-per-domain/Chart.yaml @@ -1,6 +1,6 @@ -# Copyright (c) 2020, Oracle Corporation and/or its affiliates. +# Copyright (c) 2020, 2021, Oracle and/or its affiliates. # Licensed under the Universal Permissive License v 1.0 as shown at https://oss.oracle.com/licenses/upl. - +# apiVersion: v1 appVersion: "1.0" description: A Helm chart to create an Ingress for a WLS domain. diff --git a/OracleAccessManagement/kubernetes/charts/ingress-per-domain/templates/nginx-ingress.yaml b/OracleAccessManagement/kubernetes/charts/ingress-per-domain/templates/nginx-ingress.yaml new file mode 100755 index 000000000..956ac0acb --- /dev/null +++ b/OracleAccessManagement/kubernetes/charts/ingress-per-domain/templates/nginx-ingress.yaml @@ -0,0 +1,181 @@ +# Copyright (c) 2020, 2021, Oracle and/or its affiliates. +# Licensed under the Universal Permissive License v 1.0 as shown at https://oss.oracle.com/licenses/upl. 
+ +{{- if eq .Values.type "NGINX" }} +--- +{{- if semverCompare ">=1.19-0" .Capabilities.KubeVersion.GitVersion -}} +apiVersion: networking.k8s.io/v1 +{{- else -}} +apiVersion: extensions/v1beta1 +{{- end }} +kind: Ingress +metadata: + name: access-ingress + namespace: {{ .Release.Namespace }} + labels: + weblogic.resourceVersion: domain-v2 +{{- if eq .Values.sslType "SSL" }} + annotations: + nginx.ingress.kubernetes.io/proxy-buffer-size: "2000k" + kubernetes.io/ingress.class: nginx + nginx.ingress.kubernetes.io/enable-access-log: "false" + nginx.ingress.kubernetes.io/configuration-snippet: | + more_set_input_headers "X-Forwarded-Proto: https"; + more_clear_input_headers "WL-Proxy-Client-IP" "WL-Proxy-SSL"; + more_set_input_headers "WL-Proxy-SSL: true"; + nginx.ingress.kubernetes.io/ingress.allow-http: "false" +{{- end }} +spec: + rules: + {{- if semverCompare ">=1.19-0" .Capabilities.KubeVersion.GitVersion }} + - http: + paths: + - path: /console + pathType: ImplementationSpecific + backend: + service: + name: '{{ .Values.wlsDomain.domainUID }}-{{ .Values.wlsDomain.adminServerName | lower | replace "_" "-" }}' + port: + number: {{ .Values.wlsDomain.adminServerPort }} + - path: /rreg/rreg + pathType: ImplementationSpecific + backend: + service: + name: '{{ .Values.wlsDomain.domainUID }}-{{ .Values.wlsDomain.adminServerName | lower | replace "_" "-" }}' + port: + number: {{ .Values.wlsDomain.adminServerPort }} + - path: /em + pathType: ImplementationSpecific + backend: + service: + name: '{{ .Values.wlsDomain.domainUID }}-{{ .Values.wlsDomain.adminServerName | lower | replace "_" "-" }}' + port: + number: {{ .Values.wlsDomain.adminServerPort }} + - path: /oamconsole + pathType: ImplementationSpecific + backend: + service: + name: '{{ .Values.wlsDomain.domainUID }}-{{ .Values.wlsDomain.adminServerName | lower | replace "_" "-" }}' + port: + number: {{ .Values.wlsDomain.adminServerPort }} + - path: /dms + pathType: ImplementationSpecific + backend: + service: + name: 
'{{ .Values.wlsDomain.domainUID }}-{{ .Values.wlsDomain.adminServerName | lower | replace "_" "-" }}' + port: + number: {{ .Values.wlsDomain.adminServerPort }} + - path: /oam/services/rest + pathType: ImplementationSpecific + backend: + service: + name: '{{ .Values.wlsDomain.domainUID }}-{{ .Values.wlsDomain.adminServerName | lower | replace "_" "-" }}' + port: + number: {{ .Values.wlsDomain.adminServerPort }} + - path: /iam/admin/config + pathType: ImplementationSpecific + backend: + service: + name: '{{ .Values.wlsDomain.domainUID }}-{{ .Values.wlsDomain.adminServerName | lower | replace "_" "-" }}' + port: + number: {{ .Values.wlsDomain.adminServerPort }} + - path: /iam/admin/diag + pathType: ImplementationSpecific + backend: + service: + name: '{{ .Values.wlsDomain.domainUID }}-{{ .Values.wlsDomain.adminServerName | lower | replace "_" "-" }}' + port: + number: {{ .Values.wlsDomain.adminServerPort }} + - path: /iam/access + pathType: ImplementationSpecific + backend: + service: + name: '{{ .Values.wlsDomain.domainUID }}-cluster-{{ .Values.wlsDomain.oamClusterName | lower | replace "_" "-" }}' + port: + number: {{ .Values.wlsDomain.oamManagedServerPort }} + - path: /oam/admin/api + pathType: ImplementationSpecific + backend: + service: + name: '{{ .Values.wlsDomain.domainUID }}-{{ .Values.wlsDomain.adminServerName | lower | replace "_" "-" }}' + port: + number: {{ .Values.wlsDomain.adminServerPort }} + - path: /oam/services/rest/access/api + pathType: ImplementationSpecific + backend: + service: + name: '{{ .Values.wlsDomain.domainUID }}-cluster-{{ .Values.wlsDomain.oamClusterName | lower | replace "_" "-" }}' + port: + number: {{ .Values.wlsDomain.oamManagedServerPort }} + - path: /access + pathType: ImplementationSpecific + backend: + service: + name: '{{ .Values.wlsDomain.domainUID }}-cluster-{{ .Values.wlsDomain.policyClusterName | lower | replace "_" "-" }}' + port: + number: {{ .Values.wlsDomain.policyManagedServerPort }} + - path: / + pathType: 
ImplementationSpecific + backend: + service: + name: '{{ .Values.wlsDomain.domainUID }}-cluster-{{ .Values.wlsDomain.oamClusterName | lower | replace "_" "-" }}' + port: + number: {{ .Values.wlsDomain.oamManagedServerPort }} +{{- else }} + - http: + paths: + - path: /console + backend: + serviceName: '{{ .Values.wlsDomain.domainUID }}-{{ .Values.wlsDomain.adminServerName | lower | replace "_" "-" }}' + servicePort: {{ .Values.wlsDomain.adminServerPort }} + - path: /rreg/rreg + backend: + serviceName: '{{ .Values.wlsDomain.domainUID }}-{{ .Values.wlsDomain.adminServerName | lower | replace "_" "-" }}' + servicePort: {{ .Values.wlsDomain.adminServerPort }} + - path: /em + backend: + serviceName: '{{ .Values.wlsDomain.domainUID }}-{{ .Values.wlsDomain.adminServerName | lower | replace "_" "-" }}' + servicePort: {{ .Values.wlsDomain.adminServerPort }} + - path: /oamconsole + backend: + serviceName: '{{ .Values.wlsDomain.domainUID }}-{{ .Values.wlsDomain.adminServerName | lower | replace "_" "-" }}' + servicePort: {{ .Values.wlsDomain.adminServerPort }} + - path: /dms + backend: + serviceName: '{{ .Values.wlsDomain.domainUID }}-{{ .Values.wlsDomain.adminServerName | lower | replace "_" "-" }}' + servicePort: {{ .Values.wlsDomain.adminServerPort }} + - path: /oam/services/rest + backend: + serviceName: '{{ .Values.wlsDomain.domainUID }}-{{ .Values.wlsDomain.adminServerName | lower | replace "_" "-" }}' + servicePort: {{ .Values.wlsDomain.adminServerPort }} + - path: /iam/admin/config + backend: + serviceName: '{{ .Values.wlsDomain.domainUID }}-{{ .Values.wlsDomain.adminServerName | lower | replace "_" "-" }}' + servicePort: {{ .Values.wlsDomain.adminServerPort }} + - path: /iam/admin/diag + backend: + serviceName: '{{ .Values.wlsDomain.domainUID }}-{{ .Values.wlsDomain.adminServerName | lower | replace "_" "-" }}' + servicePort: {{ .Values.wlsDomain.adminServerPort }} + - path: /iam/access + backend: + serviceName: '{{ .Values.wlsDomain.domainUID }}-cluster-{{ 
.Values.wlsDomain.oamClusterName | lower | replace "_" "-" }}' + servicePort: {{ .Values.wlsDomain.oamManagedServerPort }} + - path: /oam/admin/api + backend: + serviceName: '{{ .Values.wlsDomain.domainUID }}-{{ .Values.wlsDomain.adminServerName | lower | replace "_" "-" }}' + servicePort: {{ .Values.wlsDomain.adminServerPort }} + - path: /oam/services/rest/access/api + backend: + serviceName: '{{ .Values.wlsDomain.domainUID }}-cluster-{{ .Values.wlsDomain.oamClusterName | lower | replace "_" "-" }}' + servicePort: {{ .Values.wlsDomain.oamManagedServerPort }} + - path: /access + backend: + serviceName: '{{ .Values.wlsDomain.domainUID }}-cluster-{{ .Values.wlsDomain.policyClusterName | lower | replace "_" "-" }}' + servicePort: {{ .Values.wlsDomain.policyManagedServerPort }} + - path: / + backend: + serviceName: '{{ .Values.wlsDomain.domainUID }}-cluster-{{ .Values.wlsDomain.oamClusterName | lower | replace "_" "-" }}' + servicePort: {{ .Values.wlsDomain.oamManagedServerPort }} + +{{- end }} +{{- end }} diff --git a/OracleAccessManagement/kubernetes/charts/ingress-per-domain/values.yaml b/OracleAccessManagement/kubernetes/charts/ingress-per-domain/values.yaml new file mode 100755 index 000000000..2ecd64f08 --- /dev/null +++ b/OracleAccessManagement/kubernetes/charts/ingress-per-domain/values.yaml @@ -0,0 +1,27 @@ +# Copyright (c) 2020, 2021, Oracle and/or its affiliates. +# Licensed under the Universal Permissive License v 1.0 as shown at https://oss.oracle.com/licenses/upl. +# +# +# Default values for ingress-per-domain. +# This is a YAML-formatted file. +# Declare variables to be passed into your templates. +# +# Load balancer type. 
Supported values are: NGINX +type: NGINX + +# Type of Configuration Supported Values are : NONSSL and SSL +sslType: SSL + +#WLS domain as backend to the load balancer +wlsDomain: + domainUID: accessinfra + adminServerName: AdminServer + adminServerPort: 7001 + adminServerSSLPort: + oamClusterName: oam_cluster + oamManagedServerPort: 14100 + oamManagedServerSSLPort: + policyClusterName: policy_cluster + policyManagedServerPort: 15100 + policyManagedServerSSLPort: + diff --git a/OracleAccessManagement/kubernetes/charts/traefik/values.yaml b/OracleAccessManagement/kubernetes/charts/traefik/values.yaml new file mode 100755 index 000000000..e94bf24f2 --- /dev/null +++ b/OracleAccessManagement/kubernetes/charts/traefik/values.yaml @@ -0,0 +1,52 @@ +# Copyright (c) 2020, 2021, Oracle and/or its affiliates. +# Licensed under the Universal Permissive License v 1.0 as shown at https://oss.oracle.com/licenses/upl. +# +image: + name: traefik + tag: 2.2.8 + pullPolicy: IfNotPresent +ingressRoute: + dashboard: + enabled: true + # Additional ingressRoute annotations (e.g. for kubernetes.io/ingress.class) + annotations: {} + # Additional ingressRoute labels (e.g. for filtering IngressRoute by custom labels) + labels: {} +providers: + kubernetesCRD: + enabled: true + kubernetesIngress: + enabled: true + # IP used for Kubernetes Ingress endpoints +ports: + traefik: + port: 9000 + expose: true + # The exposed port for this service + exposedPort: 9000 + # The port protocol (TCP/UDP) + protocol: TCP + web: + port: 8000 + # hostPort: 8000 + expose: true + exposedPort: 30305 + nodePort: 30305 + # The port protocol (TCP/UDP) + protocol: TCP + # Use nodeport if set. This is useful if you have configured Traefik in a + # LoadBalancer + # nodePort: 32080 + # Port Redirections + # Added in 2.2, you can make permanent redirects via entrypoints. 
+ # https://docs.traefik.io/routing/entrypoints/#redirection + # redirectTo: websecure + websecure: + port: 8443 +# # hostPort: 8443 + expose: true + exposedPort: 30443 + # The port protocol (TCP/UDP) + protocol: TCP + nodePort: 30443 + diff --git a/OracleAccessManagement/kubernetes/charts/weblogic-operator/.helmignore b/OracleAccessManagement/kubernetes/charts/weblogic-operator/.helmignore new file mode 100755 index 000000000..1397cc19f --- /dev/null +++ b/OracleAccessManagement/kubernetes/charts/weblogic-operator/.helmignore @@ -0,0 +1,12 @@ +# Patterns to ignore when building packages. +# This supports shell glob matching, relative path matching, and +# negation (prefixed with !). Only one pattern per line. +.DS_Store +.git/ +.gitignore +*.bak +*.tmp +*.orig +*~ +.project +.idea/ diff --git a/OracleAccessManagement/kubernetes/charts/weblogic-operator/Chart.yaml b/OracleAccessManagement/kubernetes/charts/weblogic-operator/Chart.yaml new file mode 100755 index 000000000..b5cac770e --- /dev/null +++ b/OracleAccessManagement/kubernetes/charts/weblogic-operator/Chart.yaml @@ -0,0 +1,10 @@ +# Copyright (c) 2018, 2021, Oracle and/or its affiliates. +# Licensed under the Universal Permissive License v 1.0 as shown at https://oss.oracle.com/licenses/upl. + +apiVersion: v1 +name: weblogic-operator +description: Helm chart for configuring the WebLogic operator. + +type: application +version: 3.3.0 +appVersion: 3.3.0 diff --git a/OracleAccessManagement/kubernetes/charts/weblogic-operator/templates/_domain-namespaces.tpl b/OracleAccessManagement/kubernetes/charts/weblogic-operator/templates/_domain-namespaces.tpl new file mode 100755 index 000000000..08988c28d --- /dev/null +++ b/OracleAccessManagement/kubernetes/charts/weblogic-operator/templates/_domain-namespaces.tpl @@ -0,0 +1,134 @@ +# Copyright (c) 2018, 2021, Oracle and/or its affiliates. +# Licensed under the Universal Permissive License v 1.0 as shown at https://oss.oracle.com/licenses/upl. 
+ +{{- define "operator.domainNamespaces" }} +{{- if (or (eq (default "List" .domainNamespaceSelectionStrategy) "Dedicated") (and .dedicated (eq (default "List" .domainNamespaceSelectionStrategy) "List"))) }} +{{- $args := include "utils.cloneDictionary" . | fromYaml -}} +{{- $key := .Release.Namespace -}} +{{- $ignore := set $args "domainNamespace" $key -}} +{{- include "operator.operatorRoleBindingNamespace" $args -}} +{{- else if eq (default "List" .domainNamespaceSelectionStrategy) "List" }} +{{- $args := include "utils.cloneDictionary" . | fromYaml -}} +{{- range $key := $args.domainNamespaces -}} +{{- $ignore := set $args "domainNamespace" $key -}} +{{- include "operator.operatorRoleBindingNamespace" $args -}} +{{- end }} +{{- else if eq .domainNamespaceSelectionStrategy "LabelSelector" }} +{{- $args := include "utils.cloneDictionary" . | fromYaml -}} +{{- /* + Split terms on commas not contained in parentheses. Unfortunately, the regular expression + support included with Helm templates does not include lookarounds. 
+*/ -}} +{{- $working := dict "rejected" (list) "terms" (list $args.domainNamespaceLabelSelector) }} +{{- if contains "," $args.domainNamespaceLabelSelector }} +{{- $cs := regexSplit "," $args.domainNamespaceLabelSelector -1 }} +{{- $ignore := set $working "st" (list) }} +{{- $ignore := set $working "item" "" }} +{{- range $c := $cs }} +{{- if and (contains "(" $c) (not (contains ")" $c)) }} +{{- $ignore := set $working "item" (print $working.item $c) }} +{{- else if not (eq $working.item "") }} +{{- $ignore := set $working "st" (append $working.st (print $working.item "," $c)) }} +{{- if contains ")" $c }} +{{- $ignore := set $working "item" "" }} +{{- end }} +{{- else }} +{{- $ignore := set $working "st" (append $working.st $c) }} +{{- end }} +{{- end }} +{{- $ignore := set $working "terms" $working.st }} +{{- end }} +{{- $namespaces := (lookup "v1" "Namespace" "" "").items }} +{{- range $t := $working.terms }} +{{- $term := trim $t }} +{{- range $index, $namespace := $namespaces }} +{{- /* + Label selector patterns + Equality-based: =, ==, != + Set-based: x in (a, b), x notin (a, b) + Existence: x, !x +*/ -}} +{{- if not $namespace.metadata.labels }} +{{- $ignore := set $namespace.metadata "labels" (dict) }} +{{- end }} +{{- if hasPrefix "!" $term }} +{{- if hasKey $namespace.metadata.labels (trimPrefix "!" 
$term) }} +{{- $ignore := set $working "rejected" (append $working.rejected $namespace.metadata.name) }} +{{- end }} +{{- else if contains "!=" $term }} +{{- $split := regexSplit "!=" $term 2 }} +{{- $key := nospace (first $split) }} +{{- if hasKey $namespace.metadata.labels $key }} +{{- if eq (last $split | nospace) (get $namespace.metadata.labels $key) }} +{{- $ignore := set $working "rejected" (append $working.rejected $namespace.metadata.name) }} +{{- end }} +{{- end }} +{{- else if contains "==" $term }} +{{- $split := regexSplit "==" $term 2 }} +{{- $key := nospace (first $split) }} +{{- if or (not (hasKey $namespace.metadata.labels $key)) (not (eq (last $split | nospace) (get $namespace.metadata.labels $key))) }} +{{- $ignore := set $working "rejected" (append $working.rejected $namespace.metadata.name) }} +{{- end }} +{{- else if contains "=" $term }} +{{- $split := regexSplit "=" $term 2 }} +{{- $key := nospace (first $split) }} +{{- if or (not (hasKey $namespace.metadata.labels $key)) (not (eq (last $split | nospace) (get $namespace.metadata.labels $key))) }} +{{- $ignore := set $working "rejected" (append $working.rejected $namespace.metadata.name) }} +{{- end }} +{{- else if contains " notin " $term }} +{{- $split := regexSplit " notin " $term 2 }} +{{- $key := nospace (first $split) }} +{{- if hasKey $namespace.metadata.labels $key }} +{{- $second := nospace (last $split) }} +{{- $parenContents := substr 1 (int (sub (len $second) 1)) $second }} +{{- $values := regexSplit "," $parenContents -1 }} +{{- range $value := $values }} +{{- if eq ($value | nospace) (get $namespace.metadata.labels $key) }} +{{- $ignore := set $working "rejected" (append $working.rejected $namespace.metadata.name) }} +{{- end }} +{{- end }} +{{- end }} +{{- else if contains " in " $term }} +{{- $split := regexSplit " in " $term 2 }} +{{- $key := nospace (first $split) }} +{{- if not (hasKey $namespace.metadata.labels $key) }} +{{- $ignore := set $working "rejected" (append 
$working.rejected $namespace.metadata.name) }} +{{- else }} +{{- $second := nospace (last $split) }} +{{- $parenContents := substr 1 (int (sub (len $second) 1)) $second }} +{{- $values := regexSplit "," $parenContents -1 }} +{{- $ignore := set $working "found" false }} +{{- range $value := $values }} +{{- if eq ($value | nospace) (get $namespace.metadata.labels $key) }} +{{- $ignore := set $working "found" true }} +{{- end }} +{{- end }} +{{- if not $working.found }} +{{- $ignore := set $working "rejected" (append $working.rejected $namespace.metadata.name) }} +{{- end }} +{{- end }} +{{- else }} +{{- if not (hasKey $namespace.metadata.labels $term) }} +{{- $ignore := set $working "rejected" (append $working.rejected $namespace.metadata.name) }} +{{- end }} +{{- end }} +{{- end }} +{{- end }} +{{- range $index, $namespace := $namespaces }} +{{- $key := $namespace.metadata.name -}} +{{- if not (has $key $working.rejected) }} +{{- $ignore := set $args "domainNamespace" $key -}} +{{- include "operator.operatorRoleBindingNamespace" $args -}} +{{- end }} +{{- end }} +{{- else if eq .domainNamespaceSelectionStrategy "RegExp" }} +{{- $args := include "utils.cloneDictionary" . 
| fromYaml -}} +{{- range $index, $namespace := (lookup "v1" "Namespace" "" "").items }} +{{- if regexMatch $args.domainNamespaceRegExp $namespace.metadata.name }} +{{- $key := $namespace.metadata.name -}} +{{- $ignore := set $args "domainNamespace" $key -}} +{{- include "operator.operatorRoleBindingNamespace" $args -}} +{{- end }} +{{- end }} +{{- end }} +{{- end }} diff --git a/OracleAccessManagement/kubernetes/charts/weblogic-operator/templates/_operator-clusterrole-domain-admin.tpl b/OracleAccessManagement/kubernetes/charts/weblogic-operator/templates/_operator-clusterrole-domain-admin.tpl new file mode 100755 index 000000000..94cab9df7 --- /dev/null +++ b/OracleAccessManagement/kubernetes/charts/weblogic-operator/templates/_operator-clusterrole-domain-admin.tpl @@ -0,0 +1,40 @@ +# Copyright (c) 2018, 2021, Oracle and/or its affiliates. +# Licensed under the Universal Permissive License v 1.0 as shown at https://oss.oracle.com/licenses/upl. + +{{- define "operator.operatorClusterRoleDomainAdmin" }} +--- +{{- if (or (eq (default "List" .domainNamespaceSelectionStrategy) "Dedicated") (and .dedicated (eq (default "List" .domainNamespaceSelectionStrategy) "List"))) }} +kind: "Role" +{{- else }} +kind: "ClusterRole" +{{- end }} +apiVersion: "rbac.authorization.k8s.io/v1" +metadata: + {{- if (or (eq (default "List" .domainNamespaceSelectionStrategy) "Dedicated") (and .dedicated (eq (default "List" .domainNamespaceSelectionStrategy) "List"))) }} + name: "weblogic-operator-role-domain-admin" + namespace: {{ .Release.Namespace | quote }} + {{- else }} + name: {{ list .Release.Namespace "weblogic-operator-clusterrole-domain-admin" | join "-" | quote }} + {{- end }} + labels: + weblogic.operatorName: {{ .Release.Namespace | quote }} +rules: +- apiGroups: [""] + resources: ["configmaps"] + verbs: ["get", "list", "watch", "create", "update", "patch", "delete", "deletecollection"] +- apiGroups: [""] + resources: ["secrets", "pods", "events"] + verbs: ["get", "list", "watch"] 
+- apiGroups: [""] + resources: ["pods/log"] + verbs: ["get", "list"] +- apiGroups: [""] + resources: ["pods/exec"] + verbs: ["get", "create"] +- apiGroups: ["weblogic.oracle"] + resources: ["domains"] + verbs: ["get", "list", "watch", "create", "update", "patch", "delete", "deletecollection"] +- apiGroups: ["weblogic.oracle"] + resources: ["domains/status"] + verbs: ["get", "watch"] +{{- end }} diff --git a/OracleAccessManagement/kubernetes/charts/weblogic-operator/templates/_operator-clusterrole-general.tpl b/OracleAccessManagement/kubernetes/charts/weblogic-operator/templates/_operator-clusterrole-general.tpl new file mode 100755 index 000000000..2eba13b95 --- /dev/null +++ b/OracleAccessManagement/kubernetes/charts/weblogic-operator/templates/_operator-clusterrole-general.tpl @@ -0,0 +1,39 @@ +# Copyright (c) 2018, 2021, Oracle and/or its affiliates. +# Licensed under the Universal Permissive License v 1.0 as shown at https://oss.oracle.com/licenses/upl. + +{{- define "operator.operatorClusterRoleGeneral" }} +--- +{{- if (or (eq (default "List" .domainNamespaceSelectionStrategy) "Dedicated") (and .dedicated (eq (default "List" .domainNamespaceSelectionStrategy) "List"))) }} +kind: "Role" +{{- else }} +kind: "ClusterRole" +{{- end }} +apiVersion: "rbac.authorization.k8s.io/v1" +metadata: + {{- if (or (eq (default "List" .domainNamespaceSelectionStrategy) "Dedicated") (and .dedicated (eq (default "List" .domainNamespaceSelectionStrategy) "List"))) }} + name: "weblogic-operator-role-general" + namespace: {{ .Release.Namespace | quote }} + {{- else }} + name: {{ list .Release.Namespace "weblogic-operator-clusterrole-general" | join "-" | quote }} + {{- end }} + labels: + weblogic.operatorName: {{ .Release.Namespace | quote }} +rules: +{{- if not (or (eq (default "List" .domainNamespaceSelectionStrategy) "Dedicated") (and .dedicated (eq (default "List" .domainNamespaceSelectionStrategy) "List"))) }} +- apiGroups: [""] + resources: ["namespaces"] + verbs: ["get", 
"list", "watch"] +- apiGroups: ["apiextensions.k8s.io"] + resources: ["customresourcedefinitions"] + verbs: ["get", "list", "watch", "create", "update", "patch"] +{{- end }} +- apiGroups: ["weblogic.oracle"] + resources: ["domains", "domains/status"] + verbs: ["get", "list", "watch", "update", "patch"] +- apiGroups: ["authentication.k8s.io"] + resources: ["tokenreviews"] + verbs: ["create"] +- apiGroups: ["authorization.k8s.io"] + resources: ["selfsubjectrulesreviews"] + verbs: ["create"] +{{- end }} diff --git a/OracleAccessManagement/kubernetes/charts/weblogic-operator/templates/_operator-clusterrole-namespace.tpl b/OracleAccessManagement/kubernetes/charts/weblogic-operator/templates/_operator-clusterrole-namespace.tpl new file mode 100755 index 000000000..6310779bb --- /dev/null +++ b/OracleAccessManagement/kubernetes/charts/weblogic-operator/templates/_operator-clusterrole-namespace.tpl @@ -0,0 +1,40 @@ +# Copyright (c) 2018, 2021, Oracle and/or its affiliates. +# Licensed under the Universal Permissive License v 1.0 as shown at https://oss.oracle.com/licenses/upl. 
+ +{{- define "operator.operatorClusterRoleNamespace" }} +--- +{{- if (or (eq (default "List" .domainNamespaceSelectionStrategy) "Dedicated") (and .dedicated (eq (default "List" .domainNamespaceSelectionStrategy) "List"))) }} +kind: "Role" +{{- else }} +kind: "ClusterRole" +{{- end }} +apiVersion: "rbac.authorization.k8s.io/v1" +metadata: + {{- if (or (eq (default "List" .domainNamespaceSelectionStrategy) "Dedicated") (and .dedicated (eq (default "List" .domainNamespaceSelectionStrategy) "List"))) }} + name: "weblogic-operator-role-namespace" + namespace: {{ .Release.Namespace | quote }} + {{- else }} + name: {{ list .Release.Namespace "weblogic-operator-clusterrole-namespace" | join "-" | quote }} + {{- end }} + labels: + weblogic.operatorName: {{ .Release.Namespace | quote }} +rules: +- apiGroups: [""] + resources: ["services", "configmaps", "pods", "events"] + verbs: ["get", "list", "watch", "create", "update", "patch", "delete", "deletecollection"] +- apiGroups: [""] + resources: ["secrets"] + verbs: ["get", "list", "watch"] +- apiGroups: [""] + resources: ["pods/log"] + verbs: ["get", "list"] +- apiGroups: [""] + resources: ["pods/exec"] + verbs: ["get", "create"] +- apiGroups: ["batch"] + resources: ["jobs"] + verbs: ["get", "list", "watch", "create", "update", "patch", "delete", "deletecollection"] +- apiGroups: ["policy"] + resources: ["poddisruptionbudgets"] + verbs: ["get", "list", "watch", "create", "update", "patch", "delete", "deletecollection"] +{{- end }} diff --git a/OracleAccessManagement/kubernetes/charts/weblogic-operator/templates/_operator-clusterrole-nonresource.tpl b/OracleAccessManagement/kubernetes/charts/weblogic-operator/templates/_operator-clusterrole-nonresource.tpl new file mode 100755 index 000000000..e3b6a2785 --- /dev/null +++ b/OracleAccessManagement/kubernetes/charts/weblogic-operator/templates/_operator-clusterrole-nonresource.tpl @@ -0,0 +1,15 @@ +# Copyright (c) 2018, 2021, Oracle and/or its affiliates. 
+# Licensed under the Universal Permissive License v 1.0 as shown at https://oss.oracle.com/licenses/upl. + +{{- define "operator.operatorClusterRoleNonResource" }} +--- +kind: "ClusterRole" +apiVersion: "rbac.authorization.k8s.io/v1" +metadata: + name: {{ list .Release.Namespace "weblogic-operator-clusterrole-nonresource" | join "-" | quote }} + labels: + weblogic.operatorName: {{ .Release.Namespace | quote }} +rules: +- nonResourceURLs: ["/version/*"] + verbs: ["get"] +{{- end }} diff --git a/OracleAccessManagement/kubernetes/charts/weblogic-operator/templates/_operator-clusterrole-operator-admin.tpl b/OracleAccessManagement/kubernetes/charts/weblogic-operator/templates/_operator-clusterrole-operator-admin.tpl new file mode 100755 index 000000000..46faed184 --- /dev/null +++ b/OracleAccessManagement/kubernetes/charts/weblogic-operator/templates/_operator-clusterrole-operator-admin.tpl @@ -0,0 +1,34 @@ +# Copyright (c) 2018, 2021, Oracle and/or its affiliates. +# Licensed under the Universal Permissive License v 1.0 as shown at https://oss.oracle.com/licenses/upl. 
+ +{{- define "operator.operatorClusterRoleOperatorAdmin" }} +--- +{{- if (or (eq (default "List" .domainNamespaceSelectionStrategy) "Dedicated") (and .dedicated (eq (default "List" .domainNamespaceSelectionStrategy) "List"))) }} +kind: "Role" +{{- else }} +kind: "ClusterRole" +{{- end }} +apiVersion: "rbac.authorization.k8s.io/v1" +metadata: + {{- if (or (eq (default "List" .domainNamespaceSelectionStrategy) "Dedicated") (and .dedicated (eq (default "List" .domainNamespaceSelectionStrategy) "List"))) }} + name: "weblogic-operator-role-operator-admin" + namespace: {{ .Release.Namespace | quote }} + {{- else }} + name: {{ list .Release.Namespace "weblogic-operator-clusterrole-operator-admin" | join "-" | quote }} + {{- end }} + labels: + weblogic.operatorName: {{ .Release.Namespace | quote }} +rules: +- apiGroups: [""] + resources: ["configmaps", "secrets"] + verbs: ["get", "list", "watch", "create", "update", "patch", "delete", "deletecollection"] +- apiGroups: [""] + resources: ["pods", "events"] + verbs: ["get", "list", "watch"] +- apiGroups: [""] + resources: ["pods/log"] + verbs: ["get", "list"] +- apiGroups: [""] + resources: ["pods/exec"] + verbs: ["get", "create"] +{{- end }} diff --git a/OracleAccessManagement/kubernetes/charts/weblogic-operator/templates/_operator-clusterrolebinding-auth-delegator.tpl b/OracleAccessManagement/kubernetes/charts/weblogic-operator/templates/_operator-clusterrolebinding-auth-delegator.tpl new file mode 100755 index 000000000..783f970e7 --- /dev/null +++ b/OracleAccessManagement/kubernetes/charts/weblogic-operator/templates/_operator-clusterrolebinding-auth-delegator.tpl @@ -0,0 +1,30 @@ +# Copyright (c) 2018, 2021, Oracle and/or its affiliates. +# Licensed under the Universal Permissive License v 1.0 as shown at https://oss.oracle.com/licenses/upl. 
+ +{{- define "operator.clusterRoleBindingAuthDelegator" }} +--- +apiVersion: "rbac.authorization.k8s.io/v1" +{{- if (or (eq (default "List" .domainNamespaceSelectionStrategy) "Dedicated") (and .dedicated (eq (default "List" .domainNamespaceSelectionStrategy) "List"))) }} +kind: "RoleBinding" +{{- else }} +kind: "ClusterRoleBinding" +{{- end }} +metadata: + labels: + weblogic.operatorName: {{ .Release.Namespace | quote}} + {{- if (or (eq (default "List" .domainNamespaceSelectionStrategy) "Dedicated") (and .dedicated (eq (default "List" .domainNamespaceSelectionStrategy) "List"))) }} + name: "weblogic-operator-rolebinding-auth-delegator" + namespace: {{ .Release.Namespace | quote }} + {{- else }} + name: {{ list .Release.Namespace "weblogic-operator-clusterrolebinding-auth-delegator" | join "-" | quote }} + {{- end }} +roleRef: + apiGroup: "rbac.authorization.k8s.io" + kind: "ClusterRole" + name: "system:auth-delegator" +subjects: +- kind: "ServiceAccount" + apiGroup: "" + name: {{ .serviceAccount | quote }} + namespace: {{ .Release.Namespace | quote }} +{{- end }} diff --git a/OracleAccessManagement/kubernetes/charts/weblogic-operator/templates/_operator-clusterrolebinding-discovery.tpl b/OracleAccessManagement/kubernetes/charts/weblogic-operator/templates/_operator-clusterrolebinding-discovery.tpl new file mode 100755 index 000000000..48c505fa5 --- /dev/null +++ b/OracleAccessManagement/kubernetes/charts/weblogic-operator/templates/_operator-clusterrolebinding-discovery.tpl @@ -0,0 +1,30 @@ +# Copyright (c) 2018, 2021, Oracle and/or its affiliates. +# Licensed under the Universal Permissive License v 1.0 as shown at https://oss.oracle.com/licenses/upl. 
+ +{{- define "operator.clusterRoleBindingDiscovery" }} +--- +apiVersion: "rbac.authorization.k8s.io/v1" +{{- if (or (eq (default "List" .domainNamespaceSelectionStrategy) "Dedicated") (and .dedicated (eq (default "List" .domainNamespaceSelectionStrategy) "List"))) }} +kind: "RoleBinding" +{{- else }} +kind: "ClusterRoleBinding" +{{- end }} +metadata: + labels: + weblogic.operatorName: {{ .Release.Namespace | quote }} + {{- if (or (eq (default "List" .domainNamespaceSelectionStrategy) "Dedicated") (and .dedicated (eq (default "List" .domainNamespaceSelectionStrategy) "List"))) }} + name: "weblogic-operator-rolebinding-discovery" + namespace: {{ .Release.Namespace | quote }} + {{- else }} + name: {{ list .Release.Namespace "weblogic-operator-clusterrolebinding-discovery" | join "-" | quote }} + {{- end }} +roleRef: + apiGroup: "rbac.authorization.k8s.io" + kind: "ClusterRole" + name: "system:discovery" +subjects: +- kind: "ServiceAccount" + apiGroup: "" + name: {{ .serviceAccount | quote }} + namespace: {{ .Release.Namespace | quote }} +{{- end }} diff --git a/OracleAccessManagement/kubernetes/charts/weblogic-operator/templates/_operator-clusterrolebinding-general.tpl b/OracleAccessManagement/kubernetes/charts/weblogic-operator/templates/_operator-clusterrolebinding-general.tpl new file mode 100755 index 000000000..f2994da33 --- /dev/null +++ b/OracleAccessManagement/kubernetes/charts/weblogic-operator/templates/_operator-clusterrolebinding-general.tpl @@ -0,0 +1,35 @@ +# Copyright (c) 2018, 2021, Oracle and/or its affiliates. +# Licensed under the Universal Permissive License v 1.0 as shown at https://oss.oracle.com/licenses/upl. 
+ +{{- define "operator.clusterRoleBindingGeneral" }} +--- +apiVersion: "rbac.authorization.k8s.io/v1" +{{- if (or (eq (default "List" .domainNamespaceSelectionStrategy) "Dedicated") (and .dedicated (eq (default "List" .domainNamespaceSelectionStrategy) "List"))) }} +kind: "RoleBinding" +{{- else }} +kind: "ClusterRoleBinding" +{{- end }} +metadata: + labels: + weblogic.operatorName: {{ .Release.Namespace | quote }} + {{- if (or (eq (default "List" .domainNamespaceSelectionStrategy) "Dedicated") (and .dedicated (eq (default "List" .domainNamespaceSelectionStrategy) "List"))) }} + name: "weblogic-operator-rolebinding-general" + namespace: {{ .Release.Namespace | quote }} + {{- else }} + name: {{ list .Release.Namespace "weblogic-operator-clusterrolebinding-general" | join "-" | quote }} + {{- end }} +roleRef: + apiGroup: "rbac.authorization.k8s.io" + {{- if (or (eq (default "List" .domainNamespaceSelectionStrategy) "Dedicated") (and .dedicated (eq (default "List" .domainNamespaceSelectionStrategy) "List"))) }} + kind: "Role" + name: "weblogic-operator-role-general" + {{- else }} + kind: "ClusterRole" + name: {{ list .Release.Namespace "weblogic-operator-clusterrole-general" | join "-" | quote }} + {{- end }} +subjects: +- kind: "ServiceAccount" + apiGroup: "" + name: {{ .serviceAccount | quote }} + namespace: {{ .Release.Namespace | quote }} +{{- end }} diff --git a/OracleAccessManagement/kubernetes/charts/weblogic-operator/templates/_operator-clusterrolebinding-nonresource.tpl b/OracleAccessManagement/kubernetes/charts/weblogic-operator/templates/_operator-clusterrolebinding-nonresource.tpl new file mode 100755 index 000000000..d998ab0e9 --- /dev/null +++ b/OracleAccessManagement/kubernetes/charts/weblogic-operator/templates/_operator-clusterrolebinding-nonresource.tpl @@ -0,0 +1,21 @@ +# Copyright (c) 2018, 2021, Oracle and/or its affiliates. +# Licensed under the Universal Permissive License v 1.0 as shown at https://oss.oracle.com/licenses/upl. 
+ +{{- define "operator.clusterRoleBindingNonResource" }} +--- +apiVersion: "rbac.authorization.k8s.io/v1" +kind: "ClusterRoleBinding" +metadata: + labels: + weblogic.operatorName: {{ .Release.Namespace | quote }} + name: {{ list .Release.Namespace "weblogic-operator-clusterrolebinding-nonresource" | join "-" | quote }} +roleRef: + apiGroup: "rbac.authorization.k8s.io" + kind: "ClusterRole" + name: {{ list .Release.Namespace "weblogic-operator-clusterrole-nonresource" | join "-" | quote }} +subjects: +- kind: "ServiceAccount" + apiGroup: "" + name: {{ .serviceAccount | quote }} + namespace: {{ .Release.Namespace | quote }} +{{- end }} diff --git a/OracleAccessManagement/kubernetes/charts/weblogic-operator/templates/_operator-cm.tpl b/OracleAccessManagement/kubernetes/charts/weblogic-operator/templates/_operator-cm.tpl new file mode 100755 index 000000000..dd6594de2 --- /dev/null +++ b/OracleAccessManagement/kubernetes/charts/weblogic-operator/templates/_operator-cm.tpl @@ -0,0 +1,58 @@ +# Copyright (c) 2018, 2021, Oracle and/or its affiliates. +# Licensed under the Universal Permissive License v 1.0 as shown at https://oss.oracle.com/licenses/upl. + +{{- define "operator.operatorConfigMap" }} +--- +apiVersion: "v1" +data: + {{- if .externalRestEnabled }} + {{- if (hasKey . 
"externalRestIdentitySecret") }} + externalRestIdentitySecret: {{ .externalRestIdentitySecret | quote }} + {{- else }} + externalOperatorCert: {{ .externalOperatorCert | quote }} + {{- end }} + {{- end }} + {{- $configmap := (lookup "v1" "ConfigMap" .Release.Namespace "weblogic-operator-cm") }} + {{- if (and $configmap $configmap.data) }} + {{- $internalOperatorCert := index $configmap.data "internalOperatorCert" }} + {{- if $internalOperatorCert }} + internalOperatorCert: {{ $internalOperatorCert }} + {{- end }} + {{- end }} + serviceaccount: {{ .serviceAccount | quote }} + domainNamespaceSelectionStrategy: {{ (default "List" .domainNamespaceSelectionStrategy) | quote }} + domainNamespaces: {{ .domainNamespaces | uniq | sortAlpha | join "," | quote }} + {{- if .dedicated }} + dedicated: {{ .dedicated | quote }} + {{- end }} + {{- if .domainNamespaceLabelSelector }} + domainNamespaceLabelSelector: {{ .domainNamespaceLabelSelector | quote }} + {{- end }} + {{- if .domainNamespaceRegExp }} + domainNamespaceRegExp: {{ .domainNamespaceRegExp | quote }} + {{- end }} + {{- if .dns1123Fields }} + dns1123Fields: {{ .dns1123Fields | quote }} + {{- end }} + {{- if .featureGates }} + featureGates: {{ .featureGates | quote }} + {{- end }} + {{- if .introspectorJobNameSuffix }} + introspectorJobNameSuffix: {{ .introspectorJobNameSuffix | quote }} + {{- end }} + {{- if .externalServiceNameSuffix }} + externalServiceNameSuffix: {{ .externalServiceNameSuffix | quote }} + {{- end }} + {{- if .clusterSizePaddingValidationEnabled }} + clusterSizePaddingValidationEnabled: {{ .clusterSizePaddingValidationEnabled | quote }} + {{- end }} + {{- if .tokenReviewAuthentication }} + tokenReviewAuthentication: {{ .tokenReviewAuthentication | quote }} + {{- end }} +kind: "ConfigMap" +metadata: + labels: + weblogic.operatorName: {{ .Release.Namespace | quote }} + name: "weblogic-operator-cm" + namespace: {{ .Release.Namespace | quote }} +{{- end }} diff --git 
# Copyright (c) 2018, 2021, Oracle and/or its affiliates.
# Licensed under the Universal Permissive License v 1.0 as shown at https://oss.oracle.com/licenses/upl.

# operator.operatorDeployment renders the Deployment that runs the WebLogic Kubernetes
# Operator container (plus an optional logstash sidecar when elkIntegrationEnabled is true).
# Scope (.) is the merged chart values dict built by main.yaml, with Release/Chart/Files added.
# Notes grounded in the template below:
#   - strategy Recreate: the old operator pod is stopped before the new one starts (replicas: 1).
#   - The empty `with .annotations` block only emits the `annotations:` key when the map is
#     non-empty; the following `range` then emits the entries.
#   - Liveness/readiness probes are suppressed when remoteDebugNodePortEnabled is true, so a
#     suspended debuggee is not killed by probe failures.
#   - resources.limits is emitted even when both cpuLimits and memoryLimits are unset,
#     producing an empty `limits:` map — assumed intentional/harmless; TODO confirm upstream.
{{- define "operator.operatorDeployment" }}
---
apiVersion: "apps/v1"
kind: "Deployment"
metadata:
  name: "weblogic-operator"
  namespace: {{ .Release.Namespace | quote }}
  labels:
    weblogic.operatorName: {{ .Release.Namespace | quote }}
spec:
  strategy:
    type: Recreate
  selector:
    matchLabels:
      weblogic.operatorName: {{ .Release.Namespace | quote }}
  replicas: 1
  template:
    metadata:
      {{- with .annotations }}
      annotations:
      {{- end }}
      {{- range $key, $value := .annotations }}
        {{ $key }}: {{ $value | quote }}
      {{- end }}
      labels:
        weblogic.operatorName: {{ .Release.Namespace | quote }}
        app: "weblogic-operator"
        {{- range $key, $value := .labels }}
        {{ $key }}: {{ $value | quote }}
        {{- end }}
    spec:
      serviceAccountName: {{ .serviceAccount | quote }}
      {{- with .nodeSelector }}
      nodeSelector:
        {{- toYaml . | nindent 8 }}
      {{- end }}
      {{- with .affinity }}
      affinity:
        {{- toYaml . | nindent 8 }}
      {{- end }}
      containers:
      - name: "weblogic-operator"
        image: {{ .image | quote }}
        imagePullPolicy: {{ .imagePullPolicy | quote }}
        command: ["bash"]
        args: ["/operator/operator.sh"]
        env:
        - name: "OPERATOR_NAMESPACE"
          valueFrom:
            fieldRef:
              fieldPath: "metadata.namespace"
        - name: "OPERATOR_POD_NAME"
          valueFrom:
            fieldRef:
              fieldPath: "metadata.name"
        - name: "OPERATOR_POD_UID"
          valueFrom:
            fieldRef:
              fieldPath: "metadata.uid"
        - name: "OPERATOR_VERBOSE"
          value: "false"
        - name: "JAVA_LOGGING_LEVEL"
          value: {{ .javaLoggingLevel | quote }}
        - name: "JAVA_LOGGING_MAXSIZE"
          value: {{ .javaLoggingFileSizeLimit | default 20000000 | quote }}
        - name: "JAVA_LOGGING_COUNT"
          value: {{ .javaLoggingFileCount | default 10 | quote }}
        {{- if .remoteDebugNodePortEnabled }}
        - name: "REMOTE_DEBUG_PORT"
          value: {{ .internalDebugHttpPort | quote }}
        - name: "DEBUG_SUSPEND"
          {{- if .suspendOnDebugStartup }}
          value: "y"
          {{- else }}
          value: "n"
          {{- end }}
        {{- end }}
        {{- if .mockWLS }}
        - name: "MOCK_WLS"
          value: "true"
        {{- end }}
        resources:
          requests:
            cpu: {{ .cpuRequests | default "250m" }}
            memory: {{ .memoryRequests | default "512Mi" }}
          limits:
            {{- if .cpuLimits}}
            cpu: {{ .cpuLimits }}
            {{- end }}
            {{- if .memoryLimits}}
            memory: {{ .memoryLimits }}
            {{- end }}
        volumeMounts:
        - name: "weblogic-operator-cm-volume"
          mountPath: "/operator/config"
        - name: "weblogic-operator-debug-cm-volume"
          mountPath: "/operator/debug-config"
        - name: "weblogic-operator-secrets-volume"
          mountPath: "/operator/secrets"
          readOnly: true
        {{- if .elkIntegrationEnabled }}
        - mountPath: "/logs"
          name: "log-dir"
          readOnly: false
        {{- end }}
        {{- if not .remoteDebugNodePortEnabled }}
        livenessProbe:
          exec:
            command:
              - "bash"
              - "/operator/livenessProbe.sh"
          initialDelaySeconds: 20
          periodSeconds: 5
        readinessProbe:
          exec:
            command:
              - "bash"
              - "/operator/readinessProbe.sh"
          initialDelaySeconds: 2
          periodSeconds: 10
        {{- end }}
      {{- if .elkIntegrationEnabled }}
      - name: "logstash"
        image: {{ .logStashImage | quote }}
        args: [ "-f", "/logs/logstash.conf" ]
        volumeMounts:
        - name: "log-dir"
          mountPath: "/logs"
        env:
        - name: "ELASTICSEARCH_HOST"
          value: {{ .elasticSearchHost | quote }}
        - name: "ELASTICSEARCH_PORT"
          value: {{ .elasticSearchPort | quote }}
      {{- end }}
      {{- if .imagePullSecrets }}
      imagePullSecrets:
      {{ .imagePullSecrets | toYaml }}
      {{- end }}
      volumes:
      - name: "weblogic-operator-cm-volume"
        configMap:
          name: "weblogic-operator-cm"
      - name: "weblogic-operator-debug-cm-volume"
        configMap:
          name: "weblogic-operator-debug-cm"
          optional: true
      - name: "weblogic-operator-secrets-volume"
        secret:
          secretName: "weblogic-operator-secrets"
      {{- if .elkIntegrationEnabled }}
      - name: "log-dir"
        emptyDir:
          medium: "Memory"
      {{- end }}
{{- end }}
+ +{{- define "operator.operatorExternalService" }} +{{- if or .externalRestEnabled .remoteDebugNodePortEnabled }} +--- +apiVersion: "v1" +kind: "Service" +metadata: + name: "external-weblogic-operator-svc" + namespace: {{ .Release.Namespace | quote }} + labels: + weblogic.operatorName: {{ .Release.Namespace | quote }} +spec: + type: "NodePort" + selector: + app: "weblogic-operator" + ports: + {{- if .externalRestEnabled }} + - name: "rest" + port: 8081 + nodePort: {{ .externalRestHttpsPort }} + {{- end }} + {{- if .remoteDebugNodePortEnabled }} + - name: "debug" + port: {{ .internalDebugHttpPort }} + nodePort: {{ .externalDebugHttpPort }} + {{- end }} +{{- end }} +{{- end }} diff --git a/OracleAccessManagement/kubernetes/charts/weblogic-operator/templates/_operator-internal-svc.tpl b/OracleAccessManagement/kubernetes/charts/weblogic-operator/templates/_operator-internal-svc.tpl new file mode 100755 index 000000000..0108738de --- /dev/null +++ b/OracleAccessManagement/kubernetes/charts/weblogic-operator/templates/_operator-internal-svc.tpl @@ -0,0 +1,20 @@ +# Copyright (c) 2018, 2021, Oracle and/or its affiliates. +# Licensed under the Universal Permissive License v 1.0 as shown at https://oss.oracle.com/licenses/upl. 
+ +{{- define "operator.operatorInternalService" }} +--- +apiVersion: "v1" +kind: "Service" +metadata: + name: "internal-weblogic-operator-svc" + namespace: {{ .Release.Namespace | quote }} + labels: + weblogic.operatorName: {{ .Release.Namespace | quote }} +spec: + type: "ClusterIP" + selector: + app: "weblogic-operator" + ports: + - port: 8082 + name: "rest" +{{- end }} diff --git a/OracleAccessManagement/kubernetes/charts/weblogic-operator/templates/_operator-role.tpl b/OracleAccessManagement/kubernetes/charts/weblogic-operator/templates/_operator-role.tpl new file mode 100755 index 000000000..e0c386b98 --- /dev/null +++ b/OracleAccessManagement/kubernetes/charts/weblogic-operator/templates/_operator-role.tpl @@ -0,0 +1,17 @@ +# Copyright (c) 2018, 2021, Oracle and/or its affiliates. +# Licensed under the Universal Permissive License v 1.0 as shown at https://oss.oracle.com/licenses/upl. + +{{- define "operator.operatorRole" }} +--- +kind: "Role" +apiVersion: "rbac.authorization.k8s.io/v1" +metadata: + name: "weblogic-operator-role" + namespace: {{ .Release.Namespace | quote }} + labels: + weblogic.operatorName: {{ .Release.Namespace | quote }} +rules: +- apiGroups: [""] + resources: ["events", "secrets", "configmaps"] + verbs: ["get", "list", "watch", "create", "update", "patch", "delete", "deletecollection"] +{{- end }} diff --git a/OracleAccessManagement/kubernetes/charts/weblogic-operator/templates/_operator-rolebinding-namespace.tpl b/OracleAccessManagement/kubernetes/charts/weblogic-operator/templates/_operator-rolebinding-namespace.tpl new file mode 100755 index 000000000..d55ed3f47 --- /dev/null +++ b/OracleAccessManagement/kubernetes/charts/weblogic-operator/templates/_operator-rolebinding-namespace.tpl @@ -0,0 +1,35 @@ +# Copyright (c) 2018, 2021, Oracle and/or its affiliates. +# Licensed under the Universal Permissive License v 1.0 as shown at https://oss.oracle.com/licenses/upl. 
+ +{{- define "operator.operatorRoleBindingNamespace" }} +--- +{{- if .enableClusterRoleBinding }} +kind: "ClusterRoleBinding" +{{- else }} +kind: "RoleBinding" +{{- end }} +apiVersion: "rbac.authorization.k8s.io/v1" +metadata: + {{- if .enableClusterRoleBinding }} + name: {{ list .Release.Namespace "weblogic-operator-clusterrolebinding-namespace" | join "-" | quote }} + {{- else }} + name: "weblogic-operator-rolebinding-namespace" + namespace: {{ .domainNamespace | quote }} + {{- end }} + labels: + weblogic.operatorName: {{ .Release.Namespace | quote }} +subjects: +- kind: "ServiceAccount" + name: {{ .serviceAccount | quote }} + namespace: {{ .Release.Namespace | quote }} + apiGroup: "" +roleRef: + {{- if (or (eq (default "List" .domainNamespaceSelectionStrategy) "Dedicated") (and .dedicated (eq (default "List" .domainNamespaceSelectionStrategy) "List"))) }} + kind: "Role" + name: "weblogic-operator-role-namespace" + {{- else }} + kind: "ClusterRole" + name: {{ list .Release.Namespace "weblogic-operator-clusterrole-namespace" | join "-" | quote }} + {{- end }} + apiGroup: "rbac.authorization.k8s.io" +{{- end }} diff --git a/OracleAccessManagement/kubernetes/charts/weblogic-operator/templates/_operator-rolebinding.tpl b/OracleAccessManagement/kubernetes/charts/weblogic-operator/templates/_operator-rolebinding.tpl new file mode 100755 index 000000000..98a09424e --- /dev/null +++ b/OracleAccessManagement/kubernetes/charts/weblogic-operator/templates/_operator-rolebinding.tpl @@ -0,0 +1,22 @@ +# Copyright (c) 2018, 2021, Oracle and/or its affiliates. +# Licensed under the Universal Permissive License v 1.0 as shown at https://oss.oracle.com/licenses/upl. 

# operator.operatorRoleBinding binds the operator ServiceAccount to "weblogic-operator-role"
# in the release namespace (events/secrets/configmaps access defined in _operator-role.tpl).
{{- define "operator.operatorRoleBinding" }}
---
kind: "RoleBinding"
apiVersion: "rbac.authorization.k8s.io/v1"
metadata:
  name: "weblogic-operator-rolebinding"
  namespace: {{ .Release.Namespace | quote }}
  labels:
    weblogic.operatorName: {{ .Release.Namespace | quote }}
subjects:
- kind: "ServiceAccount"
  name: {{ .serviceAccount | quote }}
  namespace: {{ .Release.Namespace | quote }}
  apiGroup: ""
roleRef:
  kind: "Role"
  name: "weblogic-operator-role"
  apiGroup: "rbac.authorization.k8s.io"
{{- end }}

# Copyright (c) 2018, 2021, Oracle and/or its affiliates.
# Licensed under the Universal Permissive License v 1.0 as shown at https://oss.oracle.com/licenses/upl.

# operator.operatorSecrets renders the operator's Secret:
#   - externalOperatorKey is stored only when external REST is enabled and the value was given.
#   - The `lookup` call reads the Secret already in the cluster (if any) and carries its
#     internalOperatorKey forward, so the key survives `helm upgrade`. `lookup` returns an
#     empty map during `helm template`/dry-run, in which case the key is simply omitted.
{{- define "operator.operatorSecrets" }}
---
apiVersion: "v1"
kind: "Secret"
data:
  {{- if (and .externalRestEnabled (hasKey . "externalOperatorKey")) }}
  externalOperatorKey: {{ .externalOperatorKey | quote }}
  {{- end }}
  {{- $secret := (lookup "v1" "Secret" .Release.Namespace "weblogic-operator-secrets") }}
  {{- if (and $secret $secret.data) }}
  {{- $internalOperatorKey := index $secret.data "internalOperatorKey" }}
  {{- if $internalOperatorKey }}
  internalOperatorKey: {{ $internalOperatorKey }}
  {{- end }}
  {{- end }}
metadata:
  labels:
    weblogic.operatorName: {{ .Release.Namespace | quote }}
  name: "weblogic-operator-secrets"
  namespace: {{ .Release.Namespace | quote }}
type: "Opaque"
{{- end }}

# Copyright (c) 2018, 2021, Oracle and/or its affiliates.
# Licensed under the Universal Permissive License v 1.0 as shown at https://oss.oracle.com/licenses/upl.

# operator.operator is the top-level composition: it includes every operator resource in
# order. Cluster-scoped non-resource role/binding are skipped in Dedicated mode (or the
# deprecated dedicated=true with the List strategy). With enableClusterRoleBinding a single
# namespace binding is emitted; otherwise operator.domainNamespaces emits per-namespace RBAC.
{{- define "operator.operator" -}}
{{- include "operator.operatorClusterRoleGeneral" . }}
{{- include "operator.operatorClusterRoleNamespace" . }}
{{- if not (or (eq (default "List" .domainNamespaceSelectionStrategy) "Dedicated") (and .dedicated (eq (default "List" .domainNamespaceSelectionStrategy) "List"))) }}
{{- include "operator.operatorClusterRoleNonResource" . }}
{{- end }}
{{- include "operator.operatorClusterRoleOperatorAdmin" . }}
{{- include "operator.operatorClusterRoleDomainAdmin" . }}
{{- include "operator.clusterRoleBindingGeneral" . }}
{{- include "operator.clusterRoleBindingAuthDelegator" . }}
{{- include "operator.clusterRoleBindingDiscovery" . }}
{{- if not (or (eq (default "List" .domainNamespaceSelectionStrategy) "Dedicated") (and .dedicated (eq (default "List" .domainNamespaceSelectionStrategy) "List"))) }}
{{- include "operator.clusterRoleBindingNonResource" . }}
{{- end }}
{{- include "operator.operatorRole" . }}
{{- include "operator.operatorRoleBinding" . }}
{{- include "operator.operatorConfigMap" . }}
{{- include "operator.operatorSecrets" . }}
{{- include "operator.operatorDeployment" . }}
{{- include "operator.operatorInternalService" . }}
{{- include "operator.operatorExternalService" . }}
{{- if .enableClusterRoleBinding }}
{{- include "operator.operatorRoleBindingNamespace" . }}
{{- else }}
{{- include "operator.domainNamespaces" . }}
{{- end }}
{{- end }}

# Copyright (c) 2018, 2021, Oracle and/or its affiliates.
# Licensed under the Universal Permissive License v 1.0 as shown at https://oss.oracle.com/licenses/upl.

# _utils.tpl — validation helpers. Convention used throughout: a "verify" helper renders the
# string " true" on success and renders nothing (while recording an error on the scope) on
# failure, so callers can use `if include "utils.xxx" ...`. Errors accumulate on the scope
# under "validationErrors" and are reported by utils.endValidation via `fail`.

{{/*
Start validation
*/}}
{{- define "utils.startValidation" -}}
{{- $scope := . -}}
{{- $context := dict "scope" $scope "path" list -}}
{{- $stack := list $context -}}
{{- $ignore := set $scope "validationContextStack" $stack -}}
{{- $ignore := include "utils.setCurrentValidationContext" $scope -}}
{{- end -}}

{{/*
End validation
If there were any validation errors, report them and kill the helm chart installation.
*/}}
{{- define "utils.endValidation" -}}
{{- $scope := . -}}
{{- if hasKey $scope "validationErrors" -}}
{{- fail $scope.validationErrors -}}
{{- end -}}
{{- end -}}

{{/*
Push a new validation context
*/}}
{{- define "utils.pushValidationContext" -}}
{{- $scope := index . 0 }}
{{- $scopeName := index . 1 }}
{{- $newScope := index $scope.validationScope $scopeName -}}
{{- $newPath := append $scope.validationPath $scopeName -}}
{{- $newContext := dict "scope" $newScope "path" $newPath -}}
{{- $newStack := append $scope.validationContextStack $newContext -}}
{{- $ignore := set $scope "validationContextStack" $newStack -}}
{{- $ignore := include "utils.setCurrentValidationContext" $scope -}}
{{- end -}}

{{/*
Pop the validation context
*/}}
{{- define "utils.popValidationContext" -}}
{{- $scope := . }}
{{- $stack := $scope.validationContextStack -}}
{{- $ignore := set $scope "validationContextStack" (initial $stack) -}}
{{- $ignore := include "utils.setCurrentValidationContext" $scope -}}
{{- end -}}

{{/*
Set the current validation context from the stack
*/}}
{{- define "utils.setCurrentValidationContext" -}}
{{- $scope := . }}
{{- $context := $scope.validationContextStack | last -}}
{{- $ignore := set $scope "validationScope" (index $context "scope") -}}
{{- $ignore := set $scope "validationPath" (index $context "path") -}}
{{- end -}}

{{/*
Record a validation error (it will get reported later by utils.endValidation)
*/}}
{{- define "utils.recordValidationError" -}}
{{- $scope := index . 0 -}}
{{- $errorMsg := index . 1 -}}
{{- $path := $scope.validationPath -}}
{{- $pathStr := $path | join "." | trim -}}
{{- $scopedErrorMsg := (list "\n" $pathStr $errorMsg) | compact | join " " -}}
{{- if hasKey $scope "validationErrors" -}}
{{- $newValidationErrors := cat $scope.validationErrors $scopedErrorMsg -}}
{{- $ignore := set $scope "validationErrors" $newValidationErrors -}}
{{- else -}}
{{- $newValidationErrors := $scopedErrorMsg -}}
{{- $ignore := set $scope "validationErrors" $newValidationErrors -}}
{{- end -}}
{{- end -}}

{{/*
Returns whether any errors have been reported
*/}}
{{- define "utils.haveValidationErrors" -}}
{{- if hasKey . "validationErrors" -}}
 true
{{- end -}}
{{- end -}}

{{/*
Determine whether a dictionary has a non-null value for a key
*/}}
{{- define "utils.dictionaryHasNonNullValue" -}}
{{- $dict := index . 0 -}}
{{- $name := index . 1 -}}
{{- if and (hasKey $dict $name) (not ( eq (typeOf (index $dict $name)) "" )) -}}
 true
{{- end -}}
{{- end -}}

{{/*
Verify that a value of a specific kind has been specified.
*/}}
{{- define "utils.verifyValue" -}}
{{- $requiredKind := index . 0 -}}
{{- $scope := index . 1 -}}
{{- $name := index . 2 -}}
{{- $isRequired := index . 3 -}}
{{- if $scope.trace -}}
{{- $errorMsg := cat "TRACE" $name $requiredKind $isRequired -}}
{{- $ignore := include "utils.recordValidationError" (list $scope $errorMsg) -}}
{{- end -}}
{{- $parent := $scope.validationScope -}}
{{- if include "utils.dictionaryHasNonNullValue" (list $parent $name) -}}
{{- $value := index $parent $name -}}
{{- $actualKind := kindOf $value -}}
{{- if eq $requiredKind $actualKind -}}
 true
{{- else -}}
{{- $errorMsg := cat $name "must be a" $requiredKind ":" $actualKind -}}
{{- include "utils.recordValidationError" (list $scope $errorMsg) -}}
{{- end -}}
{{- else -}}
{{- if $isRequired -}}
{{- $errorMsg := cat $requiredKind $name "must be specified" -}}
{{- include "utils.recordValidationError" (list $scope $errorMsg) -}}
{{- else -}}
 true
{{- end -}}
{{- end -}}
{{- end -}}

{{/*
Verify that a list value has been specified
*/}}
{{- define "utils.verifyListValue" -}}
{{- $requiredKind := index . 0 -}}
{{- $scope := index . 1 -}}
{{- $name := index . 2 -}}
{{- $isRequired := index . 3 -}}
{{- $parent := $scope.validationScope -}}
{{- $args := . -}}
{{- if include "utils.verifyValue" (list "slice" $scope $name $isRequired) -}}
{{- $status := dict -}}
{{- if hasKey $parent $name -}}
{{- $list := index $parent $name -}}
{{- range $value := $list -}}
{{- $actualKind := kindOf $value -}}
{{- if not (eq $requiredKind $actualKind) -}}
{{- $errorMsg := cat $name "must only contain" $requiredKind "elements:" $actualKind -}}
{{- include "utils.recordValidationError" (list $scope $errorMsg) -}}
{{- $ignore := set $status "error" true -}}
{{- end -}}
{{- end -}}
{{- end -}}
{{- if not (hasKey $status "error") -}}
 true
{{- end -}}
{{- end -}}
{{- end -}}

{{/*
Verify a string value
*/}}
{{- define "utils.baseVerifyString" -}}
{{- include "utils.verifyValue" (prepend . "string") -}}
{{- end -}}

{{/*
Verify a required string value
*/}}
{{- define "utils.verifyString" -}}
{{- include "utils.baseVerifyString" (append . true) -}}
{{- end -}}

{{/*
Verify an optional string value
*/}}
{{- define "utils.verifyOptionalString" -}}
{{- include "utils.baseVerifyString" (append . false) -}}
{{- end -}}

{{/*
Verify a boolean value
*/}}
{{- define "utils.baseVerifyBoolean" -}}
{{- include "utils.verifyValue" (prepend . "bool") -}}
{{- end -}}

{{/*
Verify a required boolean value
*/}}
{{- define "utils.verifyBoolean" -}}
{{- include "utils.baseVerifyBoolean" (append . true) -}}
{{- end -}}

{{/*
Verify an optional boolean value
*/}}
{{- define "utils.verifyOptionalBoolean" -}}
{{- include "utils.baseVerifyBoolean" (append . false) -}}
{{- end -}}

{{/*
Verify an integer value
(YAML/JSON numbers arrive as Go float64, hence the "float64" kind check.)
*/}}
{{- define "utils.baseVerifyInteger" -}}
{{- include "utils.verifyValue" (prepend . "float64") -}}
{{- end -}}

{{/*
Verify a required integer value
*/}}
{{- define "utils.verifyInteger" -}}
{{- include "utils.baseVerifyInteger" (append . true) -}}
{{- end -}}

{{/*
Verify an optional integer value
*/}}
{{- define "utils.verifyOptionalInteger" -}}
{{- include "utils.baseVerifyInteger" (append . false) -}}
{{- end -}}

{{/*
Verify a dictionary value
*/}}
{{- define "utils.baseVerifyDictionary" -}}
{{- include "utils.verifyValue" (prepend . "map") -}}
{{- end -}}

{{/*
Verify a required dictionary value
*/}}
{{- define "utils.verifyDictionary" -}}
{{- include "utils.baseVerifyDictionary" (append . true) -}}
{{- end -}}

{{/*
Verify an optional dictionary value
*/}}
{{- define "utils.verifyOptionalDictionary" -}}
{{- include "utils.baseVerifyDictionary" (append . false) -}}
{{- end -}}

{{/*
Verify a enum string value
*/}}
{{- define "utils.baseVerifyEnum" -}}
{{- $scope := index . 0 -}}
{{- $name := index . 1 -}}
{{- $legalValues := index . 2 -}}
{{- $isRequired := index . 3 -}}
{{- if include "utils.baseVerifyString" (list $scope $name $isRequired) -}}
{{- $parent := $scope.validationScope -}}
{{- if include "utils.dictionaryHasNonNullValue" (list $parent $name) -}}
{{- $value := index $parent $name -}}
{{- if has $value $legalValues -}}
 true
{{- else -}}
{{ $errorMsg := cat $name "must be one of the following values" $legalValues ":" $value -}}
{{- include "utils.recordValidationError" (list $scope $errorMsg) -}}
{{- end -}}
{{- end -}}
{{- end -}}
{{- end -}}

{{/*
Verify a required enum string value
*/}}
{{- define "utils.verifyEnum" -}}
{{- include "utils.baseVerifyEnum" (append . true) -}}
{{- end -}}

{{/*
Verify an optional enum string value
*/}}
{{- define "utils.verifyOptionalEnum" -}}
{{- include "utils.baseVerifyEnum" (append . false) -}}
{{- end -}}

{{/*
Verify a kubernetes resource name string value
*/}}
{{- define "utils.baseVerifyResourceName" -}}
{{/* https://kubernetes.io/docs/concepts/overview/working-with-objects/names */}}
{{/* names: only lower case, numbers, dot, dash, max 253 */}}
{{/* https://kubernetes.io/docs/concepts/overview/working-with-objects/labels/#syntax-and-character-set */}}
{{/* labels/selectors - upper & lower case, numbers, dot, dash, underscore, max 63 */}}
{{- $scope := index . 0 -}}
{{- $name := index . 1 -}}
{{- $max := index . 2 -}}
{{- $isRequired := index . 3 -}}
{{- if include "utils.baseVerifyString" (list $scope $name $isRequired) -}}
{{- $parent := $scope.validationScope -}}
{{- if include "utils.dictionaryHasNonNullValue" (list $parent $name) -}}
{{- $value := index $parent $name -}}
{{- $len := len $value -}}
{{- if and (le $len $max) (regexMatch "^[a-z0-9.-]+$" $value) -}}
 true
{{- else -}}
{{- $errorMsg := cat $name "must only contain lower case letters, numbers, dashes and dots, and must not contain more than" $max "characters: " $value -}}
{{- include "utils.recordValidationError" (list $scope $errorMsg) -}}
{{- end -}}
{{- end -}}
{{- else -}}
{{- end -}}
{{- end -}}

{{/*
Verify a required kubernetes resource name string value
*/}}
{{- define "utils.verifyResourceName" -}}
{{- include "utils.baseVerifyResourceName" (append . true) -}}
{{- end -}}

{{/*
Verify an optional kubernetes resource name string value
*/}}
{{- define "utils.verifyOptionalResourceName" -}}
{{- include "utils.baseVerifyResourceName" (append . false) -}}
{{- end -}}

{{/*
Verify external service name suffix string value
*/}}
{{- define "utils.verifyExternalServiceNameSuffix" -}}
{{- include "utils.baseVerifyResourceName" (append . false) -}}
{{- end -}}

{{/*
Verify introspector job name suffix string value
*/}}
{{- define "utils.verifyIntrospectorJobNameSuffix" -}}
{{- include "utils.baseVerifyResourceName" (append . false) -}}
{{- end -}}

{{/*
Verify a list of strings value
*/}}
{{- define "utils.baseVerifyStringList" -}}
{{- include "utils.verifyListValue" (prepend . "string") -}}
{{- end -}}

{{/*
Verify a required list of strings value
*/}}
{{- define "utils.verifyStringList" -}}
{{- include "utils.baseVerifyStringList" (append . true) -}}
{{- end -}}

{{/*
Verify an optional list of strings value
*/}}
{{- define "utils.verifyOptionalStringList" -}}
{{- include "utils.baseVerifyStringList" (append . false) -}}
{{- end -}}

{{/*
Verify a list of dictionaries value
*/}}
{{- define "utils.baseVerifyDictionaryList" -}}
{{- include "utils.verifyListValue" (prepend . "map") -}}
{{- end -}}

{{/*
Verify a required list of dictionaries value
*/}}
{{- define "utils.verifyDictionaryList" -}}
{{- include "utils.baseVerifyDictionaryList" (append . true) -}}
{{- end -}}

{{/*
Verify an optional list of dictionaries value
*/}}
{{- define "utils.verifyOptionalDictionaryList" -}}
{{- include "utils.baseVerifyDictionaryList" (append . false) -}}
{{- end -}}

{{/*
Merge a set of dictionaries into a single dictionary.

The scope must be a list of dictionaries, starting with the least specific
and ending with the most specific.

First it makes an empty destination dictionary, then iterates over the dictionaries,
overlaying their values on the destination dictionary.

If a value is null, then it removes that key from the destination dictionary.

If the value is already present in the destination dictionary, and the old and
new values are both dictionaries, it merges them into the destination.
*/}}
{{- define "utils.mergeDictionaries" -}}
{{- $dest := dict -}}
{{- range $src := . -}}
{{- if not (empty $src) -}}
{{- range $key, $value := $src -}}
{{- $ignore := include "utils.mergeDictionaryValue" (list $dest $key $value) -}}
{{- end -}}
{{- end -}}
{{- end -}}
{{- toYaml $dest -}}
{{- end -}}

{{/*
Merge a value into a dictionary.
This is like helm's 'merge' function, except that it handles null entries too.
*/}}
{{- define "utils.mergeDictionaryValue" -}}
{{- $dest := index . 0 -}}
{{- $key := index . 1 -}}
{{- $newValue := index . 2 -}}
{{- $newType := typeOf $newValue -}}
{{- if hasKey $dest $key -}}
{{- if eq $newType "" -}}
{{/* # if the value already existed, and the new value is null, remove the old value */}}
{{- $ignore := unset $dest $key -}}
{{- else -}}
{{- $oldValue := index $dest $key -}}
{{- $oldKind := kindOf $oldValue -}}
{{- $newKind := kindOf $newValue -}}
{{- if (and (eq $oldKind "map") (eq $newKind "map")) -}}
{{/* # if both values are maps, merge them */}}
{{- $merged := include "utils.mergeDictionaries" (list $oldValue $newValue) | fromYaml -}}
{{- $ignore := set $dest $key $merged -}}
{{- else -}}
{{/* # replace the old value with the new one */}}
{{- $ignore := set $dest $key $newValue -}}
{{- end -}}
{{- end -}}
{{- else -}}
{{- if not (eq $newType "") -}}
{{/* #if there was no old value, and the new value isn't null, use the new value */}}
{{- $ignore := set $dest $key $newValue -}}
{{- end -}}
{{- end -}}
{{- end -}}

{{/*
Make a writable copy of a dictionary.
TBD - does helm provide a clone method we can use instead?
*/}}
{{- define "utils.cloneDictionary" -}}
{{- include "utils.mergeDictionaries" (list .) -}}
{{- end -}}

{{/*
Verify that a list of values (exclude) can not be defined if another value (key) is already defined
*/}}
{{- define "utils.mutexValue" -}}
{{- $scope := index . 0 -}}
{{- $key := index . 1 -}}
{{- $exclude := index . 2 -}}
{{- $type := index . 3 -}}
{{- $parent := $scope.validationScope -}}
{{- $args := . -}}
{{- $status := dict -}}
{{- if hasKey $parent $key -}}
{{- range $value := $exclude -}}
{{- if hasKey $parent $value -}}
{{- $errorMsg := cat $value "can not be present when" $key "is defined" " " -}}
{{- include "utils.recordValidationError" (list $scope $errorMsg) -}}
{{- $ignore := set $status "error" true -}}
{{- end -}}
{{- end -}}
{{- end -}}
{{- if not (hasKey $status "error") -}}
 true
{{- end -}}
{{- end -}}

{{/*
Verify that a list of strings can not be defined if another string is already defined
*/}}
{{- define "utils.mutexString" -}}
{{- include "utils.mutexValue" (append . "string") -}}
{{- end -}}

{{/*
Verify that a Kubernetes resource exists in a given namespace.
(Skipped silently when the namespace itself is not visible, e.g. during dry-run,
because `lookup` returns nothing in that case.)
*/}}
{{- define "utils.verifyK8SResource" -}}
{{- $scope := index . 0 -}}
{{- $name := index . 1 -}}
{{- $type := index . 2 -}}
{{- $namespace := index . 3 -}}
{{- $foundNS := (lookup "v1" "Namespace" "" $namespace) }}
{{- if $foundNS }}
{{- $foundResource := (lookup "v1" $type $namespace $name) }}
{{- if not $foundResource }}
{{- $errorMsg := cat $type $name " not found in namespace " $namespace -}}
{{- include "utils.recordValidationError" (list $scope $errorMsg) -}}
{{- end -}}
{{- end -}}
{{- end -}}

# Copyright (c) 2018, 2021, Oracle and/or its affiliates.
# Licensed under the Universal Permissive License v 1.0 as shown at https://oss.oracle.com/licenses/upl.

# operator.validateInputs checks every chart value on an isolated, writable copy of the scope
# using the _utils.tpl helpers; accumulated errors abort the install via utils.endValidation.
{{- define "operator.validateInputs" -}}
{{- $scope := include "utils.cloneDictionary" . | fromYaml -}}
{{- $ignore:= include "utils.startValidation" $scope -}}
{{- $ignore := include "utils.pushValidationContext" (list $scope "Release") -}}
{{- $ignore := include "utils.verifyResourceName" (list $scope "Namespace" 63) -}}
{{- $ignore := include "utils.popValidationContext" $scope -}}
{{- $ignore := include "utils.verifyString" (list $scope "serviceAccount") -}}
{{- $ignore := include "utils.verifyK8SResource" (list $scope .serviceAccount "ServiceAccount" .Release.Namespace) -}}
{{- $ignore := include "utils.verifyString" (list $scope "image") -}}
{{- $ignore := include "utils.verifyEnum" (list $scope "imagePullPolicy" (list "Always" "IfNotPresent" "Never")) -}}
{{- $ignore := include "utils.verifyOptionalDictionaryList" (list $scope "imagePullSecrets") -}}
{{- $ignore := include "utils.verifyEnum" (list $scope "javaLoggingLevel" (list "SEVERE" "WARNING" "INFO" "CONFIG" "FINE" "FINER" "FINEST")) -}}
{{- if include "utils.verifyBoolean" (list $scope "externalRestEnabled") -}}
{{- if $scope.externalRestEnabled -}}
{{- $ignore := include "utils.verifyInteger" (list $scope "externalRestHttpsPort") -}}
{{- $ignore := include "utils.mutexString" (list $scope "externalRestIdentitySecret" (list "externalOperatorKey" "externalOperatorCert")) -}}
{{- if (or (hasKey $scope "externalOperatorCert") (hasKey $scope "externalOperatorKey")) -}}
{{- $ignore := include "utils.verifyString" (list $scope "externalOperatorCert") -}}
{{- $ignore := include "utils.verifyString" (list $scope "externalOperatorKey") -}}
{{- else }}
{{- $ignore := include "utils.verifyString" (list $scope "externalRestIdentitySecret") -}}
{{- end -}}
{{- end -}}
{{- end -}}
{{- if include "utils.verifyBoolean" (list $scope "remoteDebugNodePortEnabled") -}}
{{- if $scope.remoteDebugNodePortEnabled -}}
{{- $ignore := include "utils.verifyBoolean" (list $scope "suspendOnDebugStartup") -}}
{{- $ignore := include "utils.verifyInteger" (list $scope "internalDebugHttpPort") -}}
{{- $ignore := include "utils.verifyInteger" (list $scope "externalDebugHttpPort") -}}
{{- end -}}
{{- end -}}
{{- $ignore := include "utils.verifyOptionalBoolean" (list $scope "enableClusterRoleBinding") -}}
{{- if and .enableClusterRoleBinding (or (eq (default "List" .domainNamespaceSelectionStrategy) "Dedicated") (and .dedicated (eq (default "List" .domainNamespaceSelectionStrategy) "List"))) }}
{{- $errorMsg := "The enableClusterRoleBinding value may not be true when either dedicated is true or domainNamespaceSelectionStrategy is Dedicated" -}}
{{- include "utils.recordValidationError" (list $scope $errorMsg) -}}
{{- end -}}
{{- if eq (default "List" $scope.domainNamespaceSelectionStrategy) "List" -}}
{{- $ignore := include "utils.verifyStringList" (list $scope "domainNamespaces") -}}
{{- end -}}
{{- if include "utils.verifyBoolean" (list $scope "elkIntegrationEnabled") -}}
{{- if $scope.elkIntegrationEnabled -}}
{{- $ignore := include "utils.verifyString" (list $scope "logStashImage") -}}
{{- $ignore := include "utils.verifyString" (list $scope "elasticSearchHost") -}}
{{- $ignore := include "utils.verifyInteger" (list $scope "elasticSearchPort") -}}
{{- end -}}
{{- end -}}
{{- $ignore := include "utils.verifyOptionalBoolean" (list $scope "dedicated") -}}
{{- $ignore := include "utils.verifyOptionalEnum" (list $scope "domainNamespaceSelectionStrategy" (list "List" "LabelSelector" "RegExp" "Dedicated")) -}}
{{- if eq (default "List" $scope.domainNamespaceSelectionStrategy) "LabelSelector" -}}
{{- $ignore := include "utils.verifyString" (list $scope "domainNamespaceLabelSelector") -}}
{{- end -}}
{{- if eq (default "List" $scope.domainNamespaceSelectionStrategy) "RegExp" -}}
{{- $ignore := include "utils.verifyString" (list $scope "domainNamespaceRegExp") -}}
{{- end -}}
{{- $ignore := include "utils.verifyOptionalBoolean" (list $scope "mockWLS") -}}
{{- $ignore := include "utils.verifyIntrospectorJobNameSuffix" (list $scope "introspectorJobNameSuffix" 25) -}}
{{- $ignore := include "utils.verifyExternalServiceNameSuffix" (list $scope "externalServiceNameSuffix" 10) -}}
{{- $ignore := include "utils.verifyOptionalBoolean" (list $scope "clusterSizePaddingValidationEnabled") -}}
{{- $ignore := include "utils.endValidation" $scope -}}
{{- end -}}

# Copyright (c) 2018, 2021, Oracle and/or its affiliates.
# Licensed under the Universal Permissive License v 1.0 as shown at https://oss.oracle.com/licenses/upl.

# main.yaml — chart entry point: builds a writable scope from .Values (plus the built-in
# Files/Chart/Release/Capabilities objects the defines need), validates it, then renders
# all operator resources.
{{- $scope := include "utils.cloneDictionary" .Values | fromYaml -}}
{{- $ignore := set $scope "Files" .Files -}}
{{- $ignore := set $scope "Chart" .Chart -}}
{{- $ignore := set $scope "Release" .Release -}}
{{- $ignore := set $scope "APIVersions" .Capabilities.APIVersions -}}

{{ include "operator.validateInputs" $scope }}
{{- include "operator.operator" $scope }}

# Copyright (c) 2018, 2021, Oracle and/or its affiliates.
# Licensed under the Universal Permissive License v 1.0 as shown at https://oss.oracle.com/licenses/upl.

# serviceAccount specifies the name of the ServiceAccount in the operator's namespace that the
# operator will use to make requests to the Kubernetes API server.
# The customer is responsible for creating the ServiceAccount in the same namespace as this Helm release.
+# If not specified, the the operator will use the Helm release namespace's 'default' ServiceAccount. +serviceAccount: "default" + +# domainNamespaceSelectionStrategy specifies how the operator will select the set of namespaces +# that it will manage. Legal values are: List, LabelSelector, RegExp, and Dedicated. If set to 'List', +# then the operator will manage the set of namespaces listed by the 'domainNamespaces' value. +# If set to 'LabelSelector', then the operator will manage the set of namespaces discovered by a list +# of namespaces using the value specified by 'domainNamespaceLabelSelector' as a label selector. +# If set to 'RegExp', then the operator will manage the set of namespaces discovered by a list +# of namespaces using the value specified by 'domainNamespaceRegExp' as a regular expression matched +# against the namespace names. +# If set to 'Dedicated', then operator will manage WebLogic Domains only in the same namespace +# where the operator itself is deployed, which is the namespace of the Helm release. +domainNamespaceSelectionStrategy: List + +# This value is deprecated. Please use 'domainNamespaceSelectionStrategy: Dedicated'. +# dedicated specifies if this operator will manage WebLogic Domains only in the same namespace in +# which the operator itself is deployed. If set to 'true', then the 'domainNamespaces' value below +# is ignored. This value is ignored if 'domainNamespaceSelectionStrategy' is set to a value other +# than 'List'. +# dedicated: false + +# domainNamespaces specifies list of WebLogic Domain namespaces that this operator manages. This value +# is ignored if 'domainNamespaceSelectionStrategy' is not 'List'. The customer is responsible for creating these +# namespaces. If not specified, then the operator will manage WebLogic Domains in the Kubernetes 'default' namespace. +# +# Example: In the configuration below, the operator will manage namespace1 and namespace2. 
+# +# domainNamespaces: +# - "namespace1" +# - "namespace2" +domainNamespaces: +- "default" + +# domainNamespaceLabelSelector specifies the label selector value that the operator will use when listing +# namespaces in search of the namespaces that contain WebLogic Domains that this operator will manage. Ignored +# if 'domainNamespaceSelectionStrategy' is not 'LabelSelector'. +# +# Example: manage any namespace with a label named "weblogic-operator". +# +# domainNamespaceLabelSelector: "weblogic-operator" +# +# domainNamespaceLabelSelector: + +# domainNamespaceRegExp specifies a regular expression that will be matched against namespace names when listing +# namespaces in search of the namespaces that contain WebLogic Domains that this operator will manage. Ignored +# if 'domainNamespaceSelectionStrategy' is not 'RegExp'. +# +# Example: manage any namespace where the namespace name starts with "prod". +# +# domainNamespaceRegExp: "^prod" +# +# domainNamespaceRegExp: + +# enableClusterRoleBinding specifies whether the roles necessary for the operator to manage domains +# will be granted using a ClusterRoleBinding rather than using RoleBindings in each managed namespace. +enableClusterRoleBinding: false + +# image specifies the container image containing the operator. +image: "ghcr.io/oracle/weblogic-kubernetes-operator:3.3.0" + +# imagePullPolicy specifies the image pull policy for the operator's container image. +imagePullPolicy: IfNotPresent + +# imagePullSecrets contains an optional list of Kubernetes Secrets, in the operator's namespace, +# that are needed to access the registry containing the operator's container image. +# The customer is responsible for creating the Secret. +# If no Secrets are required, then omit this property. 
+# +# Example: a Secret is needed, and has been stored in 'my-operator-secret' +# +# imagePullSecrets: +# - name: "my-operator-secret" + +# externalRestEnabled specifies whether the the operator's REST interface is exposed +# outside of the Kubernetes cluster on the port specified by the 'externalRestHttpsPort' +# property. +# +# If set to true, then the customer must provide the SSL certificate and private key for +# the operator's external REST interface by specifying the 'externalOperatorCert' and +# 'externalOperatorKey' properties. +externalRestEnabled: false + +# externalRestHttpsPort specifies the node port that should be allocated for the external operator REST HTTPS interface. +# This parameter is required if 'externalRestEnabled' is true. +# Otherwise, it is ignored. +externalRestHttpsPort: 31001 + +# The name of the Secret used to store the certificate and private key to use for the external operator REST HTTPS interface. +# The Secret has to be created in the same namespace of the WebLogic operator. +# This parameter is required if 'externalRestEnabled' is true. Otherwise, it is ignored. +# As example, an external REST identity can be created using the following sample script +# kubernetes/samples/scripts/rest/generate-external-rest-identity.sh +# externalRestIdentitySecret: + +# elkIntegrationEnabled specifies whether or not ELK integration is enabled. +elkIntegrationEnabled: false + +# logStashImage specifies the container image containing logstash. +# This parameter is ignored if 'elkIntegrationEnabled' is false. +logStashImage: "logstash:6.6.0" + +# elasticSearchHost specifies the hostname of where elasticsearch is running. +# This parameter is ignored if 'elkIntegrationEnabled' is false. +elasticSearchHost: "elasticsearch.default.svc.cluster.local" + +# elasticSearchPort specifies the port number of where elasticsearch is running. +# This parameter is ignored if 'elkIntegrationEnabled' is false. 
+elasticSearchPort: 9200 + +# featureGates specifies a set of key=value pairs separated by commas that describe whether a given +# operator feature is enabled. You enable a feature by including a key=value pair where the key is the +# feature name and the value is "true". This will allow the operator team to release features that +# are not yet ready to be enabled by default, but that are ready for testing by customers. Once a feature is +# stable then it will be enabled by default and can not be disabled using this configuration. +# featureGates: "...,AuxiliaryImage=true" + +# javaLoggingLevel specifies the Java logging level for the operator. This affects the operator pod's +# log output and the contents of log files in the container's /logs/ directory. +# Valid values are: "SEVERE", "WARNING", "INFO", "CONFIG", "FINE", "FINER", and "FINEST". +javaLoggingLevel: "INFO" + +# javaLoggingFileSizeLimit specifies the maximum size in bytes for an individual Java logging file in the operator container's +# /logs/ directory. +javaLoggingFileSizeLimit: 20000000 + +# javaLoggingFileCount specifies the number of Java logging files to preserve in the operator container's /logs/ +# directory as the files are rotated. +javaLoggingFileCount: 10 + +# labels specifies a set of key-value labels that will be added to each pod running the operator. +# See https://kubernetes.io/docs/concepts/overview/working-with-objects/labels/ +#labels: + +# annotations specifies a set of key-value annotations that will be added to each pod running the operator. +# See https://kubernetes.io/docs/concepts/overview/working-with-objects/annotations/ +#annotations: + +# nodeSelector specifies a matching rule that the Kubernetes scheduler will use when selecting the node +# where the operator will run. If the nodeSelector value is specified, then this content will be added to +# the operator's deployment. 
See https://kubernetes.io/docs/concepts/scheduling-eviction/assign-pod-node/#nodeselector +# for more information on node selectors. +#nodeSelector: + +# affinity specifies a set of matching rules related to the presence of other workloads that the Kubernetes scheduler +# will use when selecting the node where the operator will run. If the affinity value is specified, then this content +# will be added to the operator's deployment. See https://kubernetes.io/docs/concepts/scheduling-eviction/assign-pod-node/#affinity-and-anti-affinity +# for more information on affinity and anti-affinity. +#affinity: + +# Values related to debugging the operator. +# Customers should not need to use the following properties + +# remoteDebugNodePortEnabled specifies whether or not the operator will provide a Java remote debug interface on the +# provided port. If the 'suspendOnDebugStartup' property is specified, the operator will suspend execution +# until a remote debugger has attached. +# The 'internalDebugHttpPort' property controls the port number inside the Kubernetes +# cluster and the 'externalDebugHttpPort' property controls the port number outside +# the Kubernetes cluster. +remoteDebugNodePortEnabled: false + +#suspendOnDebugStartup specifies whether the operator will suspend on startup when a Java remote debugging is enabled. +suspendOnDebugStartup: false + +# internalDebugHttpPort specifies the port number inside the Kubernetes cluster for the operator's Java +# remote debug interface. +# This parameter is required if 'remoteDebugNodePortEnabled' is true. +# Otherwise, it is ignored. +internalDebugHttpPort: 30999 + +# externalDebugHttpPort specifies the node port that should be allocated for the operator's +# Java remote debug interface. +# This parameter is required if 'remoteDebugNodePortEnabled' is true. +# Otherwise, it is ignored. 
+externalDebugHttpPort: 30999 + +# dns1123Fields overrides the default list of field names that the operator +# converts to DNS-1123 legal values when replacing variable references in the +# Domain resource. The default list can be found inside the class LegalNames +# in the oracle.kubernetes.operator.helpers package. +# Supply a comma separated list of field names to customize the list of fields +# such as "name, claimName, volumeName", or leave it commented out to use +# the default list of field names. +# dns1123Fields: "" + +# introspectorJobNameSuffix overrides the default suffix that the operator uses +# to append to the domainUID to form the name of the domain introspector job name. +# Note that the resultant job name should not be more than 58 characters due to +# the Kubernetes limit to the name of a job and Kubernetes appends five additional +# characters to the name of the pod that is created by the job controller. +# The default suffix is '-introspector'. +# The default suffix in pre-3.1.0 is "-introspect-domain-job" +introspectorJobNameSuffix: "-introspector" + +# externalServiceNameSuffix overrides the default suffix that the operator uses +# to append to the domainUID and the WebLogic admin server name, to form the name +# of the domain's admin server external service. +# Note that the resultant name should not be more than 63 characters due to +# the Kubernetes limit to the name of a service. +# The default suffix is '-ext'. +# The default suffix in pre-3.1.0 is "-external". +externalServiceNameSuffix: "-ext" + +# clusterSizePaddingValidationEnabled specifies if additional one or two characters +# need to be reserved to account for longer managed server names because of an increased +# cluster size. +# The default value is true. +clusterSizePaddingValidationEnabled: true + +# tokenReviewAuthentication, if set to true, specifies whether the the operator's REST API should use +# 1. Kubernetes token review API for authenticating users, and +# 2. 
Kubernetes subject access review API for authorizing a user's operation (get, list, +# patch, etc) on a resource. +# 3. Update the Domain resource using the operator's privileges. +# This parameter, if set to false, will use the caller's bearer token for any update +# to the Domain resource so that it is done using the caller's privileges. +# The default value is false. +#tokenReviewAuthentication: false diff --git a/OracleAccessManagement/kubernetes/common/createFMWJRFDomain.py b/OracleAccessManagement/kubernetes/common/createFMWJRFDomain.py new file mode 100755 index 000000000..bde936ca5 --- /dev/null +++ b/OracleAccessManagement/kubernetes/common/createFMWJRFDomain.py @@ -0,0 +1,332 @@ +# Copyright (c) 2014, 2021, Oracle and/or its affiliates. +# Licensed under the Universal Permissive License v 1.0 as shown at https://oss.oracle.com/licenses/upl. + +import os +import sys + +import com.oracle.cie.domain.script.jython.WLSTException as WLSTException + +class Infra12213Provisioner: + + MACHINES = { + 'machine1' : { + 'NMType': 'SSL', + 'ListenAddress': 'localhost', + 'ListenPort': 5658 + } + } + + JRF_12213_TEMPLATES = { + 'baseTemplate' : '@@ORACLE_HOME@@/wlserver/common/templates/wls/wls.jar', + 'extensionTemplates' : [ + '@@ORACLE_HOME@@/oracle_common/common/templates/wls/oracle.jrf_template.jar', + '@@ORACLE_HOME@@/oracle_common/common/templates/wls/oracle.jrf.ws.async_template.jar', + '@@ORACLE_HOME@@/oracle_common/common/templates/wls/oracle.wsmpm_template.jar', + '@@ORACLE_HOME@@/oracle_common/common/templates/wls/oracle.ums_template.jar', + '@@ORACLE_HOME@@/em/common/templates/wls/oracle.em_wls_template.jar' + ], + 'serverGroupsToTarget' : [ 'JRF-MAN-SVR', 'WSMPM-MAN-SVR' ] + } + + def __init__(self, oracleHome, javaHome, domainParentDir, adminListenPort, adminName, managedNameBase, managedServerPort, prodMode, managedCount, clusterName): + self.oracleHome = self.validateDirectory(oracleHome) + self.javaHome = self.validateDirectory(javaHome) + 
self.domainParentDir = self.validateDirectory(domainParentDir, create=True) + return + + def createInfraDomain(self, domainName, user, password, db, dbPrefix, dbPassword, adminListenPort, adminName, + managedNameBase, managedServerPort, prodMode, managedCount, clusterName, + exposeAdminT3Channel=None, t3ChannelPublicAddress=None, t3ChannelPort=None): + domainHome = self.createBaseDomain(domainName, user, password, adminListenPort, adminName, managedNameBase, + managedServerPort, prodMode, managedCount, clusterName + ) + self.extendDomain(domainHome, db, dbPrefix, dbPassword, exposeAdminT3Channel, t3ChannelPublicAddress, + t3ChannelPort) + + def createBaseDomain(self, domainName, user, password, adminListenPort, adminName, managedNameBase, managedServerPort, prodMode, managedCount, clusterName): + baseTemplate = self.replaceTokens(self.JRF_12213_TEMPLATES['baseTemplate']) + + readTemplate(baseTemplate) + setOption('DomainName', domainName) + setOption('JavaHome', self.javaHome) + if (prodMode == 'true'): + setOption('ServerStartMode', 'prod') + else: + setOption('ServerStartMode', 'dev') + set('Name', domainName) + + admin_port = int(adminListenPort) + ms_port = int(managedServerPort) + ms_count = int(managedCount) + + # Create Admin Server + # ======================= + print 'Creating Admin Server...' + cd('/Servers/AdminServer') + #set('ListenAddress', '%s-%s' % (domain_uid, admin_server_name_svc)) + set('ListenPort', admin_port) + set('Name', adminName) + + # Define the user password for weblogic + # ===================================== + cd('/Security/' + domainName + '/User/weblogic') + set('Name', user) + set('Password', password) + + # Create a cluster + # ====================== + print 'Creating cluster...' 
+ cd('/') + cl=create(clusterName, 'Cluster') + + # Create managed servers + for index in range(0, ms_count): + cd('/') + msIndex = index+1 + cd('/') + name = '%s%s' % (managedNameBase, msIndex) + create(name, 'Server') + cd('/Servers/%s/' % name ) + print('managed server name is %s' % name); + set('ListenPort', ms_port) + set('NumOfRetriesBeforeMSIMode', 0) + set('RetryIntervalBeforeMSIMode', 1) + set('Cluster', clusterName) + + # Create Node Manager + # ======================= + print 'Creating Node Managers...' + for machine in self.MACHINES: + cd('/') + create(machine, 'Machine') + cd('Machine/' + machine) + create(machine, 'NodeManager') + cd('NodeManager/' + machine) + for param in self.MACHINES[machine]: + set(param, self.MACHINES[machine][param]) + + + setOption('OverwriteDomain', 'true') + domainHome = self.domainParentDir + '/' + domainName + print 'Will create Base domain at ' + domainHome + + print 'Writing base domain...' + writeDomain(domainHome) + closeTemplate() + print 'Base domain created at ' + domainHome + return domainHome + + + def extendDomain(self, domainHome, db, dbPrefix, dbPassword, exposeAdminT3Channel, t3ChannelPublicAddress, + t3ChannelPort): + print 'Extending domain at ' + domainHome + print 'Database ' + db + readDomain(domainHome) + setOption('AppDir', self.domainParentDir + '/applications') + + print 'ExposeAdminT3Channel %s with %s:%s ' % (exposeAdminT3Channel, t3ChannelPublicAddress, t3ChannelPort) + if 'true' == exposeAdminT3Channel: + self.enable_admin_channel(t3ChannelPublicAddress, t3ChannelPort) + + print 'Applying JRF templates...' + for extensionTemplate in self.JRF_12213_TEMPLATES['extensionTemplates']: + addTemplate(self.replaceTokens(extensionTemplate)) + + print 'Extension Templates added' + + print 'Configuring the Service Table DataSource...' 
+ fmwDb = 'jdbc:oracle:thin:@' + db + print 'fmwDatabase ' + fmwDb + cd('/JDBCSystemResource/LocalSvcTblDataSource/JdbcResource/LocalSvcTblDataSource') + cd('JDBCDriverParams/NO_NAME_0') + set('DriverName', 'oracle.jdbc.OracleDriver') + set('URL', fmwDb) + set('PasswordEncrypted', dbPassword) + + stbUser = dbPrefix + '_STB' + cd('Properties/NO_NAME_0/Property/user') + set('Value', stbUser) + + print 'Getting Database Defaults...' + getDatabaseDefaults() + + print 'Targeting Server Groups...' + managedName= '%s%s' % (managedNameBase, 1) + print "Set CoherenceClusterSystemResource to defaultCoherenceCluster for server:" + managedName + serverGroupsToTarget = list(self.JRF_12213_TEMPLATES['serverGroupsToTarget']) + cd('/') + setServerGroups(managedName, serverGroupsToTarget) + print "Set CoherenceClusterSystemResource to defaultCoherenceCluster for server:" + managedName + cd('/Servers/' + managedName) + set('CoherenceClusterSystemResource', 'defaultCoherenceCluster') + + print 'Targeting Cluster ...' + cd('/') + print "Set CoherenceClusterSystemResource to defaultCoherenceCluster for cluster:" + clusterName + cd('/Cluster/' + clusterName) + set('CoherenceClusterSystemResource', 'defaultCoherenceCluster') + print "Set WLS clusters as target of defaultCoherenceCluster:" + clusterName + cd('/CoherenceClusterSystemResource/defaultCoherenceCluster') + set('Target', clusterName) + + print 'Preparing to update domain...' 
+ updateDomain() + print 'Domain updated successfully' + closeDomain() + return + + + ########################################################################### + # Helper Methods # + ########################################################################### + + def validateDirectory(self, dirName, create=False): + directory = os.path.realpath(dirName) + if not os.path.exists(directory): + if create: + os.makedirs(directory) + else: + message = 'Directory ' + directory + ' does not exist' + raise WLSTException(message) + elif not os.path.isdir(directory): + message = 'Directory ' + directory + ' is not a directory' + raise WLSTException(message) + return self.fixupPath(directory) + + + def fixupPath(self, path): + result = path + if path is not None: + result = path.replace('\\', '/') + return result + + + def replaceTokens(self, path): + result = path + if path is not None: + result = path.replace('@@ORACLE_HOME@@', oracleHome) + return result + + def enable_admin_channel(self, admin_channel_address, admin_channel_port): + if admin_channel_address == None or admin_channel_port == 'None': + return + cd('/') + admin_server_name = get('AdminServerName') + print('setting admin server t3channel for ' + admin_server_name) + cd('/Servers/' + admin_server_name) + create('T3Channel', 'NetworkAccessPoint') + cd('/Servers/' + admin_server_name + '/NetworkAccessPoint/T3Channel') + set('ListenPort', int(admin_channel_port)) + set('PublicPort', int(admin_channel_port)) + set('PublicAddress', admin_channel_address) + +############################# +# Entry point to the script # +############################# + +def usage(): + print sys.argv[0] + ' -oh -jh -parent -name ' + \ + '-user -password ' + \ + '-rcuDb -rcuPrefix -rcuSchemaPwd ' \ + '-adminListenPort -adminName ' \ + '-managedNameBase -managedServerPort -prodMode ' \ + '-managedServerCount -clusterName ' \ + '-exposeAdminT3Channel -t3ChannelPublicAddress
' \ + '-t3ChannelPort ' + sys.exit(0) + +# Uncomment for Debug only +#print str(sys.argv[0]) + " called with the following sys.argv array:" +#for index, arg in enumerate(sys.argv): +# print "sys.argv[" + str(index) + "] = " + str(sys.argv[index]) + +if len(sys.argv) < 16: + usage() + +#oracleHome will be passed by command line parameter -oh. +oracleHome = None +#javaHome will be passed by command line parameter -jh. +javaHome = None +#domainParentDir will be passed by command line parameter -parent. +domainParentDir = None +#domainUser is hard-coded to weblogic. You can change to other name of your choice. Command line paramter -user. +domainUser = 'weblogic' +#domainPassword will be passed by Command line parameter -password. +domainPassword = None +#rcuDb will be passed by command line parameter -rcuDb. +rcuDb = None +#change rcuSchemaPrefix to your infra schema prefix. Command line parameter -rcuPrefix. +rcuSchemaPrefix = 'DEV12' +#change rcuSchemaPassword to your infra schema password. Command line parameter -rcuSchemaPwd. 
+rcuSchemaPassword = None +exposeAdminT3Channel = None +t3ChannelPort = None +t3ChannelPublicAddress = None +i = 1 +while i < len(sys.argv): + if sys.argv[i] == '-oh': + oracleHome = sys.argv[i + 1] + i += 2 + elif sys.argv[i] == '-jh': + javaHome = sys.argv[i + 1] + i += 2 + elif sys.argv[i] == '-parent': + domainParentDir = sys.argv[i + 1] + i += 2 + elif sys.argv[i] == '-name': + domainName = sys.argv[i + 1] + i += 2 + elif sys.argv[i] == '-user': + domainUser = sys.argv[i + 1] + i += 2 + elif sys.argv[i] == '-password': + domainPassword = sys.argv[i + 1] + i += 2 + elif sys.argv[i] == '-rcuDb': + rcuDb = sys.argv[i + 1] + i += 2 + elif sys.argv[i] == '-rcuPrefix': + rcuSchemaPrefix = sys.argv[i + 1] + i += 2 + elif sys.argv[i] == '-rcuSchemaPwd': + rcuSchemaPassword = sys.argv[i + 1] + i += 2 + elif sys.argv[i] == '-adminListenPort': + adminListenPort = sys.argv[i + 1] + i += 2 + elif sys.argv[i] == '-adminName': + adminName = sys.argv[i + 1] + i += 2 + elif sys.argv[i] == '-managedNameBase': + managedNameBase = sys.argv[i + 1] + i += 2 + elif sys.argv[i] == '-managedServerPort': + managedServerPort = sys.argv[i + 1] + i += 2 + elif sys.argv[i] == '-prodMode': + prodMode = sys.argv[i + 1] + i += 2 + elif sys.argv[i] == '-managedServerCount': + managedCount = sys.argv[i + 1] + i += 2 + elif sys.argv[i] == '-clusterName': + clusterName = sys.argv[i + 1] + i += 2 + elif sys.argv[i] == '-t3ChannelPublicAddress': + t3ChannelPublicAddress = sys.argv[i + 1] + i += 2 + elif sys.argv[i] == '-t3ChannelPort': + t3ChannelPort = sys.argv[i + 1] + i += 2 + elif sys.argv[i] == '-exposeAdminT3Channel': + exposeAdminT3Channel = sys.argv[i + 1] + i += 2 + else: + print 'Unexpected argument switch at position ' + str(i) + ': ' + str(sys.argv[i]) + usage() + sys.exit(1) + +provisioner = Infra12213Provisioner(oracleHome, javaHome, domainParentDir, adminListenPort, adminName, managedNameBase, managedServerPort, prodMode, managedCount, clusterName) 
+provisioner.createInfraDomain(domainName, domainUser, domainPassword, rcuDb, rcuSchemaPrefix, rcuSchemaPassword, + adminListenPort, adminName, managedNameBase, managedServerPort, prodMode, managedCount, + clusterName, exposeAdminT3Channel, t3ChannelPublicAddress, t3ChannelPort) \ No newline at end of file diff --git a/OracleAccessManagement/kubernetes/common/createFMWRestrictedJRFDomain.py b/OracleAccessManagement/kubernetes/common/createFMWRestrictedJRFDomain.py new file mode 100755 index 000000000..acfe5da80 --- /dev/null +++ b/OracleAccessManagement/kubernetes/common/createFMWRestrictedJRFDomain.py @@ -0,0 +1,291 @@ +# Copyright (c) 2014, 2021, Oracle and/or its affiliates. +# Licensed under the Universal Permissive License v 1.0 as shown at https://oss.oracle.com/licenses/upl. + +import os +import sys + +import com.oracle.cie.domain.script.jython.WLSTException as WLSTException + +class Infra12213Provisioner: + + MACHINES = { + 'machine1' : { + 'NMType': 'SSL', + 'ListenAddress': 'localhost', + 'ListenPort': 5658 + } + } + + JRF_12213_TEMPLATES = { + 'baseTemplate' : '@@ORACLE_HOME@@/wlserver/common/templates/wls/wls.jar', + 'extensionTemplates' : [ + '@@ORACLE_HOME@@/oracle_common/common/templates/wls/oracle.jrf_restricted_template.jar', + '@@ORACLE_HOME@@/em/common/templates/wls/oracle.em_wls_restricted_template.jar' + ], + 'serverGroupsToTarget' : [ 'JRF-MAN-SVR', 'WSMPM-MAN-SVR' ] + } + + def __init__(self, oracleHome, javaHome, domainParentDir, adminListenPort, adminName, managedNameBase, managedServerPort, prodMode, managedCount, clusterName): + self.oracleHome = self.validateDirectory(oracleHome) + self.javaHome = self.validateDirectory(javaHome) + self.domainParentDir = self.validateDirectory(domainParentDir, create=True) + return + + def createInfraDomain(self, domainName, user, password, adminListenPort, adminName, + managedNameBase, managedServerPort, prodMode, managedCount, clusterName, + exposeAdminT3Channel=None, t3ChannelPublicAddress=None, 
t3ChannelPort=None): + domainHome = self.createBaseDomain(domainName, user, password, adminListenPort, adminName, managedNameBase, + managedServerPort, prodMode, managedCount, clusterName + ) + self.extendDomain(domainHome, exposeAdminT3Channel, t3ChannelPublicAddress, t3ChannelPort) + + def createBaseDomain(self, domainName, user, password, adminListenPort, adminName, managedNameBase, managedServerPort, prodMode, managedCount, clusterName): + baseTemplate = self.replaceTokens(self.JRF_12213_TEMPLATES['baseTemplate']) + + readTemplate(baseTemplate) + setOption('DomainName', domainName) + setOption('JavaHome', self.javaHome) + if (prodMode == 'true'): + setOption('ServerStartMode', 'prod') + else: + setOption('ServerStartMode', 'dev') + set('Name', domainName) + + admin_port = int(adminListenPort) + ms_port = int(managedServerPort) + ms_count = int(managedCount) + + # Create Admin Server + # ======================= + print 'Creating Admin Server...' + cd('/Servers/AdminServer') + #set('ListenAddress', '%s-%s' % (domain_uid, admin_server_name_svc)) + set('ListenPort', admin_port) + set('Name', adminName) + + # Define the user password for weblogic + # ===================================== + cd('/Security/' + domainName + '/User/weblogic') + set('Name', user) + set('Password', password) + + # Create a cluster + # ====================== + print 'Creating cluster...' + cd('/') + cl=create(clusterName, 'Cluster') + + # Create managed servers + for index in range(0, ms_count): + cd('/') + msIndex = index+1 + cd('/') + name = '%s%s' % (managedNameBase, msIndex) + create(name, 'Server') + cd('/Servers/%s/' % name ) + print('managed server name is %s' % name); + set('ListenPort', ms_port) + set('NumOfRetriesBeforeMSIMode', 0) + set('RetryIntervalBeforeMSIMode', 1) + set('Cluster', clusterName) + + # Create Node Manager + # ======================= + print 'Creating Node Managers...' 
+ for machine in self.MACHINES: + cd('/') + create(machine, 'Machine') + cd('Machine/' + machine) + create(machine, 'NodeManager') + cd('NodeManager/' + machine) + for param in self.MACHINES[machine]: + set(param, self.MACHINES[machine][param]) + + + setOption('OverwriteDomain', 'true') + domainHome = self.domainParentDir + '/' + domainName + print 'Will create Base domain at ' + domainHome + + print 'Writing base domain...' + writeDomain(domainHome) + closeTemplate() + print 'Base domain created at ' + domainHome + return domainHome + + + def extendDomain(self, domainHome, exposeAdminT3Channel, t3ChannelPublicAddress, + t3ChannelPort): + print 'Extending domain at ' + domainHome + readDomain(domainHome) + setOption('AppDir', self.domainParentDir + '/applications') + + print 'ExposeAdminT3Channel %s with %s:%s ' % (exposeAdminT3Channel, t3ChannelPublicAddress, t3ChannelPort) + if 'true' == exposeAdminT3Channel: + self.enable_admin_channel(t3ChannelPublicAddress, t3ChannelPort) + + print 'Applying JRF templates...' + for extensionTemplate in self.JRF_12213_TEMPLATES['extensionTemplates']: + addTemplate(self.replaceTokens(extensionTemplate)) + + print 'Extension Templates added' + + print 'Preparing to update domain...' 
+ updateDomain() + print 'Domain updated successfully' + closeDomain() + return + + + ########################################################################### + # Helper Methods # + ########################################################################### + + def validateDirectory(self, dirName, create=False): + directory = os.path.realpath(dirName) + if not os.path.exists(directory): + if create: + os.makedirs(directory) + else: + message = 'Directory ' + directory + ' does not exist' + raise WLSTException(message) + elif not os.path.isdir(directory): + message = 'Directory ' + directory + ' is not a directory' + raise WLSTException(message) + return self.fixupPath(directory) + + + def fixupPath(self, path): + result = path + if path is not None: + result = path.replace('\\', '/') + return result + + + def replaceTokens(self, path): + result = path + if path is not None: + result = path.replace('@@ORACLE_HOME@@', oracleHome) + return result + + def enable_admin_channel(self, admin_channel_address, admin_channel_port): + if admin_channel_address == None or admin_channel_port == 'None': + return + cd('/') + admin_server_name = get('AdminServerName') + print('setting admin server t3channel for ' + admin_server_name) + cd('/Servers/' + admin_server_name) + create('T3Channel', 'NetworkAccessPoint') + cd('/Servers/' + admin_server_name + '/NetworkAccessPoint/T3Channel') + set('ListenPort', int(admin_channel_port)) + set('PublicPort', int(admin_channel_port)) + set('PublicAddress', admin_channel_address) + +############################# +# Entry point to the script # +############################# + +def usage(): + print sys.argv[0] + ' -oh -jh -parent -name ' + \ + '-user -password ' + \ + '-rcuDb -rcuPrefix -rcuSchemaPwd ' \ + '-adminListenPort -adminName ' \ + '-managedNameBase -managedServerPort -prodMode ' \ + '-managedServerCount -clusterName ' \ + '-exposeAdminT3Channel -t3ChannelPublicAddress
' \ + '-t3ChannelPort ' + sys.exit(0) + +# Uncomment for Debug only +#print str(sys.argv[0]) + " called with the following sys.argv array:" +#for index, arg in enumerate(sys.argv): +# print "sys.argv[" + str(index) + "] = " + str(sys.argv[index]) + +if len(sys.argv) < 16: + usage() + +#oracleHome will be passed by command line parameter -oh. +oracleHome = None +#javaHome will be passed by command line parameter -jh. +javaHome = None +#domainParentDir will be passed by command line parameter -parent. +domainParentDir = None +#domainUser is hard-coded to weblogic. You can change to other name of your choice. Command line paramter -user. +domainUser = 'weblogic' +#domainPassword will be passed by Command line parameter -password. +domainPassword = None +#rcuDb will be passed by command line parameter -rcuDb. +rcuDb = None +#change rcuSchemaPrefix to your infra schema prefix. Command line parameter -rcuPrefix. +rcuSchemaPrefix = 'DEV12' +#change rcuSchemaPassword to your infra schema password. Command line parameter -rcuSchemaPwd. 
+rcuSchemaPassword = None +exposeAdminT3Channel = None +t3ChannelPort = None +t3ChannelPublicAddress = None +i = 1 +while i < len(sys.argv): + if sys.argv[i] == '-oh': + oracleHome = sys.argv[i + 1] + i += 2 + elif sys.argv[i] == '-jh': + javaHome = sys.argv[i + 1] + i += 2 + elif sys.argv[i] == '-parent': + domainParentDir = sys.argv[i + 1] + i += 2 + elif sys.argv[i] == '-name': + domainName = sys.argv[i + 1] + i += 2 + elif sys.argv[i] == '-user': + domainUser = sys.argv[i + 1] + i += 2 + elif sys.argv[i] == '-password': + domainPassword = sys.argv[i + 1] + i += 2 + elif sys.argv[i] == '-rcuDb': + rcuDb = sys.argv[i + 1] + i += 2 + elif sys.argv[i] == '-rcuPrefix': + rcuSchemaPrefix = sys.argv[i + 1] + i += 2 + elif sys.argv[i] == '-rcuSchemaPwd': + rcuSchemaPassword = sys.argv[i + 1] + i += 2 + elif sys.argv[i] == '-adminListenPort': + adminListenPort = sys.argv[i + 1] + i += 2 + elif sys.argv[i] == '-adminName': + adminName = sys.argv[i + 1] + i += 2 + elif sys.argv[i] == '-managedNameBase': + managedNameBase = sys.argv[i + 1] + i += 2 + elif sys.argv[i] == '-managedServerPort': + managedServerPort = sys.argv[i + 1] + i += 2 + elif sys.argv[i] == '-prodMode': + prodMode = sys.argv[i + 1] + i += 2 + elif sys.argv[i] == '-managedServerCount': + managedCount = sys.argv[i + 1] + i += 2 + elif sys.argv[i] == '-clusterName': + clusterName = sys.argv[i + 1] + i += 2 + elif sys.argv[i] == '-t3ChannelPublicAddress': + t3ChannelPublicAddress = sys.argv[i + 1] + i += 2 + elif sys.argv[i] == '-t3ChannelPort': + t3ChannelPort = sys.argv[i + 1] + i += 2 + elif sys.argv[i] == '-exposeAdminT3Channel': + exposeAdminT3Channel = sys.argv[i + 1] + i += 2 + else: + print 'Unexpected argument switch at position ' + str(i) + ': ' + str(sys.argv[i]) + usage() + sys.exit(1) + +provisioner = Infra12213Provisioner(oracleHome, javaHome, domainParentDir, adminListenPort, adminName, managedNameBase, managedServerPort, prodMode, managedCount, clusterName) 
+provisioner.createInfraDomain(domainName, domainUser, domainPassword, adminListenPort, adminName, managedNameBase, managedServerPort, prodMode, managedCount, + clusterName, exposeAdminT3Channel, t3ChannelPublicAddress, t3ChannelPort) diff --git a/OracleAccessManagement/kubernetes/common/domain-template.yaml b/OracleAccessManagement/kubernetes/common/domain-template.yaml new file mode 100755 index 000000000..2d081de7d --- /dev/null +++ b/OracleAccessManagement/kubernetes/common/domain-template.yaml @@ -0,0 +1,119 @@ +# Copyright (c) 2017, 2021, Oracle and/or its affiliates. +# Licensed under the Universal Permissive License v 1.0 as shown at https://oss.oracle.com/licenses/upl. +# +# This is an example of how to define a Domain resource. +# +apiVersion: "weblogic.oracle/v8" +kind: Domain +metadata: + name: %DOMAIN_UID% + namespace: %NAMESPACE% + labels: + weblogic.domainUID: %DOMAIN_UID% +spec: + # The WebLogic Domain Home + domainHome: %DOMAIN_HOME% + + # The domain home source type + # Set to PersistentVolume for domain-in-pv, Image for domain-in-image, or FromModel for model-in-image + domainHomeSourceType: %DOMAIN_HOME_SOURCE_TYPE% + + # The WebLogic Server image that the Operator uses to start the domain + image: "%WEBLOGIC_IMAGE%" + + # imagePullPolicy defaults to "Always" if image version is :latest + imagePullPolicy: "%WEBLOGIC_IMAGE_PULL_POLICY%" + + # Identify which Secret contains the credentials for pulling an image + %WEBLOGIC_IMAGE_PULL_SECRET_PREFIX%imagePullSecrets: + %WEBLOGIC_IMAGE_PULL_SECRET_PREFIX%- name: %WEBLOGIC_IMAGE_PULL_SECRET_NAME% + + # Identify which Secret contains the WebLogic Admin credentials (note that there is an example of + # how to create that Secret at the end of this file) + webLogicCredentialsSecret: + name: %WEBLOGIC_CREDENTIALS_SECRET_NAME% + + # Whether to include the server out file into the pod's stdout, default is true + includeServerOutInPodLog: %INCLUDE_SERVER_OUT_IN_POD_LOG% + + # Whether to enable log home + 
%LOG_HOME_ON_PV_PREFIX%logHomeEnabled: %LOG_HOME_ENABLED% + + # Whether to write HTTP access log file to log home + %LOG_HOME_ON_PV_PREFIX%httpAccessLogInLogHome: %HTTP_ACCESS_LOG_IN_LOG_HOME% + + # The in-pod location for domain log, server logs, server out, introspector out, and Node Manager log files + %LOG_HOME_ON_PV_PREFIX%logHome: %LOG_HOME% + # An (optional) in-pod location for data storage of default and custom file stores. + # If not specified or the value is either not set or empty (e.g. dataHome: "") then the + # data storage directories are determined from the WebLogic domain home configuration. + dataHome: "%DATA_HOME%" + + + # serverStartPolicy legal values are "NEVER", "IF_NEEDED", or "ADMIN_ONLY" + # This determines which WebLogic Servers the Operator will start up when it discovers this Domain + # - "NEVER" will not start any server in the domain + # - "ADMIN_ONLY" will start up only the administration server (no managed servers will be started) + # - "IF_NEEDED" will start all non-clustered servers, including the administration server and clustered servers up to the replica count + serverStartPolicy: "%SERVER_START_POLICY%" + + serverPod: + # an (optional) list of environment variable to be set on the servers + env: + - name: JAVA_OPTIONS + value: "%JAVA_OPTIONS%" + - name: USER_MEM_ARGS + value: "-Djava.security.egd=file:/dev/./urandom -Xms256m -Xmx512m " + %OPTIONAL_SERVERPOD_RESOURCES% + %LOG_HOME_ON_PV_PREFIX%volumes: + %LOG_HOME_ON_PV_PREFIX%- name: weblogic-domain-storage-volume + %LOG_HOME_ON_PV_PREFIX% persistentVolumeClaim: + %LOG_HOME_ON_PV_PREFIX% claimName: %DOMAIN_PVC_NAME% + %LOG_HOME_ON_PV_PREFIX%volumeMounts: + %LOG_HOME_ON_PV_PREFIX%- mountPath: %DOMAIN_ROOT_DIR% + %LOG_HOME_ON_PV_PREFIX% name: weblogic-domain-storage-volume + + # adminServer is used to configure the desired behavior for starting the administration server. 
+ adminServer: + # serverStartState legal values are "RUNNING" or "ADMIN" + # "RUNNING" means the listed server will be started up to "RUNNING" mode + # "ADMIN" means the listed server will be start up to "ADMIN" mode + serverStartState: "RUNNING" + %EXPOSE_ANY_CHANNEL_PREFIX%adminService: + %EXPOSE_ANY_CHANNEL_PREFIX% channels: + # The Admin Server's NodePort + %EXPOSE_ADMIN_PORT_PREFIX% - channelName: default + %EXPOSE_ADMIN_PORT_PREFIX% nodePort: %ADMIN_NODE_PORT% + # Uncomment to export the T3Channel as a service + %EXPOSE_T3_CHANNEL_PREFIX% - channelName: T3Channel + + # clusters is used to configure the desired behavior for starting member servers of a cluster. + # If you use this entry, then the rules will be applied to ALL servers that are members of the named clusters. + clusters: + - clusterName: %CLUSTER_NAME% + serverStartState: "RUNNING" + serverPod: + # Instructs Kubernetes scheduler to prefer nodes for new cluster members where there are not + # already members of the same cluster. + affinity: + podAntiAffinity: + preferredDuringSchedulingIgnoredDuringExecution: + - weight: 100 + podAffinityTerm: + labelSelector: + matchExpressions: + - key: "weblogic.clusterName" + operator: In + values: + - $(CLUSTER_NAME) + topologyKey: "kubernetes.io/hostname" + replicas: %INITIAL_MANAGED_SERVER_REPLICAS% + # The number of managed servers to start for unlisted clusters + # replicas: 1 + + # Istio + %ISTIO_PREFIX%configuration: + %ISTIO_PREFIX% istio: + %ISTIO_PREFIX% enabled: %ISTIO_ENABLED% + %ISTIO_PREFIX% readinessPort: %ISTIO_READINESS_PORT% + diff --git a/OracleAccessManagement/kubernetes/common/jrf-domain-template.yaml b/OracleAccessManagement/kubernetes/common/jrf-domain-template.yaml new file mode 100755 index 000000000..ccd35b84f --- /dev/null +++ b/OracleAccessManagement/kubernetes/common/jrf-domain-template.yaml @@ -0,0 +1,123 @@ +# Copyright (c) 2017, 2021, Oracle and/or its affiliates. 
+# Licensed under the Universal Permissive License v 1.0 as shown at https://oss.oracle.com/licenses/upl. +# +# This is an example of how to define a Domain resource. +# +apiVersion: "weblogic.oracle/v8" +kind: Domain +metadata: + name: %DOMAIN_UID% + namespace: %NAMESPACE% + labels: + weblogic.domainUID: %DOMAIN_UID% +spec: + # The WebLogic Domain Home + domainHome: %DOMAIN_HOME% + + # The domain home source type + # Set to PersistentVolume for domain-in-pv, Image for domain-in-image, or FromModel for model-in-image + domainHomeSourceType: %DOMAIN_HOME_SOURCE_TYPE% + + # The WebLogic Server image that the Operator uses to start the domain + image: "%WEBLOGIC_IMAGE%" + + # imagePullPolicy defaults to "Always" if image version is :latest + imagePullPolicy: "%WEBLOGIC_IMAGE_PULL_POLICY%" + + # Identify which Secret contains the credentials for pulling an image + %WEBLOGIC_IMAGE_PULL_SECRET_PREFIX%imagePullSecrets: + %WEBLOGIC_IMAGE_PULL_SECRET_PREFIX%- name: %WEBLOGIC_IMAGE_PULL_SECRET_NAME% + + # Identify which Secret contains the WebLogic Admin credentials (note that there is an example of + # how to create that Secret at the end of this file) + webLogicCredentialsSecret: + name: %WEBLOGIC_CREDENTIALS_SECRET_NAME% + + # Whether to include the server out file into the pod's stdout, default is true + includeServerOutInPodLog: %INCLUDE_SERVER_OUT_IN_POD_LOG% + + # Whether to enable log home + %LOG_HOME_ON_PV_PREFIX%logHomeEnabled: %LOG_HOME_ENABLED% + + # Whether to write HTTP access log file to log home + %LOG_HOME_ON_PV_PREFIX%httpAccessLogInLogHome: %HTTP_ACCESS_LOG_IN_LOG_HOME% + + # The in-pod location for domain log, server logs, server out, introspector out, and Node Manager log files + %LOG_HOME_ON_PV_PREFIX%logHome: %LOG_HOME% + # An (optional) in-pod location for data storage of default and custom file stores. + # If not specified or the value is either not set or empty (e.g. 
dataHome: "") then the + # data storage directories are determined from the WebLogic domain home configuration. + dataHome: "%DATA_HOME%" + + # serverStartPolicy legal values are "NEVER", "IF_NEEDED", or "ADMIN_ONLY" + # This determines which WebLogic Servers the Operator will start up when it discovers this Domain + # - "NEVER" will not start any server in the domain + # - "ADMIN_ONLY" will start up only the administration server (no managed servers will be started) + # - "IF_NEEDED" will start all non-clustered servers, including the administration server and clustered servers up to the replica count + serverStartPolicy: "%SERVER_START_POLICY%" + + serverPod: + # an (optional) list of environment variable to be set on the servers + env: + - name: JAVA_OPTIONS + value: "%JAVA_OPTIONS%" + - name: USER_MEM_ARGS + value: "-Djava.security.egd=file:/dev/./urandom -Xms256m -Xmx1024m " + %OPTIONAL_SERVERPOD_RESOURCES% + %LOG_HOME_ON_PV_PREFIX%volumes: + %LOG_HOME_ON_PV_PREFIX%- name: weblogic-domain-storage-volume + %LOG_HOME_ON_PV_PREFIX% persistentVolumeClaim: + %LOG_HOME_ON_PV_PREFIX% claimName: %DOMAIN_PVC_NAME% + %LOG_HOME_ON_PV_PREFIX%volumeMounts: + %LOG_HOME_ON_PV_PREFIX%- mountPath: %DOMAIN_ROOT_DIR% + %LOG_HOME_ON_PV_PREFIX% name: weblogic-domain-storage-volume + + # adminServer is used to configure the desired behavior for starting the administration server. 
+ adminServer: + # serverStartState legal values are "RUNNING" or "ADMIN" + # "RUNNING" means the listed server will be started up to "RUNNING" mode + # "ADMIN" means the listed server will be start up to "ADMIN" mode + serverStartState: "RUNNING" + %EXPOSE_ANY_CHANNEL_PREFIX%adminService: + %EXPOSE_ANY_CHANNEL_PREFIX% channels: + # The Admin Server's NodePort + %EXPOSE_ADMIN_PORT_PREFIX% - channelName: default + %EXPOSE_ADMIN_PORT_PREFIX% nodePort: %ADMIN_NODE_PORT% + # Uncomment to export the T3Channel as a service + %EXPOSE_T3_CHANNEL_PREFIX% - channelName: T3Channel + serverPod: + # an (optional) list of environment variable to be set on the admin servers + env: + - name: USER_MEM_ARGS + value: "-Djava.security.egd=file:/dev/./urandom -Xms512m -Xmx1024m " + + # clusters is used to configure the desired behavior for starting member servers of a cluster. + # If you use this entry, then the rules will be applied to ALL servers that are members of the named clusters. + clusters: + - clusterName: %CLUSTER_NAME% + serverStartState: "RUNNING" + serverPod: + # Instructs Kubernetes scheduler to prefer nodes for new cluster members where there are not + # already members of the same cluster. 
+ affinity: + podAntiAffinity: + preferredDuringSchedulingIgnoredDuringExecution: + - weight: 100 + podAffinityTerm: + labelSelector: + matchExpressions: + - key: "weblogic.clusterName" + operator: In + values: + - $(CLUSTER_NAME) + topologyKey: "kubernetes.io/hostname" + replicas: %INITIAL_MANAGED_SERVER_REPLICAS% + # The number of managed servers to start for unlisted clusters + # replicas: 1 + + # Istio + %ISTIO_PREFIX%configuration: + %ISTIO_PREFIX% istio: + %ISTIO_PREFIX% enabled: %ISTIO_ENABLED% + %ISTIO_PREFIX% readinessPort: %ISTIO_READINESS_PORT% + diff --git a/OracleAccessManagement/kubernetes/common/utility.sh b/OracleAccessManagement/kubernetes/common/utility.sh new file mode 100755 index 000000000..aafc57021 --- /dev/null +++ b/OracleAccessManagement/kubernetes/common/utility.sh @@ -0,0 +1,928 @@ +#!/usr/bin/env bash +# Copyright (c) 2018, 2021, Oracle and/or its affiliates. +# Licensed under the Universal Permissive License v 1.0 as shown at https://oss.oracle.com/licenses/upl. + +# +# Utility functions that are shared by multiple scripts +# + +# +# Function to exit and print an error message +# $1 - text of message +function fail { + printError $* + exit 1 +} + +# Function to print an error message +function printError { + echo [ERROR] $* +} + +# Function to see if there is more than 1 input file. +# This could happen if the user has a properties file from +# running wdt discover domain on a on-prem domain +function checkInputFiles { + if [[ "${valuesInputFile}" =~ [,] ]] ; then + echo "Found a comma separated list of input files" + IFS=',' + read -a temp <<< "${valuesInputFile}" + + # We want to keep valuesInputFile pointing to the yaml since + # the validate function expects it. 
+ local extension=$(echo "${temp[0]}" | sed 's/^.*\.//') + if [ ${extension} == 'yaml' ]; then + valuesInputFile=${temp[0]} + valuesInputFile1=${temp[1]} + else + valuesInputFile=${temp[1]} + valuesInputFile1=${temp[0]} + fi + fi +} + +# +# Function to parse a yaml file and generate the bash exports +# $1 - Input filename +# $2 - Output filename +function parseYaml { + local s='[[:space:]]*' w='[a-zA-Z0-9_]*' fs=$(echo @|tr @ '\034') + sed -ne "s|^\($s\):|\1|" \ + -e "s|^\($s\)\($w\)$s:$s[\"']\(.*\)[\"']$s\$|\1$fs\2$fs\3|p" \ + -e "s|^\($s\)\($w\)$s:$s\(.*\)$s\$|\1$fs\2$fs\3|p" $1 | + awk -F$fs '{ + if (length($3) > 0) { + # javaOptions may contain tokens that are not allowed in export command + # we need to handle it differently. + if ($2=="javaOptions") { + printf("%s=%s\n", $2, $3); + } else { + printf("export %s=\"%s\"\n", $2, $3); + } + } + }' > $2 +} + +# +# Function to parse a properties file and generate the bash exports +# $1 - Input filename +# $2 - Output filename +function parseProperties { + while IFS='=' read -r key value + do + echo "export ${key}=\"${value}\"" >> $2 + done < $1 +} + +# +# Function to remove a file if it exists +# +function removeFileIfExists { + if [ -f $1 ]; then + rm $1 + fi +} + +# +# Function to parse the common parameter inputs file +# +function parseCommonInputs { + exportValuesFile=$(mktemp /tmp/export-values-XXXXXXXXX.sh) + tmpFile=$(mktemp /tmp/javaoptions_tmp-XXXXXXXXX.dat) + parseYaml ${valuesInputFile} ${exportValuesFile} + + if [ ! -z ${valuesInputFile1} ]; then + parseProperties ${valuesInputFile1} ${exportValuesFile} + fi + + if [ ! -f ${exportValuesFile} ]; then + echo Unable to locate the parsed output of ${valuesInputFile}. + fail 'The file ${exportValuesFile} could not be found.' 
+ fi + + # Define the environment variables that will be used to fill in template values + echo Input parameters being used + cat ${exportValuesFile} + echo + + # If we have 2 input files, we need to create a combined inputs file + # exportsValueFile contains all the properties already + # We just need to remove the term export from the file + if [ ! -z ${valuesInputFile1} ]; then + propsFile="domain.properties" + cat ${exportValuesFile} > ${propsFile} + sed -i 's/export //g' ${propsFile} + sed -i 's/"//g' ${propsFile} + valuesInputFile=${propsFile} + cat ${valuesInputFile} + fi + + # javaOptions may contain tokens that are not allowed in export command + # we need to handle it differently. + # we set the javaOptions variable that can be used later + tmpStr=`grep "javaOptions" ${exportValuesFile}` + javaOptions=${tmpStr//"javaOptions="/} + + # We exclude javaOptions from the exportValuesFile + grep -v "javaOptions" ${exportValuesFile} > ${tmpFile} + source ${tmpFile} + + rm ${exportValuesFile} ${tmpFile} +} + +# +# Function to delete a kubernetes object +# $1 object type +# $2 object name +# $3 yaml file +function deleteK8sObj { + # If the yaml file does not exist yet, unable to do the delete + if [ ! -f $3 ]; then + fail "Unable to delete object type $1 with name $2 because file $3 does not exist" + fi + + echo Checking if object type $1 with name $2 exists + K8SOBJ=`kubectl get $1 -n ${namespace} | grep $2 | wc | awk ' { print $1; }'` + if [ "${K8SOBJ}" = "1" ]; then + echo Deleting $2 using $3 + kubectl delete -f $3 + fi +} + +# +# Function to lowercase a value +# $1 - value to convert to lowercase +function toLower { + local lc=`echo $1 | tr "[:upper:]" "[:lower:]"` + echo "$lc" +} + +# +# Function to lowercase a value and make it a legal DNS1123 name +# $1 - value to convert to lowercase +function toDNS1123Legal { + local val=`echo $1 | tr "[:upper:]" "[:lower:]"` + val=${val//"_"/"-"} + echo "$val" +} + +# +# Check the state of a persistent volume. 
+# $1 - name of volume +# $2 - expected state of volume +function checkPvState { + + echo "Checking if the persistent volume ${1:?} is ${2:?}" + local pv_state=`kubectl get pv $1 -o jsonpath='{.status.phase}'` + attempts=0 + while [ ! "$pv_state" = "$2" ] && [ ! $attempts -eq 10 ]; do + attempts=$((attempts + 1)) + sleep 1 + pv_state=`kubectl get pv $1 -o jsonpath='{.status.phase}'` + done + if [ "$pv_state" != "$2" ]; then + fail "The persistent volume state should be $2 but is $pv_state" + fi +} + +# +# Function to check if a persistent volume exists +# $1 - name of volume +function checkPvExists { + + echo "Checking if the persistent volume ${1} exists" + PV_EXISTS=`kubectl get pv | grep ${1} | wc | awk ' { print $1; } '` + if [ "${PV_EXISTS}" = "1" ]; then + echo "The persistent volume ${1} already exists" + PV_EXISTS="true" + else + echo "The persistent volume ${1} does not exist" + PV_EXISTS="false" + fi +} + +# +# Function to check if a persistent volume claim exists +# $1 - name of persistent volume claim +# $2 - NameSpace +function checkPvcExists { + echo "Checking if the persistent volume claim ${1} in NameSpace ${2} exists" + PVC_EXISTS=`kubectl get pvc -n ${2} | grep ${1} | wc | awk ' { print $1; } '` + if [ "${PVC_EXISTS}" = "1" ]; then + echo "The persistent volume claim ${1} already exists in NameSpace ${2}" + PVC_EXISTS="true" + else + echo "The persistent volume claim ${1} does not exist in NameSpace ${2}" + PVC_EXISTS="false" + fi +} + +# Copy the inputs file from the command line into the output directory +# for the domain/operator unless the output directory already has an +# inputs file and the file is the same as the one from the commandline. 
+# $1 the inputs file from the command line +# $2 the file in the output directory that needs to be made the same as $1 +function copyInputsFileToOutputDirectory { + local from=$1 + local to=$2 + local doCopy="true" + if [ -f "${to}" ]; then + local difference=`diff ${from} ${to}` + if [ -z "${difference}" ]; then + # the output file already exists and is the same as the inputs file. + # don't make a copy. + doCopy="false" + fi + fi + if [ "${doCopy}" = "true" ]; then + cp ${from} ${to} + fi +} + +# +# Function to obtain the IP address of the kubernetes cluster. This information +# is used to form the URL's for accessing services that were deployed. +# +function getKubernetesClusterIP { + + # Get name of the current context + local CUR_CTX=`kubectl config current-context | awk ' { print $1; } '` + + # Get the name of the current cluster + local CUR_CLUSTER_CMD="kubectl config view -o jsonpath='{.contexts[?(@.name == \"${CUR_CTX}\")].context.cluster}' | awk ' { print $1; } '" + local CUR_CLUSTER=`eval ${CUR_CLUSTER_CMD}` + + # Get the server address for the current cluster + local SVR_ADDR_CMD="kubectl config view -o jsonpath='{.clusters[?(@.name == \"${CUR_CLUSTER}\")].cluster.server}' | awk ' { print $1; } '" + local SVR_ADDR=`eval ${SVR_ADDR_CMD}` + + # Server address is expected to be of the form http://address:port. Delimit + # string on the colon to obtain the address. + local array=(${SVR_ADDR//:/ }) + K8S_IP="${array[1]/\/\//}" + +} + +# +# Function to set the serverPodResources variable for including into the generated +# domain.yaml, base on the serverPod resource requests and limits input values, +# if specified. +# The serverPodResources variable remains unset if none of the input values are provided. 
+# +function buildServerPodResources { + + if [ -n "${serverPodMemoryRequest}" ]; then + local memoryRequest=" memory\: \"${serverPodMemoryRequest}\"\n" + fi + if [ -n "${serverPodCpuRequest}" ]; then + local cpuRequest=" cpu\: \"${serverPodCpuRequest}\"\n" + fi + if [ -n "${memoryRequest}" ] || [ -n "${cpuRequest}" ]; then + local requests=" requests\: \n$memoryRequest $cpuRequest" + fi + + if [ -n "${serverPodMemoryLimit}" ]; then + local memoryLimit=" memory\: \"${serverPodMemoryLimit}\"\n" + fi + if [ -n "${serverPodCpuLimit}" ]; then + local cpuLimit=" cpu\: \"${serverPodCpuLimit}\"\n" + fi + if [ -n "${memoryLimit}" ] || [ -n "${cpuLimit}" ]; then + local limits=" limits\: \n$memoryLimit $cpuLimit" + fi + + if [ -n "${requests}" ] || [ -n "${limits}" ]; then + # build resources element and remove last '\n' + serverPodResources=$(echo "resources\:\n${requests}${limits}" | sed -e 's/\\n$//') + fi +} + +# +# Function to generate the properties and yaml files for creating a domain +# +function createFiles { + + update=false + if [ "$#" == 1 ]; then + echo Trying to update the domain + update=true + fi + + # Make sure the output directory has a copy of the inputs file. + # The user can either pre-create the output directory, put the inputs + # file there, and create the domain from it, or the user can put the + # inputs file some place else and let this script create the output directory + # (if needed) and copy the inputs file there. 
+ echo createFiles - valuesInputFile is ${valuesInputFile} + copyInputsFileToOutputDirectory ${valuesInputFile} "${domainOutputDir}/create-domain-inputs.yaml" + + if [ "${domainHomeInImage}" == "true" ]; then + if [ -z "${domainHomeImageBase}" ]; then + fail "Please specify domainHomeImageBase in your input YAML" + fi + else + if [ -z "${image}" ]; then + fail "Please specify image in your input YAML" + fi + fi + + dcrOutput="${domainOutputDir}/domain.yaml" + + domainName=${domainUID} + + enabledPrefix="" # uncomment the feature + disabledPrefix="# " # comment out the feature + + exposeAnyChannelPrefix="${disabledPrefix}" + if [ "${exposeAdminT3Channel}" = true ]; then + exposeAdminT3ChannelPrefix="${enabledPrefix}" + exposeAnyChannelPrefix="${enabledPrefix}" + # set t3PublicAddress if not set + if [ -z "${t3PublicAddress}" ]; then + getKubernetesClusterIP + t3PublicAddress="${K8S_IP}" + fi + else + exposeAdminT3ChannelPrefix="${disabledPrefix}" + fi + + if [ "${exposeAdminNodePort}" = true ]; then + exposeAdminNodePortPrefix="${enabledPrefix}" + exposeAnyChannelPrefix="${enabledPrefix}" + else + exposeAdminNodePortPrefix="${disabledPrefix}" + fi + + if [ "${istioEnabled}" == "true" ]; then + istioPrefix="${enabledPrefix}" + else + istioPrefix="${disabledPrefix}" + fi + + # The FromModel, MII (model-in-image), and WDT_DOMAIN_TYPE updates in this script + # must remain even though they are not referenced by a sample. They're used by the + # Operator integration test code. If you're interested in MII, + # see './kubernetes/samples/scripts/create-weblogic-domain/model-in-image'. 
+ + # MII settings are used for model-in-image integration testing + if [ "${domainHomeSourceType}" == "FromModel" ]; then + miiPrefix="${enabledPrefix}" + else + miiPrefix="${disabledPrefix}" + fi + + # MII settings are used for model-in-image integration testing + if [ -z "${miiConfigMap}" ]; then + miiConfigMapPrefix="${disabledPrefix}" + else + miiConfigMapPrefix="${enabledPrefix}" + fi + + # For some parameters, use the default value if not defined. + if [ -z "${domainPVMountPath}" ]; then + domainPVMountPath="/shared" + fi + + if [ -z "${logHome}" ]; then + logHome="${domainPVMountPath}/logs/${domainUID}" + fi + + if [ -z "${httpAccessLogInLogHome}" ]; then + httpAccessLogInLogHome="true" + fi + + if [ -z "${dataHome}" ]; then + dataHome="" + fi + + if [ -z "${persistentVolumeClaimName}" ]; then + persistentVolumeClaimName="${domainUID}-weblogic-sample-pvc" + fi + + if [ -z "${weblogicCredentialsSecretName}" ]; then + weblogicCredentialsSecretName="${domainUID}-weblogic-credentials" + fi + + if [ "${domainHomeInImage}" == "true" ]; then + domainPropertiesOutput="${domainOutputDir}/domain.properties" + domainHome="${domainHome:-/u01/oracle/user_projects/domains/${domainName}}" + + # Generate the properties file that will be used when creating the weblogic domain + echo Generating ${domainPropertiesOutput} from ${domainPropertiesInput} + + cp ${domainPropertiesInput} ${domainPropertiesOutput} + sed -i -e "s:%DOMAIN_NAME%:${domainName}:g" ${domainPropertiesOutput} + sed -i -e "s:%DOMAIN_HOME%:${domainHome}:g" ${domainPropertiesOutput} + sed -i -e "s:%ADMIN_PORT%:${adminPort}:g" ${domainPropertiesOutput} + sed -i -e "s:%ADMIN_SERVER_SSL_PORT%:${adminServerSSLPort}:g" ${domainPropertiesOutput} + sed -i -e "s:%ADMIN_SERVER_NAME%:${adminServerName}:g" ${domainPropertiesOutput} + sed -i -e "s:%MANAGED_SERVER_PORT%:${managedServerPort}:g" ${domainPropertiesOutput} + sed -i -e "s:%MANAGED_SERVER_SSL_PORT%:${managedServerSSLPort}:g" ${domainPropertiesOutput} + sed -i -e 
"s:%MANAGED_SERVER_NAME_BASE%:${managedServerNameBase}:g" ${domainPropertiesOutput} + sed -i -e "s:%CONFIGURED_MANAGED_SERVER_COUNT%:${configuredManagedServerCount}:g" ${domainPropertiesOutput} + sed -i -e "s:%CLUSTER_NAME%:${clusterName}:g" ${domainPropertiesOutput} + sed -i -e "s:%SSL_ENABLED%:${sslEnabled}:g" ${domainPropertiesOutput} + sed -i -e "s:%PRODUCTION_MODE_ENABLED%:${productionModeEnabled}:g" ${domainPropertiesOutput} + sed -i -e "s:%CLUSTER_TYPE%:${clusterType}:g" ${domainPropertiesOutput} + sed -i -e "s;%JAVA_OPTIONS%;${javaOptions};g" ${domainPropertiesOutput} + sed -i -e "s:%T3_CHANNEL_PORT%:${t3ChannelPort}:g" ${domainPropertiesOutput} + sed -i -e "s:%T3_PUBLIC_ADDRESS%:${t3PublicAddress}:g" ${domainPropertiesOutput} + sed -i -e "s:%EXPOSE_T3_CHANNEL%:${exposeAdminT3Channel}:g" ${domainPropertiesOutput} + sed -i -e "s:%FMW_DOMAIN_TYPE%:${fmwDomainType}:g" ${domainPropertiesOutput} + sed -i -e "s:%WDT_DOMAIN_TYPE%:${wdtDomainType}:g" ${domainPropertiesOutput} + sed -i -e "s:%ADMIN_USER_NAME%:${username}:g" ${domainPropertiesOutput} + sed -i -e "s:%ADMIN_USER_PASS%:${password}:g" ${domainPropertiesOutput} + sed -i -e "s:%RCU_SCHEMA_PREFIX%:${rcuSchemaPrefix}:g" ${domainPropertiesOutput} + sed -i -e "s:%RCU_SCHEMA_PASSWORD%:${rcuSchemaPassword}:g" ${domainPropertiesOutput} + sed -i -e "s|%RCU_DB_CONN_STRING%|${rcuDatabaseURL}|g" ${domainPropertiesOutput} + + if [ -z "${image}" ]; then + # calculate the internal name to tag the generated image + defaultImageName="domain-home-in-image" + baseTag=${domainHomeImageBase#*:} + defaultImageName=${defaultImageName}:${baseTag:-"latest"} + sed -i -e "s|%IMAGE_NAME%|${defaultImageName}|g" ${domainPropertiesOutput} + export BUILD_IMAGE_TAG=${defaultImageName} + else + sed -i -e "s|%IMAGE_NAME%|${image}|g" ${domainPropertiesOutput} + export BUILD_IMAGE_TAG=${image} + fi + else + # we're in the domain in PV case + + wdtVersion="${WDT_VERSION:-${wdtVersion}}" + httpsProxy="${https_proxy}" + + 
createJobOutput="${domainOutputDir}/create-domain-job.yaml" + deleteJobOutput="${domainOutputDir}/delete-domain-job.yaml" + + if [ -z "${domainHome}" ]; then + domainHome="${domainPVMountPath}/domains/${domainUID}" + fi + + # Use the default value if not defined. + if [ -z "${createDomainScriptsMountPath}" ]; then + createDomainScriptsMountPath="/u01/weblogic" + fi + + if [ "${update}" == "true" ]; then + createDomainScriptName="update-domain-job.sh" + elif [ -z "${createDomainScriptName}" ]; then + createDomainScriptName="create-domain-job.sh" + fi + echo createDomainScriptName is ${createDomainScriptName} + + # Must escape the ':' value in image for sed to properly parse and replace + image=$(echo ${image} | sed -e "s/\:/\\\:/g") + + # Generate the yaml to create the kubernetes job that will create the weblogic domain + echo Generating ${createJobOutput} + + cp ${createJobInput} ${createJobOutput} + sed -i -e "s:%NAMESPACE%:$namespace:g" ${createJobOutput} + sed -i -e "s:%WEBLOGIC_CREDENTIALS_SECRET_NAME%:${weblogicCredentialsSecretName}:g" ${createJobOutput} + sed -i -e "s:%WEBLOGIC_IMAGE%:${image}:g" ${createJobOutput} + sed -i -e "s:%WEBLOGIC_IMAGE_PULL_POLICY%:${imagePullPolicy}:g" ${createJobOutput} + sed -i -e "s:%WEBLOGIC_IMAGE_PULL_SECRET_NAME%:${imagePullSecretName}:g" ${createJobOutput} + sed -i -e "s:%WEBLOGIC_IMAGE_PULL_SECRET_PREFIX%:${imagePullSecretPrefix}:g" ${createJobOutput} + sed -i -e "s:%DOMAIN_UID%:${domainUID}:g" ${createJobOutput} + sed -i -e "s:%DOMAIN_NAME%:${domainName}:g" ${createJobOutput} + sed -i -e "s:%DOMAIN_HOME%:${domainHome}:g" ${createJobOutput} + sed -i -e "s:%SSL_ENABLED%:${sslEnabled}:g" ${createJobOutput} + sed -i -e "s:%PRODUCTION_MODE_ENABLED%:${productionModeEnabled}:g" ${createJobOutput} + sed -i -e "s:%ADMIN_SERVER_NAME%:${adminServerName}:g" ${createJobOutput} + sed -i -e "s:%ADMIN_SERVER_NAME_SVC%:${adminServerNameSVC}:g" ${createJobOutput} + sed -i -e "s:%ADMIN_PORT%:${adminPort}:g" ${createJobOutput} + sed -i -e 
"s:%ADMIN_SERVER_SSL_PORT%:${adminServerSSLPort}:g" ${createJobOutput} + sed -i -e "s:%CONFIGURED_MANAGED_SERVER_COUNT%:${configuredManagedServerCount}:g" ${createJobOutput} + sed -i -e "s:%MANAGED_SERVER_NAME_BASE%:${managedServerNameBase}:g" ${createJobOutput} + sed -i -e "s:%MANAGED_SERVER_NAME_BASE_SVC%:${managedServerNameBaseSVC}:g" ${createJobOutput} + sed -i -e "s:%MANAGED_SERVER_PORT%:${managedServerPort}:g" ${createJobOutput} + sed -i -e "s:%MANAGED_SERVER_SSL_PORT%:${managedServerSSLPort}:g" ${createJobOutput} + sed -i -e "s:%T3_CHANNEL_PORT%:${t3ChannelPort}:g" ${createJobOutput} + sed -i -e "s:%T3_PUBLIC_ADDRESS%:${t3PublicAddress}:g" ${createJobOutput} + sed -i -e "s:%CLUSTER_NAME%:${clusterName}:g" ${createJobOutput} + sed -i -e "s:%CLUSTER_TYPE%:${clusterType}:g" ${createJobOutput} + sed -i -e "s:%DOMAIN_PVC_NAME%:${persistentVolumeClaimName}:g" ${createJobOutput} + sed -i -e "s:%DOMAIN_ROOT_DIR%:${domainPVMountPath}:g" ${createJobOutput} + sed -i -e "s:%CREATE_DOMAIN_SCRIPT_DIR%:${createDomainScriptsMountPath}:g" ${createJobOutput} + sed -i -e "s:%CREATE_DOMAIN_SCRIPT%:${createDomainScriptName}:g" ${createJobOutput} + # extra entries for FMW Infra domains + sed -i -e "s:%RCU_CREDENTIALS_SECRET_NAME%:${rcuCredentialsSecret}:g" ${createJobOutput} + sed -i -e "s:%CUSTOM_RCUPREFIX%:${rcuSchemaPrefix}:g" ${createJobOutput} + sed -i -e "s|%CUSTOM_CONNECTION_STRING%|${rcuDatabaseURL}|g" ${createJobOutput} + sed -i -e "s:%EXPOSE_T3_CHANNEL_PREFIX%:${exposeAdminT3Channel}:g" ${createJobOutput} + sed -i -e "s:%FRONTEND_HOST%:${frontEndHost}:g" ${createJobOutput} + sed -i -e "s:%FRONTEND_PORT%:${frontEndPort}:g" ${createJobOutput} + # entries for Istio + sed -i -e "s:%ISTIO_PREFIX%:${istioPrefix}:g" ${createJobOutput} + sed -i -e "s:%ISTIO_ENABLED%:${istioEnabled}:g" ${createJobOutput} + sed -i -e "s:%ISTIO_READINESS_PORT%:${istioReadinessPort}:g" ${createJobOutput} + sed -i -e "s:%WDT_VERSION%:${wdtVersion}:g" ${createJobOutput} + #sed -i -e 
"s|%DOMAIN_TYPE%|${domain_type}|g" ${createJobOutput} + sed -i -e "s|%PROXY_VAL%|${httpsProxy}|g" ${createJobOutput} + + # Generate the yaml to create the kubernetes job that will delete the weblogic domain_home folder + echo Generating ${deleteJobOutput} + + cp ${deleteJobInput} ${deleteJobOutput} + sed -i -e "s:%NAMESPACE%:$namespace:g" ${deleteJobOutput} + sed -i -e "s:%WEBLOGIC_IMAGE%:${image}:g" ${deleteJobOutput} + sed -i -e "s:%WEBLOGIC_IMAGE_PULL_POLICY%:${imagePullPolicy}:g" ${deleteJobOutput} + sed -i -e "s:%WEBLOGIC_CREDENTIALS_SECRET_NAME%:${weblogicCredentialsSecretName}:g" ${deleteJobOutput} + sed -i -e "s:%WEBLOGIC_IMAGE_PULL_SECRET_NAME%:${imagePullSecretName}:g" ${deleteJobOutput} + sed -i -e "s:%WEBLOGIC_IMAGE_PULL_SECRET_PREFIX%:${imagePullSecretPrefix}:g" ${deleteJobOutput} + sed -i -e "s:%DOMAIN_UID%:${domainUID}:g" ${deleteJobOutput} + sed -i -e "s:%DOMAIN_NAME%:${domainName}:g" ${deleteJobOutput} + sed -i -e "s:%DOMAIN_HOME%:${domainHome}:g" ${deleteJobOutput} + sed -i -e "s:%DOMAIN_PVC_NAME%:${persistentVolumeClaimName}:g" ${deleteJobOutput} + sed -i -e "s:%DOMAIN_ROOT_DIR%:${domainPVMountPath}:g" ${deleteJobOutput} + fi + + if [ "${domainHomeSourceType}" == "FromModel" ]; then + echo domainHomeSourceType is FromModel + # leave domainHomeSourceType to FromModel + if [ "${logHomeOnPV}" == "true" ]; then + logHomeOnPVPrefix="${enabledPrefix}" + else + logHomeOnPVPrefix="${disabledPrefix}" + fi + elif [ "${domainHomeInImage}" == "true" ]; then + domainHomeSourceType="Image" + if [ "${logHomeOnPV}" == "true" ]; then + logHomeOnPVPrefix="${enabledPrefix}" + else + logHomeOnPVPrefix="${disabledPrefix}" + fi + else + domainHomeSourceType="PersistentVolume" + logHomeOnPVPrefix="${enabledPrefix}" + logHomeOnPV=true + fi + + # Generate the yaml file for creating the domain resource + # We want to use wdt's extractDomainResource.sh to get the domain resource + # for domain on pv use case. 
For others, generate domain resource here + + if [ "${domainHomeSourceType}" != "PersistentVolume" ] || [ "${wdtDomainType}" != "WLS" ] || + [ "${useWdt}" != true ]; then + echo Generating ${dcrOutput} + + cp ${dcrInput} ${dcrOutput} + sed -i -e "s:%DOMAIN_UID%:${domainUID}:g" ${dcrOutput} + sed -i -e "s:%NAMESPACE%:$namespace:g" ${dcrOutput} + sed -i -e "s:%DOMAIN_HOME%:${domainHome}:g" ${dcrOutput} + sed -i -e "s:%DOMAIN_HOME_SOURCE_TYPE%:${domainHomeSourceType}:g" ${dcrOutput} + sed -i -e "s:%WEBLOGIC_IMAGE_PULL_POLICY%:${imagePullPolicy}:g" ${dcrOutput} + sed -i -e "s:%WEBLOGIC_IMAGE_PULL_SECRET_PREFIX%:${imagePullSecretPrefix}:g" ${dcrOutput} + sed -i -e "s:%WEBLOGIC_IMAGE_PULL_SECRET_NAME%:${imagePullSecretName}:g" ${dcrOutput} + sed -i -e "s:%WEBLOGIC_CREDENTIALS_SECRET_NAME%:${weblogicCredentialsSecretName}:g" ${dcrOutput} + sed -i -e "s:%INCLUDE_SERVER_OUT_IN_POD_LOG%:${includeServerOutInPodLog}:g" ${dcrOutput} + sed -i -e "s:%LOG_HOME_ON_PV_PREFIX%:${logHomeOnPVPrefix}:g" ${dcrOutput} + sed -i -e "s:%LOG_HOME_ENABLED%:${logHomeOnPV}:g" ${dcrOutput} + sed -i -e "s:%LOG_HOME%:${logHome}:g" ${dcrOutput} + sed -i -e "s:%HTTP_ACCESS_LOG_IN_LOG_HOME%:${httpAccessLogInLogHome}:g" ${dcrOutput} + sed -i -e "s:%DATA_HOME%:${dataHome}:g" ${dcrOutput} + sed -i -e "s:%SERVER_START_POLICY%:${serverStartPolicy}:g" ${dcrOutput} + sed -i -e "s;%JAVA_OPTIONS%;${javaOptions};g" ${dcrOutput} + sed -i -e "s:%DOMAIN_PVC_NAME%:${persistentVolumeClaimName}:g" ${dcrOutput} + sed -i -e "s:%DOMAIN_ROOT_DIR%:${domainPVMountPath}:g" ${dcrOutput} + + if [ "${istioEnabled}" == "true" ]; then + exposeAdminNodePortPrefix="${disabledPrefix}" + fi + + sed -i -e "s:%EXPOSE_T3_CHANNEL_PREFIX%:${exposeAdminT3ChannelPrefix}:g" ${dcrOutput} + sed -i -e "s:%EXPOSE_ANY_CHANNEL_PREFIX%:${exposeAnyChannelPrefix}:g" ${dcrOutput} + sed -i -e "s:%EXPOSE_ADMIN_PORT_PREFIX%:${exposeAdminNodePortPrefix}:g" ${dcrOutput} + sed -i -e "s:%ADMIN_NODE_PORT%:${adminNodePort}:g" ${dcrOutput} + sed -i -e 
"s:%CLUSTER_NAME%:${clusterName}:g" ${dcrOutput} + sed -i -e "s:%INITIAL_MANAGED_SERVER_REPLICAS%:${initialManagedServerReplicas}:g" ${dcrOutput} + sed -i -e "s:%ISTIO_PREFIX%:${istioPrefix}:g" ${dcrOutput} + sed -i -e "s:%ISTIO_ENABLED%:${istioEnabled}:g" ${dcrOutput} + sed -i -e "s:%ISTIO_READINESS_PORT%:${istioReadinessPort}:g" ${dcrOutput} + # MII settings are used for model-in-image integration testing + sed -i -e "s:%MII_PREFIX%:${miiPrefix}:g" ${dcrOutput} + sed -i -e "s:%MII_CONFIG_MAP_PREFIX%:${miiConfigMapPrefix}:g" ${dcrOutput} + sed -i -e "s:%MII_CONFIG_MAP%:${miiConfigMap}:g" ${dcrOutput} + sed -i -e "s:%WDT_DOMAIN_TYPE%:${wdtDomainType}:g" ${dcrOutput} + + buildServerPodResources + if [ -z "${serverPodResources}" ]; then + sed -i -e "/%OPTIONAL_SERVERPOD_RESOURCES%/d" ${dcrOutput} + else + if [[ $(uname) -eq "Darwin" ]]; then + serverPodResources=$(echo "${serverPodResources}" | sed -e 's/\\n/%NEWLINE%/g') + sed -i -e "s:%OPTIONAL_SERVERPOD_RESOURCES%:${serverPodResources}:g" ${dcrOutput} + sed -i -e $'s|%NEWLINE%|\\\n|g' ${dcrOutput} + else + sed -i -e "s:%OPTIONAL_SERVERPOD_RESOURCES%:${serverPodResources}:g" ${dcrOutput} + fi + fi + + if [ "${domainHomeInImage}" == "true" ]; then + + # now we know which image to use, update the domain yaml file + if [ -z $image ]; then + sed -i -e "s|%WEBLOGIC_IMAGE%|${defaultImageName}|g" ${dcrOutput} + else + sed -i -e "s|%WEBLOGIC_IMAGE%|${image}|g" ${dcrOutput} + fi + else + sed -i -e "s:%WEBLOGIC_IMAGE%:${image}:g" ${dcrOutput} + fi + fi + + # Remove any "...yaml-e" and "...properties-e" files left over from running sed + rm -f ${domainOutputDir}/*.yaml-e + rm -f ${domainOutputDir}/*.properties-e + +} + + +# +# Function to markup the wdt model file +# +function updateModelFile { + # Update the wdt model file with kubernetes section + modelFile="${domainOutputDir}/tmp/wdt_model.yaml" + cat ${scriptDir}/wdt_k8s_model_template.yaml >> ${modelFile} + + sed -i -e "s:%DOMAIN_UID%:${domainUID}:g" ${modelFile} + sed 
-i -e "s:%NAMESPACE%:$namespace:g" ${modelFile} + sed -i -e "s:%DOMAIN_HOME%:${domainHome}:g" ${modelFile} + sed -i -e "s:%DOMAIN_HOME_SOURCE_TYPE%:${domainHomeSourceType}:g" ${modelFile} + sed -i -e "s:%WEBLOGIC_IMAGE_PULL_POLICY%:${imagePullPolicy}:g" ${modelFile} + sed -i -e "s:%WEBLOGIC_IMAGE_PULL_SECRET_PREFIX%:${imagePullSecretPrefix}:g" ${modelFile} + sed -i -e "s:%WEBLOGIC_IMAGE_PULL_SECRET_NAME%:${imagePullSecretName}:g" ${modelFile} + sed -i -e "s:%WEBLOGIC_CREDENTIALS_SECRET_NAME%:${weblogicCredentialsSecretName}:g" ${modelFile} + sed -i -e "s:%INCLUDE_SERVER_OUT_IN_POD_LOG%:${includeServerOutInPodLog}:g" ${modelFile} + sed -i -e "s:%LOG_HOME_ON_PV_PREFIX%:${logHomeOnPVPrefix}:g" ${modelFile} + sed -i -e "s:%LOG_HOME_ENABLED%:${logHomeOnPV}:g" ${modelFile} + sed -i -e "s:%LOG_HOME%:${logHome}:g" ${modelFile} + sed -i -e "s:%HTTP_ACCESS_LOG_IN_LOG_HOME%:${httpAccessLogInLogHome}:g" ${modelFile} + sed -i -e "s:%DATA_HOME%:${dataHome}:g" ${modelFile} + sed -i -e "s:%SERVER_START_POLICY%:${serverStartPolicy}:g" ${modelFile} + sed -i -e "s;%JAVA_OPTIONS%;${javaOptions};g" ${modelFile} + sed -i -e "s:%DOMAIN_PVC_NAME%:${persistentVolumeClaimName}:g" ${modelFile} + sed -i -e "s:%DOMAIN_ROOT_DIR%:${domainPVMountPath}:g" ${modelFile} + + if [ "${istioEnabled}" == "true" ]; then + exposeAdminNodePortPrefix="${disabledPrefix}" + fi + + sed -i -e "s:%EXPOSE_T3_CHANNEL_PREFIX%:${exposeAdminT3ChannelPrefix}:g" ${modelFile} + sed -i -e "s:%EXPOSE_ANY_CHANNEL_PREFIX%:${exposeAnyChannelPrefix}:g" ${modelFile} + sed -i -e "s:%EXPOSE_ADMIN_PORT_PREFIX%:${exposeAdminNodePortPrefix}:g" ${modelFile} + sed -i -e "s:%ADMIN_NODE_PORT%:${adminNodePort}:g" ${modelFile} + sed -i -e "s:%CLUSTER_NAME%:${clusterName}:g" ${modelFile} + sed -i -e "s:%INITIAL_MANAGED_SERVER_REPLICAS%:${initialManagedServerReplicas}:g" ${modelFile} + sed -i -e "s:%ISTIO_PREFIX%:${istioPrefix}:g" ${modelFile} + sed -i -e "s:%ISTIO_ENABLED%:${istioEnabled}:g" ${modelFile} + sed -i -e 
"s:%ISTIO_READINESS_PORT%:${istioReadinessPort}:g" ${modelFile} + # MII settings are used for model-in-image integration testing + sed -i -e "s:%MII_PREFIX%:${miiPrefix}:g" ${modelFile} + sed -i -e "s:%MII_CONFIG_MAP_PREFIX%:${miiConfigMapPrefix}:g" ${modelFile} + sed -i -e "s:%MII_CONFIG_MAP%:${miiConfigMap}:g" ${modelFile} + sed -i -e "s:%WDT_DOMAIN_TYPE%:${wdtDomainType}:g" ${modelFile} + + buildServerPodResources + if [ -z "${serverPodResources}" ]; then + sed -i -e "/%OPTIONAL_SERVERPOD_RESOURCES%/d" ${modelFile} + else + if [[ $(uname) -eq "Darwin" ]]; then + serverPodResources=$(echo "${serverPodResources}" | sed -e 's/\\n/%NEWLINE%/g') + sed -i -e "s:%OPTIONAL_SERVERPOD_RESOURCES%:${serverPodResources}:g" ${modelFile} + sed -i -e $'s|%NEWLINE%|\\\n|g' ${modelFile} + else + sed -i -e "s:%OPTIONAL_SERVERPOD_RESOURCES%:${serverPodResources}:g" ${modelFile} + fi + fi + + sed -i -e "s:%WEBLOGIC_IMAGE%:${image}:g" ${modelFile} +} + +# +# Function to create the domain resource +# +function createDomainResource { + kubectl apply -f ${dcrOutput} + + attempts=0 + while [ "$DCR_AVAIL" != "1" ] && [ ! $attempts -eq 10 ]; do + attempts=$((attempts + 1)) + sleep 1 + DCR_AVAIL=`kubectl get domain ${domainUID} -n ${namespace} | grep ${domainUID} | wc | awk ' { print $1; } '` + done + if [ "${DCR_AVAIL}" != "1" ]; then + fail "The domain resource ${domainUID} was not found" + fi +} + +# +# Function to create a domain +# $1 - boolean value indicating the location of the domain home +# true means domain home in image +# false means domain home on PV +# +function createDomain { + if [ "$#" != 1 ]; then + fail "The function must be called with domainHomeInImage parameter." 
+ fi + + domainHomeInImage="${1}" + if [ "true" != "${domainHomeInImage}" ] && [ "false" != "${domainHomeInImage}" ]; then + fail "The value of domainHomeInImage must be true or false: ${domainHomeInImage}" + fi + + # Setup the environment for running this script and perform initial validation checks + initialize + + # Generate files for creating the domain + createFiles + + # Check that the domain secret exists and contains the required elements + validateDomainSecret + + # Validate the domain's persistent volume claim + if [ "${doValidation}" == true ] && [ "${domainHomeInImage}" == false -o "${logHomeOnPV}" == true ]; then + validateDomainPVC + fi + + # Create the WebLogic domain home + createDomainHome + + if [ "${executeIt}" = true ]; then + createDomainResource + fi + + # Print a summary + printSummary +} + +# +# Function to update a domain +# $1 - boolean value indicating the location of the domain home +# true means domain home in image +# false means domain home on PV +# +function updateDomain { + + domainHomeInImage="false" + + # Setup the environment for running this script and perform initial validation checks + initialize + + # Generate files for creating the domain + createFiles update + + # Check that the domain secret exists and contains the required elements + validateDomainSecret + + # Validate the domain's persistent volume claim + if [ "${doValidation}" == true ]; then + validateDomainPVC + fi + + # Create the WebLogic domain home + updateDomainHome + + if [ "${executeIt}" = true ]; then + createDomainResource + fi + + # Print a summary + printSummary +} + +# checks if a given pod in a NameSpace has been deleted +function checkPodDelete(){ + + pod=$1 + ns=$2 + status="Terminating" + + if [ -z ${1} ]; then + echo "No Pod name provided " + exit -1 + fi + + if [ -z ${2} ]; then + echo "No NameSpace provided " + exit -2 + fi + + echo "Checking Status for Pod [$pod] in namespace [${ns}]" + max=10 + count=1 + while [ $count -le $max ] ; do + sleep 5 + 
pod=`kubectl get po/$1 -n ${ns} | grep -v NAME | awk '{print $1}'` + if [ -z ${pod} ]; then + status="Terminated" + echo "Pod [$1] removed from nameSpace [${ns}]" + break; + fi + count=`expr $count + 1` + echo "Pod [$pod] Status [${status}]" + done + + if [ $count -gt $max ] ; then + echo "[ERROR] The Pod[$1] in NameSpace [$ns] could not be deleted in 50s"; + exit 1 + fi +} + +# Checks if all container(s) in a pod are running state based on READY column +#NAME READY STATUS RESTARTS AGE +#domain1-adminserver 1/1 Running 0 4m + +function checkPodState(){ + + status="NotReady" + max=60 + count=1 + + pod=$1 + ns=$2 + state=${3:-1/1} + + echo "Checking Pod READY column for State [$state]" + pname=`kubectl get po -n ${ns} | grep -w ${pod} | awk '{print $1}'` + if [ -z ${pname} ]; then + echo "No such pod [$pod] exists in NameSpace [$ns] " + exit -1 + fi + + rcode=`kubectl get po ${pname} -n ${ns} | grep -w ${pod} | awk '{print $2}'` + [[ ${rcode} -eq "${state}" ]] && status="Ready" + + while [ ${status} != "Ready" -a $count -le $max ] ; do + sleep 5 + rcode=`kubectl get po/$pod -n ${ns} | grep -v NAME | awk '{print $2}'` + [[ ${rcode} -eq "1/1" ]] && status="Ready" + echo "Pod [$1] Status is ${status} Iter [$count/$max]" + count=`expr $count + 1` + done + if [ $count -gt $max ] ; then + echo "[ERROR] Unable to start the Pod [$pod] after 300s "; + exit 1 + fi + + pname=`kubectl get po -n ${ns} | grep -w ${pod} | awk '{print $1}'` + kubectl -n ${ns} get po ${pname} +} + +# Checks if a pod is available in a given namespace +function checkPod(){ + + max=20 + count=1 + + pod=$1 + ns=$2 + + pname=`kubectl get po -n ${ns} | grep -w ${pod} | awk '{print $1}'` + if [ -z ${pname} ]; then + echo "No such pod [$pod] exists in NameSpace [$ns]" + sleep 10 + fi + + rcode=`kubectl get po -n ${ns} | grep -w ${pod} | awk '{print $1}'` + if [ ! -z ${rcode} ]; then + echo "[$pod] already initialized .. " + return 0 + fi + + echo "The POD [${pod}] has not been initialized ..." 
+ while [ -z ${rcode} ]; do + [[ $count -gt $max ]] && break + echo "Pod[$pod] is being initialized ..." + sleep 5 + rcode=`kubectl get po -n ${ns} | grep $pod | awk '{print $1}'` + count=`expr $count + 1` + done + + if [ $count -gt $max ] ; then + echo "[ERROR] Could not find Pod [$pod] after 120s"; + exit 1 + fi +} + +# Checks if a service is available in a given namespace +function checkService(){ + svc=$1 + ns=$2 + startSecs=$SECONDS + maxWaitSecs=20 + while [ -z "`kubectl get service -n ${ns} | grep -w ${svc}`" ]; do + if [ $((SECONDS - startSecs)) -lt $maxWaitSecs ]; then + echo "Service [$svc] not found after $((SECONDS - startSecs)) seconds, retrying ..." + sleep 5 + else + echo "[Error] Could not find Service [$svc] after $((SECONDS - startSecs)) seconds" + exit 1 + fi + done + echo "Service [$svc] found" +} diff --git a/OracleAccessManagement/kubernetes/common/validate.sh b/OracleAccessManagement/kubernetes/common/validate.sh new file mode 100755 index 000000000..1a407a99a --- /dev/null +++ b/OracleAccessManagement/kubernetes/common/validate.sh @@ -0,0 +1,481 @@ +#!/usr/bin/env bash +# Copyright (c) 2018, 2021, Oracle and/or its affiliates. +# Licensed under the Universal Permissive License v 1.0 as shown at https://oss.oracle.com/licenses/upl. +# +# Description +# Common validation functions shared by all other scripts that process inputs properties. 
+# + +# +# Function to note that a validate error has occurred +# +function validationError { + printError $* + validateErrors=true +} + +# +# Function to cause the script to fail if there were any validation errors +# +function failIfValidationErrors { + if [ "$validateErrors" = true ]; then + fail 'The errors listed above must be resolved before the script can continue' + fi +} + +# +# Function to validate that a list of required input parameters were specified +# +function validateInputParamsSpecified { + for p in $*; do + local name=$p + local val=${!name} + if [ -z "$val" ]; then + validationError "The ${name} parameter in ${valuesInputFile} is missing, null or empty" + fi + done +} + +# +# Function to validate that a list of input parameters have boolean values. +# It assumes that validateInputParamsSpecified will also be called for these params. +# +function validateBooleanInputParamsSpecified { + validateInputParamsSpecified $* + for p in $*; do + local name=$p + local val=${!name} + if ! [ -z $val ]; then + if [ "true" != "$val" ] && [ "false" != "$val" ]; then + validationError "The value of $name must be true or false: $val" + fi + fi + done +} + +# +# Function to validate that a list of input parameters have integer values. +# +function validateIntegerInputParamsSpecified { + validateInputParamsSpecified $* + for p in $*; do + local name=$p + local val=${!name} + if ! [ -z $val ]; then + local intVal="" + printf -v intVal '%d' "$val" 2>/dev/null + if ! 
[ "${val}" == "${intVal}" ]; then + validationError "The value of $name must be an integer: $val" + fi + fi + done +} + +# +# Function to check if a value is lowercase +# $1 - name of object being checked +# $2 - value to check +function validateLowerCase { + local lcVal=$(toLower $2) + if [ "$lcVal" != "$2" ]; then + validationError "The value of $1 must be lowercase: $2" + fi +} + +# +# Function to check if a value is lowercase and legal DNS name +# $1 - name of object being checked +# $2 - value to check +function validateDNS1123LegalName { + local val=$(toDNS1123Legal $2) + if [ "$val" != "$2" ]; then + validationError "The value of $1 contains invalid characters: $2" + fi +} + +# +# Function to validate the namespace +# +function validateNamespace { + validateLowerCase "namespace" ${namespace} +} + +# +# Function to validate the version of the inputs file +# +function validateVersion { + local requiredVersion=${requiredInputsVersion} + if [ "${version}" != "${requiredVersion}" ]; then + validationError "Invalid version: \"${version}\". Must be ${requiredVersion}." + fi +} + +# +# Function to ensure the domain uid is a legal DNS name +# +function validateDomainUid { + validateLowerCase "domainUID" ${domainUID} + validateDNS1123LegalName domainUID ${domainUID} +} + +# +# Function to ensure the namespace is lowercase +# +function validateNamespace { + validateLowerCase "namespace" ${namespace} +} + +# +# Create an instance of clusterName to be used in cases where a legal DNS name is required. +# +function validateClusterName { + clusterNameSVC=$(toDNS1123Legal $clusterName) +} + +# +# Create an instance of adminServerName to be used in cases where a legal DNS name is required. +# +function validateAdminServerName { + adminServerNameSVC=$(toDNS1123Legal $adminServerName) +} + +# +# Create an instance of managedServerNameBase to be used in cases where a legal DNS name is required. 
+# +function validateManagedServerNameBase { + managedServerNameBaseSVC=$(toDNS1123Legal $managedServerNameBase) +} + +# +# Function to validate the secret name +# +function validateWeblogicCredentialsSecretName { + validateLowerCase "weblogicCredentialsSecretName" ${weblogicCredentialsSecretName} +} + +# +# Function to validate the weblogic image pull policy +# +function validateWeblogicImagePullPolicy { + if [ ! -z ${imagePullPolicy} ]; then + case ${imagePullPolicy} in + "IfNotPresent") + ;; + "Always") + ;; + "Never") + ;; + *) + validationError "Invalid value for imagePullPolicy: ${imagePullPolicy}. Valid values are IfNotPresent, Always, and Never." + ;; + esac + else + # Set the default + imagePullPolicy="IfNotPresent" + fi + failIfValidationErrors +} + +# +# Function to validate the fmwDomainType +# +function validateFmwDomainType { + if [ ! -z ${fmwDomainType} ]; then + case ${fmwDomainType} in + "JRF") + ;; + "RestrictedJRF") + ;; + *) + validationError "Invalid value for fmwDomainType: ${fmwDomainType}. Valid values are JRF or restrictedJRF." + ;; + esac + else + # Set the default + fmwDomainType="JRF" + fi + failIfValidationErrors +} + +# +# Function to validate the weblogic image pull secret name +# +function validateWeblogicImagePullSecretName { + if [ ! -z ${imagePullSecretName} ]; then + validateLowerCase imagePullSecretName ${imagePullSecretName} + imagePullSecretPrefix="" + if [ "${generateOnly}" = false ]; then + validateWeblogicImagePullSecret + fi + else + # Set name blank when not specified, and comment out the yaml + imagePullSecretName="" + imagePullSecretPrefix="#" + fi +} + +# +# Function to validate the weblogic image pull secret exists +# +function validateWeblogicImagePullSecret { + # The kubernetes secret for pulling images from a container registry is optional. + # If it was specified, make sure it exists. 
+ validateSecretExists ${imagePullSecretName} ${namespace} + failIfValidationErrors +} + +# try to execute kubectl to see whether kubectl is available +function validateKubectlAvailable { + if ! [ -x "$(command -v kubectl)" ]; then + validationError "kubectl is not installed" + fi +} + +# Function to validate the server start policy value +# +function validateServerStartPolicy { + validateInputParamsSpecified serverStartPolicy + if [ ! -z "${serverStartPolicy}" ]; then + case ${serverStartPolicy} in + "NEVER") + ;; + "ALWAYS") + ;; + "IF_NEEDED") + ;; + "ADMIN_ONLY") + ;; + *) + validationError "Invalid value for serverStartPolicy: ${serverStartPolicy}. Valid values are 'NEVER', 'ALWAYS', 'IF_NEEDED', and 'ADMIN_ONLY'." + ;; + esac + fi +} + +# +# Function to validate the weblogic domain storage reclaim policy +# +function validateWeblogicDomainStorageReclaimPolicy { + validateInputParamsSpecified weblogicDomainStorageReclaimPolicy + if [ ! -z "${weblogicDomainStorageReclaimPolicy}" ]; then + case ${weblogicDomainStorageReclaimPolicy} in + "Retain") + ;; + "Delete") + if [ "${weblogicDomainStoragePath:0:5}" != "/tmp/" ]; then + validationError "ERROR - Invalid value for weblogicDomainStorageReclaimPolicy ${weblogicDomainStorageReclaimPolicy} with weblogicDomainStoragePath ${weblogicDomainStoragePath} that is not /tmp/" + fi + ;; + "Recycle") + ;; + *) + validationError "Invalid value for weblogicDomainStorageReclaimPolicy: ${weblogicDomainStorageReclaimPolicy}. Valid values are Retain, Delete and Recycle." + ;; + esac + fi +} + +# +# Function to validate the weblogic domain storage type +# +function validateWeblogicDomainStorageType { + validateInputParamsSpecified weblogicDomainStorageType + if [ ! 
-z "${weblogicDomainStorageType}" ]; then + case ${weblogicDomainStorageType} in + "HOST_PATH") + ;; + "NFS") + validateInputParamsSpecified weblogicDomainStorageNFSServer + ;; + *) + validationError "Invalid value for weblogicDomainStorageType: ${weblogicDomainStorageType}. Valid values are HOST_PATH and NFS." + ;; + esac + fi +} + +# +# Function to validate the load balancer value +# +function validateLoadBalancer { + validateInputParamsSpecified loadBalancer + if [ ! -z "${loadBalancer}" ]; then + case ${loadBalancer} in + "TRAEFIK") + ;; + "APACHE") + ;; + "VOYAGER") + ;; + "NONE") + ;; + *) + validationError "Invalid value for loadBalancer: ${loadBalancer}. Valid values are APACHE, TRAEFIK, VOYAGER and NONE." + ;; + esac + fi +} + +# +# Function to validate a kubernetes secret exists +# $1 - the name of the secret +# $2 - namespace +function validateSecretExists { + echo "Checking to see if the secret ${1} exists in namespace ${2}" + local SECRET=`kubectl get secret ${1} -n ${2} | grep ${1} | wc | awk ' { print $1; }'` + if [ "${SECRET}" != "1" ]; then + validationError "The secret ${1} was not found in namespace ${2}" + fi +} + +# +# Function to validate the domain secret +# +function validateDomainSecret { + # Verify the secret exists + validateSecretExists ${weblogicCredentialsSecretName} ${namespace} + failIfValidationErrors + + # Verify the secret contains a username + SECRET=`kubectl get secret ${weblogicCredentialsSecretName} -n ${namespace} -o jsonpath='{.data}' | tr -d '"' | grep username: | wc | awk ' { print $1; }'` + if [ "${SECRET}" != "1" ]; then + validationError "The domain secret ${weblogicCredentialsSecretName} in namespace ${namespace} does not contain a username" + fi + + # Verify the secret contains a password + SECRET=`kubectl get secret ${weblogicCredentialsSecretName} -n ${namespace} -o jsonpath='{.data}' | tr -d '"'| grep password: | wc | awk ' { print $1; }'` + if [ "${SECRET}" != "1" ]; then + validationError "The domain secret 
${weblogicCredentialsSecretName} in namespace ${namespace} does not contain a password" + fi + failIfValidationErrors +} + +# +# function to validate if we will be using wdt or wlst to create the domain +# +function validateDomainFilesDir { + useWdt=true + if [ -z "${createDomainFilesDir}" ] || [ "${createDomainFilesDir}" == "wlst" ]; then + useWdt=false + fi +} + +# +# Function to validate the common input parameters +# +function validateCommonInputs { + sample_name=${1:-"other"} + + # Parse the common inputs file + parseCommonInputs + + validateInputParamsSpecified \ + adminServerName \ + domainUID \ + clusterName \ + managedServerNameBase \ + namespace \ + includeServerOutInPodLog \ + version + + validateIntegerInputParamsSpecified \ + adminPort \ + initialManagedServerReplicas \ + managedServerPort \ + t3ChannelPort \ + adminNodePort + + if [ ! "${sample_name}" == "fmw-domain-home-in-image" ]; then + validateIntegerInputParamsSpecified configuredManagedServerCount + fi + + validateBooleanInputParamsSpecified \ + productionModeEnabled \ + exposeAdminT3Channel \ + exposeAdminNodePort \ + includeServerOutInPodLog + + export requiredInputsVersion="create-weblogic-sample-domain-inputs-v1" + validateVersion + + validateDomainUid + validateNamespace + validateAdminServerName + validateManagedServerNameBase + validateClusterName + validateWeblogicCredentialsSecretName + validateServerStartPolicy + validateWeblogicImagePullPolicy + validateWeblogicImagePullSecretName + validateFmwDomainType + validateDomainFilesDir + # Below three validate methods are used for MII integration testing + validateWdtDomainType + validateWdtModelFile + validateWdtModelPropertiesFile + + failIfValidationErrors +} + +# +# Function to validate the domain's persistent volume claim has been created +# +function validateDomainPVC { + # Check if the persistent volume claim is already available + checkPvcExists ${persistentVolumeClaimName} ${namespace} + if [ "${PVC_EXISTS}" = "false" ]; then + 
validationError "The domain persistent volume claim ${persistentVolumeClaimName} does not exist in namespace ${namespace}" + fi + failIfValidationErrors +} + +# +# Function to validate the WDT model file exists +# used for MII integration testing +# +function validateWdtModelFile { + # Check if the model file exists + if [ ! -z $wdtModelFile ]; then + if [ ! -f $wdtModelFile ]; then + validationError "The WDT model file ${wdtModelFile} does not exist" + fi + fi + failIfValidationErrors +} + +# +# Function to validate the WDT model property file exists +# used for MII integration testing +# +function validateWdtModelPropertiesFile { + # Check if the model property file exists + if [ ! -z $wdtModelPropertiesFile ]; then + if [ ! -f $wdtModelPropertiesFile ]; then + validationError "The WDT model property file ${wdtModelPropertiesFile} does not exist" + fi + fi + failIfValidationErrors +} + +# Function to validate the wdtDomainType +# used for MII integration testing +function validateWdtDomainType { + if [ ! -z ${wdtDomainType} ]; then + case ${wdtDomainType} in + "WLS") + ;; + "JRF") + ;; + "RestrictedJRF") + ;; + *) + validationError "Invalid value for wdtDomainType: ${wdtDomainType}. Valid values are WLS or JRF or restrictedJRF." + ;; + esac + else + # Set the default + wdtDomainType="WLS" + fi + failIfValidationErrors +} + diff --git a/OracleAccessManagement/kubernetes/common/wdt-and-wit-utility.sh b/OracleAccessManagement/kubernetes/common/wdt-and-wit-utility.sh new file mode 100755 index 000000000..aa9cc691c --- /dev/null +++ b/OracleAccessManagement/kubernetes/common/wdt-and-wit-utility.sh @@ -0,0 +1,439 @@ +#!/bin/bash +# Copyright (c) 2021, Oracle and/or its affiliates. +# Licensed under the Universal Permissive License v 1.0 as shown at https://oss.oracle.com/licenses/upl. +# +# Description: +# +# This script contains functions for installing WebLogic Deploy Tool (WDT) and +# WebLogic Image Tool (WIT), and for running WDT. 
+# +# +# Usage: +# +# Export customized values for the input shell environment variables as needed +# before calling this script. +# +# Outputs: +# +# WDT install: WDT_DIR/weblogic-deploy/... +# +# Copy of wdt model: WDT_DIR/$(basename WDT_MODEL_FILE) +# Copy of wdt vars: WDT_DIR/$(basename WDT_VAR_FILE) +# +# WDT logs: WDT_DIR/weblogic-deploy/logs/... +# WDT stdout: WDT_DIR/createDomain.sh.out +# +# WebLogic domain home: DOMAIN_HOME_DIR +# default: /shared/domains/ +# +# Input environment variables: +# +# ORACLE_HOME Oracle home with a WebLogic install. +# default: /u01/oracle +# +# DOMAIN_HOME_DIR Target location for generated domain. +# +# WDT_MODEL_FILE Full path to WDT model file. +# default: the directory that contains this script +# plus "/wdt_model.yaml" +# +# WDT_VAR_FILE Full path to WDT variable file (java properties format). +# default: the directory that contains this script +# plus "/create-domain-inputs.yaml" +# +# WDT_DIR Target location to install and run WDT, and to keep a copy of +# $WDT_MODEL_FILE and $WDT_MODEL_VARS. Also the location +# of WDT log files. +# default: /shared/wdt +# +# WDT_VERSION WDT version to download. +# default: LATEST +# +# WDT_INSTALL_ZIP_FILE Filename of WDT install zip. +# default: weblogic-deploy.zip +# +# WDT_INSTALL_ZIP_URL URL for downloading WDT install zip +# default: https://github.com/oracle/weblogic-deploy-tooling/releases/latest/download/$WDT_INSTALL_ZIP_FILE +# +# WIT_DIR Target location to install WIT +# default: /shared/imagetool +# +# WIT_VERSION WIT version to download. +# default: LATEST +# +# WIT_INSTALL_ZIP_FILE Filename of WIT install zip. 
+# default: imagetool.zip +# +# WIT_INSTALL_ZIP_URL URL for downloading WIT install zip +# default: https://github.com/oracle/weblogic-image-tool/releases/latest/download/$WIT_INSTALL_ZIP_FILE +# + + +# Initialize globals + +export ORACLE_HOME=${ORACLE_HOME:-/u01/oracle} + +SCRIPTPATH="$( cd "$(dirname "$0")" > /dev/null 2>&1 ; pwd -P )" +WDT_MODEL_FILE=${WDT_MODEL_FILE:-"$SCRIPTPATH/wdt_model.yaml"} +WDT_VAR_FILE=${WDT_VAR_FILE:-"$SCRIPTPATH/create-domain-inputs.yaml"} + +WDT_DIR=${WDT_DIR:-/shared/wdt} +WDT_VERSION=${WDT_VERSION:-LATEST} + +WIT_DIR=${WIT_DIR:-/shared/imagetool} +WIT_VERSION=${WIT_VERSION:-LATEST} + +DOMAIN_TYPE="${DOMAIN_TYPE:-WLS}" + +function download { + local fileUrl="${1}" + + local curl_res=1 + max=20 + count=0 + while [ $curl_res -ne 0 -a $count -lt $max ] ; do + sleep 1 + count=`expr $count + 1` + for proxy in "${https_proxy}" "${https_proxy2}"; do + echo @@ "Info: Downloading $fileUrl with https_proxy=\"$proxy\"" + https_proxy="${proxy}" \ + curl --silent --show-error --connect-timeout 10 -O -L $fileUrl + curl_res=$? + [ $curl_res -eq 0 ] && break + done + done + if [ $curl_res -ne 0 ]; then + echo @@ "Error: Download failed." + return 1 + fi +} + +function run_wdt { + # + # Run WDT using WDT_VAR_FILE, WDT_MODEL_FILE, and ORACLE_HOME. + # Output: + # - result domain will be in DOMAIN_HOME_DIR + # - logging output is in $WDT_DIR/createDomain.sh.out and $WDT_DIR/weblogic-deploy/logs + # - WDT_VAR_FILE & WDT_MODEL_FILE will be copied to WDT_DIR. + # + + local action="${1}" + + # Input files and directories. 
+ + local inputs_orig="$WDT_VAR_FILE" + local model_orig="$WDT_MODEL_FILE" + local oracle_home="$ORACLE_HOME" + local domain_type="$DOMAIN_TYPE" + local wdt_bin_dir="$WDT_DIR/weblogic-deploy/bin" + local wdt_createDomain_script="$wdt_bin_dir/createDomain.sh" + + if [ ${action} = "create" ]; then + local wdt_domain_script="$wdt_bin_dir/createDomain.sh" + else + local wdt_domain_script="$wdt_bin_dir/updateDomain.sh" + fi + + local domain_home_dir="$DOMAIN_HOME_DIR" + if [ -z "${domain_home_dir}" ]; then + local domain_dir="/shared/domains" + local domain_uid=`egrep 'domainUID' $inputs_orig | awk '{print $2}'` + local domain_home_dir=$domain_dir/$domain_uid + fi + + mkdir -p $domain_home_dir + + # Output files and directories. + + local inputs_final=$WDT_DIR/$(basename "$inputs_orig") + local model_final=$WDT_DIR/$(basename "$model_orig") + if [ ${action} = "create" ]; then + local out_file=$WDT_DIR/createDomain.sh.out + else + local out_file=$WDT_DIR/updateDomain.sh.out + fi + local wdt_log_dir="$WDT_DIR/weblogic-deploy/logs" + + echo @@ "Info: About to run WDT ${wdt_domain_script}" + + for directory in wdt_bin_dir SCRIPTPATH WDT_DIR oracle_home; do + if [ ! -d "${!directory}" ]; then + echo @@ "Error: Could not find ${directory} directory ${!directory}." + return 1 + fi + done + + for fil in inputs_orig model_orig wdt_createDomain_script; do + if [ ! -f "${!fil}" ]; then + echo @@ "Error: Could not find ${fil} file ${!fil}." + return 1 + fi + done + + cp $model_orig $model_final || return 1 + cp $inputs_orig $inputs_final || return 1 + + local save_dir=`pwd` + cd $WDT_DIR || return 1 + + cmd=" + $wdt_domain_script + -oracle_home $oracle_home + -domain_type $domain_type + -domain_home $domain_home_dir + -model_file $model_final + -variable_file $inputs_final + " + + echo @@ "Info: About to run the following WDT command:" + echo "${cmd}" + echo @@ "Info: WDT output will be in $out_file and $wdt_log_dir" + eval $cmd > $out_file 2>&1 + local wdt_res=$? 
+ + cd $save_dir + + if [ $wdt_res -ne 0 ]; then + if [ ${action} = "create" ]; then + cat $WDT_DIR/createDomain.sh.out + echo @@ "Info: WDT createDomain.sh output is in $out_file and $wdt_log_dir" + echo @@ "Error: WDT createDomain.sh failed." + return 1 + else + cat $WDT_DIR/updateDomain.sh.out + echo @@ "Info: WDT updateDomain.sh output is in $out_file and $wdt_log_dir" + echo @@ "Error: WDT updateDomain.sh failed." + return 1 + fi + fi + + cd $WDT_DIR || return 1 + + cmd=" + $wdt_bin_dir/extractDomainResource.sh + -oracle_home $oracle_home + -domain_resource_file domain${action}.yaml + -domain_home $domain_home_dir + -model_file $model_final + -variable_file $inputs_final + " + echo @@ "Info: About to run the following WDT command:" + echo "${cmd}" + echo @@ "Info: WDT output will be in extract${action}.out and $wdt_log_dir" + eval $cmd > extract${action}.out 2>&1 + local wdt_res=$? + + cd $save_dir + + if [ $wdt_res -ne 0 ]; then + cat $WDT_DIR/extract${action}.out + echo @@ "Info: WDT extractDomainResource output is in extract${action}.out and $wdt_log_dir" + echo @@ "Error: WDT createDomain.sh failed." + return 1 + fi + + if [ ${action} = "create" ]; then + # chmod -R g+w $domain_home_dir || return 1 + echo @@ "Info: WDT createDomain.sh succeeded." + else + echo @@ "Info: WDT updateDomain.sh succeeded." 
+ fi + + return 0 +} + +function setup_wdt_shared_dir { + mkdir -p $WDT_DIR || return 1 +} + +# +# Install Weblogic Server Deploy Tooling to ${WDT_DIR} +# +function install_wdt { + + WDT_INSTALL_ZIP_FILE="${WDT_INSTALL_ZIP_FILE:-weblogic-deploy.zip}" + + if [ "$WDT_VERSION" == "LATEST" ]; then + WDT_INSTALL_ZIP_URL=${WDT_INSTALL_ZIP_URL:-"https://github.com/oracle/weblogic-deploy-tooling/releases/latest/download/$WDT_INSTALL_ZIP_FILE"} + else + WDT_INSTALL_ZIP_URL=${WDT_INSTALL_ZIP_URL:-"https://github.com/oracle/weblogic-deploy-tooling/releases/download/release-$WDT_VERSION/$WDT_INSTALL_ZIP_FILE"} + fi + + local save_dir=`pwd` + cd $WDT_DIR || return 1 + + echo @@ "Info: Downloading $WDT_INSTALL_ZIP_URL " + download $WDT_INSTALL_ZIP_URL || return 1 + + if [ ! -f $WDT_INSTALL_ZIP_FILE ]; then + cd $save_dir + echo @@ "Error: Download failed or $WDT_INSTALL_ZIP_FILE not found." + return 1 + fi + + echo @@ "Info: Archive downloaded to $WDT_DIR/$WDT_INSTALL_ZIP_FILE, about to unzip via 'jar xf'." + + jar xf $WDT_INSTALL_ZIP_FILE + local jar_res=$? + + cd $save_dir + + if [ $jar_res -ne 0 ]; then + echo @@ "Error: Install failed while unzipping $WDT_DIR/$WDT_INSTALL_ZIP_FILE" + return $jar_res + fi + + if [ ! -d "$WDT_DIR/weblogic-deploy/bin" ]; then + echo @@ "Error: Install failed: directory '$WDT_DIR/weblogic-deploy/bin' not found." + return 1 + fi + + chmod 775 $WDT_DIR/weblogic-deploy/bin/* || return 1 + + echo @@ "Info: Install succeeded, wdt install is in the $WDT_DIR/weblogic-deploy directory." + return 0 +} + +# +# Install WebLogic Image Tool to ${WIT_DIR}. Used by install_wit_if_needed. +# Do not call this function directory. 
+# +function install_wit { + + WIT_INSTALL_ZIP_FILE="${WIT_INSTALL_ZIP_FILE:-imagetool.zip}" + + if [ "$WIT_VERSION" == "LATEST" ]; then + WIT_INSTALL_ZIP_URL=${WDT_INSTALL_ZIP_URL:-"https://github.com/oracle/weblogic-image-tool/releases/latest/download/$WIT_INSTALL_ZIP_FILE"} + else + WIT_INSTALL_ZIP_URL=${WIT_INSTALL_ZIP_URL:-"https://github.com/oracle/weblogic-image-tool/releases/download/release-$WIT_VERSION/$WIT_INSTALL_ZIP_FILE"} + fi + + + + local save_dir=`pwd` + + echo @@ "imagetool.sh not found in ${imagetoolBinDir}. Installing imagetool..." + + echo @@ "Info: Downloading $WIT_INSTALL_ZIP_URL " + download $WIT_INSTALL_ZIP_URL || return 1 + + if [ ! -f $WIT_INSTALL_ZIP_FILE ]; then + cd $save_dir + echo @@ "Error: Download failed or $WIT_INSTALL_ZIP_FILE not found." + return 1 + fi + echo @@ "Info: Archive downloaded to $WIT_DIR/$WIT_INSTALL_ZIP_FILE, about to unzip via 'jar xf'." + + jar xf $WIT_INSTALL_ZIP_FILE + local jar_res=$? + + cd $save_dir + + if [ $jar_res -ne 0 ]; then + echo @@ "Error: Install failed while unzipping $WIT_DIR/$WIT_INSTALL_ZIP_FILE" + return $jar_res + fi + + if [ ! -d "$WIT_DIR/imagetool/bin" ]; then + echo @@ "Error: Install failed: directory '$WIT_DIR/imagetool/bin' not found." + return 1 + fi + + chmod 775 $WIT_DIR/imagetool/bin/* || return 1 +} + +# +# Checks whether WebLogic Image Tool is already installed under ${WIT_DIR}, and install +# it if not. +# +function install_wit_if_needed { + + local save_dir=`pwd` + + mkdir -p $WIT_DIR || return 1 + cd $WIT_DIR || return 1 + + imagetoolBinDir=$WIT_DIR/imagetool/bin + if [ -f $imagetoolBinDir/imagetool.sh ]; then + echo @@ "Info: imagetool.sh already exist in ${imagetoolBinDir}. Skipping WIT installation." 
+ else + install_wit + fi + + export WLSIMG_CACHEDIR="$WIT_DIR/imagetool-cache" + + # Check existing imageTool cache entry for WDT: + # - if there is already an entry, and the WDT installer file specified in the cache entry exists, skip WDT installation + # - if file in cache entry doesn't exist, delete cache entry, install WDT, and add WDT installer to cache + # - if entry does not exist, install WDT, and add WDT installer to cache + if [ "$WDT_VERSION" == "LATEST" ]; then + wdtCacheVersion="latest" + else + wdtCacheVersion=$WDT_VERSION + fi + + local listItems=$( ${imagetoolBinDir}/imagetool.sh cache listItems | grep "wdt_${wdtCacheVersion}" ) + + if [ ! -z "$listItems" ]; then + local wdt_file_path_in_cache=$(echo $listItems | sed 's/.*=\(.*\)/\1/') + if [ -f "$wdt_file_path_in_cache" ]; then + skip_wdt_install=true + else + echo @@ "Info: imageTool cache contains an entry for WDT zip at $wdt_file_path_in_cache which does not exist. Removing from cache entry." + ${imagetoolBinDir}/imagetool.sh cache deleteEntry \ + --key wdt_${wdtCacheVersion} + fi + fi + + if [ -z "$skip_wdt_install" ]; then + echo @@ "Info: imageTool cache does not contain a valid entry for wdt_${wdtCacheVersion}. Installing WDT" + setup_wdt_shared_dir || return 1 + install_wdt || return 1 + ${imagetoolBinDir}/imagetool.sh cache addInstaller \ + --type wdt \ + --version $WDT_VERSION \ + --path $WDT_DIR/$WDT_INSTALL_ZIP_FILE || return 1 + else + echo @@ "Info: imageTool cache already contains entry ${listItems}. Skipping WDT installation." + fi + + cd $save_dir + + echo @@ "Info: Install succeeded, imagetool install is in the $WIT_DIR/imagetool directory." 
+ return 0 +} + +function encrypt_model { + # + # run encryptModel.sh from WDT to encrypt model and properties files + # + local domainOutputDirFullPath=${1} # full path to directory where the model, encrypt file, and domain properties files are + local model_file=${2} # path to file containing encryption key relative to ${domainOutputDirFullPath} + local encrypt_key_file=${3} # path to file containing encryption key relative to ${domainOutputDirFullPath} + local domain_properties_file=${4} # path to domain properties file relative to ${domainOutputDirFullPath} + local oracle_home="$ORACLE_HOME" + + echo @@ "Info: encrypt passwords in the variables file at ${domainOutputDirFullPath}/${domain_properties_file} using encryption key from create-domain.sh argument written to file: ${encrypt_key_file}" + + cmd=" + cat /shared/${encrypt_key_file} /shared/${encrypt_key_file} | + /wdt/bin/encryptModel.sh \ + -oracle_home ${oracle_home} \ + -model_file /shared/${model_file} \ + -variable_file /shared/${domain_properties_file} + " + echo $cmd > ${domainOutputDirFullPath}/cmd.sh + chmod 755 ${domainOutputDirFullPath}/cmd.sh + echo @@ "Info: Encrypt Model: About to run the following command in container with image ${domainHomeImageBase}:" + cat ${domainOutputDirFullPath}/cmd.sh + + chmod 766 ${domainOutputDirFullPath}/${domain_properties_file} + docker run -it --rm -v ${domainOutputDirFullPath}:/shared -v ${WDT_DIR}/weblogic-deploy:/wdt ${domainHomeImageBase} /bin/bash -c /shared/cmd.sh || return 1 + + # clean up the generated files + rm ${domainOutputDirFullPath}/cmd.sh + + echo @@ "Info: encrypt_model Completed" +} + + diff --git a/OracleAccessManagement/kubernetes/3.0.1/create-access-domain/domain-home-on-pv/common/clusterCreate_template.py b/OracleAccessManagement/kubernetes/create-access-domain/domain-home-on-pv/common/clusterCreate_template.py similarity index 100% rename from 
OracleAccessManagement/kubernetes/3.0.1/create-access-domain/domain-home-on-pv/common/clusterCreate_template.py rename to OracleAccessManagement/kubernetes/create-access-domain/domain-home-on-pv/common/clusterCreate_template.py diff --git a/OracleAccessManagement/kubernetes/3.0.1/create-access-domain/domain-home-on-pv/common/create-domain-job.sh b/OracleAccessManagement/kubernetes/create-access-domain/domain-home-on-pv/common/create-domain-job.sh similarity index 100% rename from OracleAccessManagement/kubernetes/3.0.1/create-access-domain/domain-home-on-pv/common/create-domain-job.sh rename to OracleAccessManagement/kubernetes/create-access-domain/domain-home-on-pv/common/create-domain-job.sh diff --git a/OracleAccessManagement/kubernetes/3.0.1/create-access-domain/domain-home-on-pv/common/createOAMDomain.py b/OracleAccessManagement/kubernetes/create-access-domain/domain-home-on-pv/common/createOAMDomain.py similarity index 100% rename from OracleAccessManagement/kubernetes/3.0.1/create-access-domain/domain-home-on-pv/common/createOAMDomain.py rename to OracleAccessManagement/kubernetes/create-access-domain/domain-home-on-pv/common/createOAMDomain.py diff --git a/OracleAccessManagement/kubernetes/3.0.1/create-access-domain/domain-home-on-pv/common/oamconfig.properties b/OracleAccessManagement/kubernetes/create-access-domain/domain-home-on-pv/common/oamconfig.properties old mode 100644 new mode 100755 similarity index 99% rename from OracleAccessManagement/kubernetes/3.0.1/create-access-domain/domain-home-on-pv/common/oamconfig.properties rename to OracleAccessManagement/kubernetes/create-access-domain/domain-home-on-pv/common/oamconfig.properties index 76ed03a3a..d002e86d5 --- a/OracleAccessManagement/kubernetes/3.0.1/create-access-domain/domain-home-on-pv/common/oamconfig.properties +++ b/OracleAccessManagement/kubernetes/create-access-domain/domain-home-on-pv/common/oamconfig.properties @@ -3,7 +3,6 @@ #Below are only the sample values, please modify them as per 
your setup - # The name space where OAM servers are created OAM_NAMESPACE='accessns' diff --git a/OracleAccessManagement/kubernetes/3.0.1/create-access-domain/domain-home-on-pv/common/oamconfig_modify.sh b/OracleAccessManagement/kubernetes/create-access-domain/domain-home-on-pv/common/oamconfig_modify.sh similarity index 98% rename from OracleAccessManagement/kubernetes/3.0.1/create-access-domain/domain-home-on-pv/common/oamconfig_modify.sh rename to OracleAccessManagement/kubernetes/create-access-domain/domain-home-on-pv/common/oamconfig_modify.sh index b10309eea..f31ebf61f 100755 --- a/OracleAccessManagement/kubernetes/3.0.1/create-access-domain/domain-home-on-pv/common/oamconfig_modify.sh +++ b/OracleAccessManagement/kubernetes/create-access-domain/domain-home-on-pv/common/oamconfig_modify.sh @@ -102,6 +102,7 @@ sed -i -e "s:@OAP_SERVICEPORT@:$OAP_SERVICEPORT:g" $cur_dir/output/oamconfig_mod cp $cur_dir/oamoap-service-template.yaml $cur_dir/output/oamoap-service.yaml sed -i -e "s:@OAM_NAMESPACE@:$OAM_NAMESPACE:g" $cur_dir/output/oamoap-service.yaml +sed -i -e "s:@DOMAINID@:$domainUID:g" $cur_dir/output/oamoap-service.yaml kubectl create -f $cur_dir/output/oamoap-service.yaml kubectl get services -n $OAM_NAMESPACE | grep NodePort diff --git a/OracleAccessManagement/kubernetes/3.0.1/create-access-domain/domain-home-on-pv/common/oamconfig_modify_template.xml b/OracleAccessManagement/kubernetes/create-access-domain/domain-home-on-pv/common/oamconfig_modify_template.xml old mode 100644 new mode 100755 similarity index 100% rename from OracleAccessManagement/kubernetes/3.0.1/create-access-domain/domain-home-on-pv/common/oamconfig_modify_template.xml rename to OracleAccessManagement/kubernetes/create-access-domain/domain-home-on-pv/common/oamconfig_modify_template.xml diff --git a/OracleAccessManagement/kubernetes/3.0.1/create-access-domain/domain-home-on-pv/common/oamoap-service-template.yaml 
b/OracleAccessManagement/kubernetes/create-access-domain/domain-home-on-pv/common/oamoap-service-template.yaml old mode 100644 new mode 100755 similarity index 92% rename from OracleAccessManagement/kubernetes/3.0.1/create-access-domain/domain-home-on-pv/common/oamoap-service-template.yaml rename to OracleAccessManagement/kubernetes/create-access-domain/domain-home-on-pv/common/oamoap-service-template.yaml index abd01f888..20624427c --- a/OracleAccessManagement/kubernetes/3.0.1/create-access-domain/domain-home-on-pv/common/oamoap-service-template.yaml +++ b/OracleAccessManagement/kubernetes/create-access-domain/domain-home-on-pv/common/oamoap-service-template.yaml @@ -4,7 +4,7 @@ kind: Service apiVersion: v1 metadata: - name: oamoap-service + name: @DOMAINID@-oamoap-service namespace: @OAM_NAMESPACE@ spec: type: NodePort diff --git a/OracleAccessManagement/kubernetes/3.0.1/create-access-domain/domain-home-on-pv/common/readme.txt b/OracleAccessManagement/kubernetes/create-access-domain/domain-home-on-pv/common/readme.txt old mode 100644 new mode 100755 similarity index 100% rename from OracleAccessManagement/kubernetes/3.0.1/create-access-domain/domain-home-on-pv/common/readme.txt rename to OracleAccessManagement/kubernetes/create-access-domain/domain-home-on-pv/common/readme.txt diff --git a/OracleAccessManagement/kubernetes/3.0.1/create-access-domain/domain-home-on-pv/common/utility.sh b/OracleAccessManagement/kubernetes/create-access-domain/domain-home-on-pv/common/utility.sh similarity index 100% rename from OracleAccessManagement/kubernetes/3.0.1/create-access-domain/domain-home-on-pv/common/utility.sh rename to OracleAccessManagement/kubernetes/create-access-domain/domain-home-on-pv/common/utility.sh diff --git a/OracleAccessManagement/kubernetes/3.0.1/create-access-domain/domain-home-on-pv/create-database/db-with-pv.yaml b/OracleAccessManagement/kubernetes/create-access-domain/domain-home-on-pv/create-database/db-with-pv.yaml old mode 100644 new mode 100755 
similarity index 100% rename from OracleAccessManagement/kubernetes/3.0.1/create-access-domain/domain-home-on-pv/create-database/db-with-pv.yaml rename to OracleAccessManagement/kubernetes/create-access-domain/domain-home-on-pv/create-database/db-with-pv.yaml diff --git a/OracleAccessManagement/kubernetes/3.0.1/create-access-domain/domain-home-on-pv/create-database/db-without-pv.yaml b/OracleAccessManagement/kubernetes/create-access-domain/domain-home-on-pv/create-database/db-without-pv.yaml old mode 100644 new mode 100755 similarity index 100% rename from OracleAccessManagement/kubernetes/3.0.1/create-access-domain/domain-home-on-pv/create-database/db-without-pv.yaml rename to OracleAccessManagement/kubernetes/create-access-domain/domain-home-on-pv/create-database/db-without-pv.yaml diff --git a/OracleAccessManagement/kubernetes/3.0.1/create-access-domain/domain-home-on-pv/create-domain-inputs.yaml b/OracleAccessManagement/kubernetes/create-access-domain/domain-home-on-pv/create-domain-inputs.yaml old mode 100644 new mode 100755 similarity index 100% rename from OracleAccessManagement/kubernetes/3.0.1/create-access-domain/domain-home-on-pv/create-domain-inputs.yaml rename to OracleAccessManagement/kubernetes/create-access-domain/domain-home-on-pv/create-domain-inputs.yaml diff --git a/OracleAccessManagement/kubernetes/3.0.1/create-access-domain/domain-home-on-pv/create-domain-job-template.yaml b/OracleAccessManagement/kubernetes/create-access-domain/domain-home-on-pv/create-domain-job-template.yaml old mode 100644 new mode 100755 similarity index 100% rename from OracleAccessManagement/kubernetes/3.0.1/create-access-domain/domain-home-on-pv/create-domain-job-template.yaml rename to OracleAccessManagement/kubernetes/create-access-domain/domain-home-on-pv/create-domain-job-template.yaml diff --git a/OracleAccessManagement/kubernetes/3.0.1/create-access-domain/domain-home-on-pv/create-domain.sh 
b/OracleAccessManagement/kubernetes/create-access-domain/domain-home-on-pv/create-domain.sh similarity index 96% rename from OracleAccessManagement/kubernetes/3.0.1/create-access-domain/domain-home-on-pv/create-domain.sh rename to OracleAccessManagement/kubernetes/create-access-domain/domain-home-on-pv/create-domain.sh index 3dec1fa51..578f9b211 100755 --- a/OracleAccessManagement/kubernetes/3.0.1/create-access-domain/domain-home-on-pv/create-domain.sh +++ b/OracleAccessManagement/kubernetes/create-access-domain/domain-home-on-pv/create-domain.sh @@ -25,11 +25,12 @@ source ${scriptDir}/../../common/utility.sh source ${scriptDir}/../../common/validate.sh function usage { - echo usage: ${script} -o dir -i file [-e] [-v] [-h] + echo usage: ${script} -o dir -i file [-e] [-v] [-t] [-h] echo " -i Parameter inputs file, must be specified." echo " -o Output directory for the generated yaml files, must be specified." echo " -e Also create the resources in the generated yaml files, optional." echo " -v Validate the existence of persistentVolumeClaim, optional." + echo " -t Timeout (in seconds) for create domain job execution, optional." echo " -h Help" exit $1 } @@ -39,7 +40,8 @@ function usage { # doValidation=false executeIt=false -while getopts "evhi:o:" opt; do +timeout=600 +while getopts "evhi:o:t:" opt; do case $opt in i) valuesInputFile="${OPTARG}" ;; @@ -49,6 +51,8 @@ while getopts "evhi:o:" opt; do ;; e) executeIt=true ;; + t) timeout="${OPTARG}" + ;; h) usage 0 ;; *) usage 1 @@ -70,6 +74,11 @@ if [ "${missingRequiredOption}" == "true" ]; then usage 1 fi +if [ -z ${timeout} ]; then + timeout=600 +fi + + # # Function to initialize and validate the output directory # for the generated yaml files for this domain. 
@@ -211,7 +220,7 @@ function createDomainHome { # Update the "- $(CLUSTER_NAME)" in the affinity section to policy_cluster sed -i "0,/- ${clusterName}/s//- policy_cluster/" ${dcrOutput} sed -i -e "/- clusterName:/a ${PRECREATE_SERVICE}" ${dcrOutput} - sed -i "0,/replicas: 2/ {0,/replicas: 2/ s/replicas: 2/replicas: 1/}" ${dcrOutput} + #sed -i "0,/replicas: 2/ {0,/replicas: 2/ s/replicas: 2/replicas: 1/}" ${dcrOutput} fi else echo "domainType not defined. Setting it to oam by default" @@ -224,7 +233,7 @@ function createDomainHome { echo "Waiting for the job to complete..." JOB_STATUS="0" - max=20 + max=`expr ${timeout} / 30` count=0 while [ "$JOB_STATUS" != "Completed" -a $count -lt $max ] ; do sleep 30 @@ -294,5 +303,3 @@ function printSummary { # Perform the sequence of steps to create a domain createDomain false - - diff --git a/OracleAccessManagement/kubernetes/3.0.1/create-access-domain/domain-home-on-pv/delete-domain-job-template.yaml b/OracleAccessManagement/kubernetes/create-access-domain/domain-home-on-pv/delete-domain-job-template.yaml old mode 100644 new mode 100755 similarity index 100% rename from OracleAccessManagement/kubernetes/3.0.1/create-access-domain/domain-home-on-pv/delete-domain-job-template.yaml rename to OracleAccessManagement/kubernetes/create-access-domain/domain-home-on-pv/delete-domain-job-template.yaml diff --git a/OracleAccessManagement/kubernetes/3.0.1/create-access-domain/domain-home-on-pv/wlst/create-domain-script.sh b/OracleAccessManagement/kubernetes/create-access-domain/domain-home-on-pv/wlst/create-domain-script.sh similarity index 100% rename from OracleAccessManagement/kubernetes/3.0.1/create-access-domain/domain-home-on-pv/wlst/create-domain-script.sh rename to OracleAccessManagement/kubernetes/create-access-domain/domain-home-on-pv/wlst/create-domain-script.sh diff --git a/OracleAccessManagement/kubernetes/create-kubernetes-secrets/create-azure-storage-credentials-secret.sh 
b/OracleAccessManagement/kubernetes/create-kubernetes-secrets/create-azure-storage-credentials-secret.sh new file mode 100755 index 000000000..8e6d3d947 --- /dev/null +++ b/OracleAccessManagement/kubernetes/create-kubernetes-secrets/create-azure-storage-credentials-secret.sh @@ -0,0 +1,92 @@ +#!/usr/bin/env bash +# Copyright (c) 2018, 2021, Oracle and/or its affiliates. +# Licensed under the Universal Permissive License v 1.0 as shown at https://oss.oracle.com/licenses/upl. +# +# Description +# This sample script creates a Kubernetes secret for Azure Storage to use Azure file share on AKS. +# +# The following pre-requisites must be handled prior to running this script: +# * The kubernetes namespace must already be created +# + +script="${BASH_SOURCE[0]}" + +# +# Function to exit and print an error message +# $1 - text of message +function fail { + echo [ERROR] $* + exit 1 +} + +# Try to execute kubectl to see whether kubectl is available +function validateKubectlAvailable { + if ! [ -x "$(command -v kubectl)" ]; then + fail "kubectl is not installed" + fi +} + +function usage { + echo usage: ${script} -c storageAccountName -k storageAccountKey [-s secretName] [-n namespace] [-h] + echo " -a storage account name, must be specified." + echo " -k storage account key, must be specified." + echo " -s secret name, optional. Use azure-secret if not specified." + echo " -n namespace, optional. Use the default namespace if not specified." + echo " -h Help" + exit $1 +} + +# +# Parse the command line options +# +secretName=azure-secret +namespace=default +while getopts "ha:k:s:n:" opt; do + case $opt in + a) storageAccountName="${OPTARG}" + ;; + k) storageAccountKey="${OPTARG}" + ;; + s) secretName="${OPTARG}" + ;; + n) namespace="${OPTARG}" + ;; + h) usage 0 + ;; + *) usage 1 + ;; + esac +done + +if [ -z ${storageAccountName} ]; then + echo "${script}: -e must be specified." 
+ missingRequiredOption="true" +fi + +if [ -z ${storageAccountKey} ]; then + echo "${script}: -k must be specified." + missingRequiredOption="true" +fi + +if [ "${missingRequiredOption}" == "true" ]; then + usage 1 +fi + +# check and see if the secret already exists +result=`kubectl get secret ${secretName} -n ${namespace} --ignore-not-found=true | grep ${secretName} | wc | awk ' { print $1; }'` +if [ "${result:=Error}" != "0" ]; then + fail "The secret ${secretName} already exists in namespace ${namespace}." +fi + +# create the secret +kubectl -n $namespace create secret generic $secretName \ + --from-literal=azurestorageaccountname=$storageAccountName \ + --from-literal=azurestorageaccountkey=$storageAccountKey + +# Verify the secret exists +SECRET=`kubectl get secret ${secretName} -n ${namespace} | grep ${secretName} | wc | awk ' { print $1; }'` +if [ "${SECRET}" != "1" ]; then + fail "The secret ${secretName} was not found in namespace ${namespace}" +fi + +echo "The secret ${secretName} has been successfully created in the ${namespace} namespace." diff --git a/OracleAccessManagement/kubernetes/create-kubernetes-secrets/create-docker-credentials-secret.sh b/OracleAccessManagement/kubernetes/create-kubernetes-secrets/create-docker-credentials-secret.sh new file mode 100755 index 000000000..48f113b93 --- /dev/null +++ b/OracleAccessManagement/kubernetes/create-kubernetes-secrets/create-docker-credentials-secret.sh @@ -0,0 +1,106 @@ +#!/usr/bin/env bash +# Copyright (c) 2018, 2021, Oracle and/or its affiliates. +# Licensed under the Universal Permissive License v 1.0 as shown at https://oss.oracle.com/licenses/upl. +# +# Description +# This sample script creates a Kubernetes secret for container registry credentials for use with the WLS Operator on AKS.
+# +# The following pre-requisites must be handled prior to running this script: +# * The kubernetes namespace must already be created +# + +script="${BASH_SOURCE[0]}" + +# +# Function to exit and print an error message +# $1 - text of message +function fail { + echo [ERROR] $* + exit 1 +} + +# Try to execute kubectl to see whether kubectl is available +function validateKubectlAvailable { + if ! [ -x "$(command -v kubectl)" ]; then + fail "kubectl is not installed" + fi +} + +function usage { + echo usage: ${script} -e email -p password -u username [-s secretName] [-d dockerServer] [-n namespace] [-h] + echo " -e email, must be specified." + echo " -p password, must be specified." + echo " -u username, must be specified." + echo " -s secret name, optional, Use regcred if not specified." + echo " -d docker server, optional, Use docker.io if not specified." + echo " -n namespace, optional. Use the default namespace if not specified" + echo " -h Help" + exit $1 +} + +# +# Parse the command line options +# +secretName=regcred +namespace=default +dockerServer=container-registry.oracle.com +while getopts "he:p:u:n:d:s:d:" opt; do + case $opt in + e) email="${OPTARG}" + ;; + p) password="${OPTARG}" + ;; + u) username="${OPTARG}" + ;; + s) secretName="${OPTARG}" + ;; + d) dockerServer="${OPTARG}" + ;; + n) namespace="${OPTARG}" + ;; + h) usage 0 + ;; + *) usage 1 + ;; + esac +done + +if [ -z ${email} ]; then + echo "${script}: -e must be specified." + missingRequiredOption="true" +fi + +if [ -z ${password} ]; then + echo "${script}: -p must be specified." + missingRequiredOption="true" +fi + +if [ -z ${username} ]; then + echo "${script}: -u must be specified." 
+ missingRequiredOption="true" +fi + +if [ "${missingRequiredOption}" == "true" ]; then + usage 1 +fi + +# check and see if the secret already exists +result=`kubectl get secret ${secretName} -n ${namespace} --ignore-not-found=true | grep ${secretName} | wc | awk ' { print $1; }'` +if [ "${result:=Error}" != "0" ]; then + fail "The secret ${secretName} already exists in namespace ${namespace}." +fi + +# create the secret +kubectl -n $namespace create secret docker-registry $secretName \ + --docker-email=$email \ + --docker-password=$password \ + --docker-server=$dockerServer \ + --docker-username=$username + +# Verify the secret exists +SECRET=`kubectl get secret ${secretName} -n ${namespace} | grep ${secretName} | wc | awk ' { print $1; }'` +if [ "${SECRET}" != "1" ]; then + fail "The secret ${secretName} was not found in namespace ${namespace}" +fi + +echo "The secret ${secretName} has been successfully created in the ${namespace} namespace." diff --git a/OracleAccessManagement/kubernetes/create-oracle-db-service/README.md b/OracleAccessManagement/kubernetes/create-oracle-db-service/README.md new file mode 100755 index 000000000..7cb982e97 --- /dev/null +++ b/OracleAccessManagement/kubernetes/create-oracle-db-service/README.md @@ -0,0 +1,76 @@ +# Managing Oracle Database Service for OracleAccessManagement + +The sample scripts in this directory demonstrate how to: +* Start an Oracle Database (DB) service in a Kubernetes cluster. +* Stop an Oracle DB service in a Kubernetes cluster. + +## Start an Oracle Database service in a Kubernetes cluster + +Use this script to create an Oracle Database service in a Kubernetes Namespace with the default credentials, in the Oracle Database Slim image. + +The script assumes that either the image, `container-registry.oracle.com/database/enterprise:12.2.0.1-slim`, is available in the Docker repository, or an `ImagePullSecret` is created for `container-registry.oracle.com`. 
To create a secret for accessing `container-registry.oracle.com`, see the script `create-image-pull-secret.sh`. + +``` + +$ ./start-db-service.sh -h +usage: ./start-db-service.sh -p -i -s -n [-h] + -i Oracle DB Image (optional) + (default: container-registry.oracle.com/database/enterprise:12.2.0.1-slim) + -p DB Service NodePort (optional) + (default: 30011, set to 'none' to deploy service without a NodePort) + -s DB Image PullSecret (optional) + (default: docker-store) + -n Configurable Kubernetes NameSpace for Oracle DB Service (optional)" + (default: default) + -h Help + +$ ./start-db-service.sh +NodePort[30011] ImagePullSecret[docker-store] Image[container-registry.oracle.com/database/enterprise:12.2.0.1-slim] +deployment.extensions/oracle-db created +service/oracle-db created +[oracle-db-54667dfd5f-76sxf] already initialized .. +Checking Pod READY column for State [1/1] +Pod [oracle-db-54667dfd5f-76sxf] Status is Ready Iter [1/60] +NAME READY STATUS RESTARTS AGE +oracle-db-54667dfd5f-76sxf 1/1 Running 0 8s +NAME READY STATUS RESTARTS AGE +oracle-db-54667dfd5f-76sxf 1/1 Running 0 8s +NAME TYPE CLUSTER-IP EXTERNAL-IP PORT(S) AGE +kubernetes ClusterIP 10.96.0.1 443/TCP 27d +oracle-db NodePort 10.99.58.137 1521:30011/TCP 9s +Oracle DB service is RUNNING with NodePort [30011] + +``` + +For creating an OracleAccessManagement domain, you can use the database connection string, `oracle-db.default.svc.cluster.local:1521/devpdb.k8s`, as `rcuDatabaseURL` parameter in the `domain.input.yaml` file. + +Note: oracle-db.default.svc.cluster.local:1521/devpdb.k8s can be used as rcuDatabaseURL if the Oracle DB Service is started in `default` NameSpace. For a custom NameSpace the URL needs to be modified accordingly e.g. oracle-db.[namespace].svc.cluster.local:1521/devpdb.k8s + +You can access the database through the NodePort outside of the Kubernetes cluster, using the URL `:30011/devpdb.k8s`.
+ +**Note**: To create a OracleAccessManagement domain image, the domain-in-image model needs a public database URL as an `rcuDatabaseURL` parameter. + +## Stop an Oracle Database service in a Kubernetes cluster + +Use this script to stop the Oracle Database service you created using the `start-db-service.sh` script. + +``` +$ ./stop-db-service.sh -h +usage: stop-db-service.sh -n namespace [-h] + -n Kubernetes NameSpace for Oracle DB Service to be Stopped (optional) + (default: default) + -h Help + +Note: Here the NameSpace refers to the NameSpace used in start-db-service.sh + +$ ./stop-db-service.sh +deployment.extensions "oracle-db" deleted +service "oracle-db" deleted +Checking Status for Pod [oracle-db-756f9b99fd-gvv46] in namesapce [default] +Pod [oracle-db-756f9b99fd-gvv46] Status [Terminating] +Pod [oracle-db-756f9b99fd-gvv46] Status [Terminating] +Pod [oracle-db-756f9b99fd-gvv46] Status [Terminating] +Error from server (NotFound): pods "oracle-db-756f9b99fd-gvv46" not found +Pod [oracle-db-756f9b99fd-gvv46] removed from nameSpace [default] +``` + diff --git a/OracleAccessManagement/kubernetes/create-oracle-db-service/common/checkDbState.sh b/OracleAccessManagement/kubernetes/create-oracle-db-service/common/checkDbState.sh new file mode 100755 index 000000000..9ce5aa3d3 --- /dev/null +++ b/OracleAccessManagement/kubernetes/create-oracle-db-service/common/checkDbState.sh @@ -0,0 +1,21 @@ +#!/bin/bash +# Copyright (c) 2020, 2021, Oracle and/or its affiliates. +# Licensed under the Universal Permissive License v 1.0 as shown at https://oss.oracle.com/licenses/upl. +# +logfile="/home/oracle/setup/log/setupDB.log" +max=30 +counter=0 +while [ $counter -le ${max} ] +do + grep "Done ! The database is ready for use ." $logfile + [[ $? == 0 ]] && break; + ((counter++)) + echo "[$counter/${max}] Retrying for Oracle Database Availability..." 
+ sleep 10 +done + +if [ $counter -gt ${max} ]; then + echo "[ERROR] Oracle DB Service is not ready after [${max}] iterations ..." + exit -1 +fi + diff --git a/OracleAccessManagement/kubernetes/create-oracle-db-service/common/oracle.db.yaml b/OracleAccessManagement/kubernetes/create-oracle-db-service/common/oracle.db.yaml new file mode 100755 index 000000000..4185471f3 --- /dev/null +++ b/OracleAccessManagement/kubernetes/create-oracle-db-service/common/oracle.db.yaml @@ -0,0 +1,78 @@ +# Copyright (c) 2020, 2021, Oracle and/or its affiliates. +# Licensed under the Universal Permissive License v 1.0 as shown at https://oss.oracle.com/licenses/upl. +# +apiVersion: v1 +kind: Service +metadata: + name: oracle-db + namespace: default +spec: + ports: + - name: tns + port: 1521 + protocol: TCP + targetPort: 1521 + nodePort: 30011 + selector: + app.kubernetes.io/instance: dev + app.kubernetes.io/name: oracle-db + sessionAffinity: None + type: NodePort +--- +apiVersion: apps/v1 +kind: Deployment +metadata: + name: oracle-db + namespace: default +spec: + replicas: 1 + selector: + matchLabels: + app.kubernetes.io/instance: dev + app.kubernetes.io/name: oracle-db + strategy: + rollingUpdate: + maxSurge: 1 + maxUnavailable: 1 + type: RollingUpdate + template: + metadata: + labels: + app.kubernetes.io/instance: dev + app.kubernetes.io/name: oracle-db + spec: + containers: + - env: + - name: DB_SID + value: devcdb + - name: DB_PDB + value: devpdb + - name: DB_DOMAIN + value: k8s + - name: DB_BUNDLE + value: basic + image: container-registry.oracle.com/database/enterprise:12.2.0.1-slim + imagePullPolicy: IfNotPresent + name: oracle-db + ports: + - containerPort: 1521 + name: tns + protocol: TCP + resources: + limits: + cpu: "2" + memory: "6Gi" + ephemeral-storage: "8Gi" + requests: + cpu: 500m + ephemeral-storage: "6Gi" + terminationMessagePath: /dev/termination-log + terminationMessagePolicy: File + dnsPolicy: ClusterFirst + restartPolicy: Always + schedulerName:
default-scheduler + securityContext: {} + terminationGracePeriodSeconds: 30 + imagePullSecrets: + - name: docker-store + diff --git a/OracleAccessManagement/kubernetes/create-oracle-db-service/create-image-pull-secret.sh b/OracleAccessManagement/kubernetes/create-oracle-db-service/create-image-pull-secret.sh new file mode 100755 index 000000000..94a6e93bb --- /dev/null +++ b/OracleAccessManagement/kubernetes/create-oracle-db-service/create-image-pull-secret.sh @@ -0,0 +1,60 @@ +#!/bin/bash +# Copyright (c) 2020, 2021, Oracle and/or its affiliates. +# Licensed under the Universal Permissive License v 1.0 as shown at https://oss.oracle.com/licenses/upl. +# +# Create ImagePullSecret to pull Oracle DB and OracleAccessManagement Image + +script="${BASH_SOURCE[0]}" +scriptDir="$( cd "$( dirname "${script}" )" && pwd )" + +function usage { + echo "usage: ${script} -u -p -e -s [-h]" + echo " -u Oracle Container Registry User Name (needed)" + echo " -p Oracle Container Registry Password (needed)" + echo " -e email (needed)" + echo " -s Generated Secret (optional) " + echo " (default: docker-store) " + echo " -h Help" + exit $1 +} + +while getopts ":hu:p:s:e:" opt; do + case $opt in + u) username="${OPTARG}" + ;; + p) password="${OPTARG}" + ;; + e) email="${OPTARG}" + ;; + s) secret="${OPTARG}" + ;; + h) usage 0 + ;; + *) usage 1 + ;; + esac +done + +if [ -z ${username} ]; then + echo "${script}: -u must be specified." + usage 1 +fi + +if [ -z ${password} ]; then + echo "${script}: -p must be specified." + usage 1 +fi + +if [ -z ${email} ]; then + echo "${script}: -e must be specified."
+ usage 1 +fi + +if [ -z ${secret} ]; then + secret="docker-store" +fi + +kubectl delete secret/${secret} --ignore-not-found +echo "Creating ImagePullSecret on container-registry.oracle.com" +kubectl create secret docker-registry ${secret} --docker-server=container-registry.oracle.com --docker-username=${username} --docker-password=${password} --docker-email=${email} + diff --git a/OracleAccessManagement/kubernetes/create-oracle-db-service/start-db-service.sh b/OracleAccessManagement/kubernetes/create-oracle-db-service/start-db-service.sh new file mode 100755 index 000000000..9a522d4eb --- /dev/null +++ b/OracleAccessManagement/kubernetes/create-oracle-db-service/start-db-service.sh @@ -0,0 +1,113 @@ +#!/bin/bash +# Copyright (c) 2020, 2021, Oracle and/or its affiliates. +# Licensed under the Universal Permissive License v 1.0 as shown at https://oss.oracle.com/licenses/upl. +# +# Bring up Oracle DB Instance in [default] NameSpace with a NodePort Service + +script="${BASH_SOURCE[0]}" +scriptDir="$( cd "$( dirname "${script}" )" && pwd )" +source ${scriptDir}/../common/utility.sh + +function usage { + echo "usage: ${script} -p -i -s -n [-h]" + echo " -i Oracle DB Image (optional)" + echo " (default: container-registry.oracle.com/database/enterprise:12.2.0.1-slim)" + echo " -p DB Service NodePort (optional)" + echo " (default: 30011, set to 'none' to deploy service without a NodePort)" + echo " -s DB Image PullSecret (optional)" + echo " (default: docker-store) " + echo " -n Configurable Kubernetes NameSpace for Oracle DB Service (optional)" + echo " (default: default) " + echo " -h Help" + exit $1 +} + +while getopts ":h:p:s:i:n:" opt; do + case $opt in + p) nodeport="${OPTARG}" + ;; + s) pullsecret="${OPTARG}" + ;; + i) dbimage="${OPTARG}" + ;; + n) namespace="${OPTARG}" + ;; + h) usage 0 + ;; + *) usage 1 + ;; + esac +done + +if [ -z ${nodeport} ]; then + nodeport=30011 +fi + +if [ -z ${pullsecret} ]; then + pullsecret="docker-store" +fi + +if [ -z ${namespace} ]; 
then + namespace="default" +fi + +echo "Checking Status for NameSpace [$namespace]" +domns=`kubectl get ns ${namespace} | grep ${namespace} | awk '{print $1}'` +if [ -z ${domns} ]; then + echo "Adding NameSpace[$namespace] to Kubernetes Cluster" + kubectl create namespace ${namespace} + sleep 5 +else + echo "Skipping the NameSpace[$namespace] Creation ..." +fi + +if [ -z ${dbimage} ]; then + dbimage="container-registry.oracle.com/database/enterprise:12.2.0.1-slim" +fi + +echo "NodePort[$nodeport] ImagePullSecret[$pullsecret] Image[${dbimage}] NameSpace[${namespace}]" + +# Modify ImagePullSecret and DatabaseImage based on input +sed -i -e '$d' ${scriptDir}/common/oracle.db.yaml +echo ' - name: docker-store' >> ${scriptDir}/common/oracle.db.yaml +sed -i -e "s?name: docker-store?name: ${pullsecret}?g" ${scriptDir}/common/oracle.db.yaml +sed -i -e "s?image:.*?image: ${dbimage}?g" ${scriptDir}/common/oracle.db.yaml +sed -i -e "s?namespace:.*?namespace: ${namespace}?g" ${scriptDir}/common/oracle.db.yaml + +# Modify the NodePort based on input +if [ "${nodeport}" = "none" ]; then + sed -i -e "s? nodePort:? #nodePort:?g" ${scriptDir}/common/oracle.db.yaml + sed -i -e "s? type:.*NodePort? 
#type: NodePort?g" ${scriptDir}/common/oracle.db.yaml +else + sed -i -e "s?[#]*nodePort:.*?nodePort: ${nodeport}?g" ${scriptDir}/common/oracle.db.yaml + sed -i -e "s?[#]*type:.*NodePort?type: NodePort?g" ${scriptDir}/common/oracle.db.yaml # default type is ClusterIP +fi + +kubectl delete service oracle-db -n ${namespace} --ignore-not-found +kubectl apply -f ${scriptDir}/common/oracle.db.yaml + +dbpod=`kubectl get po -n ${namespace} | grep oracle-db | cut -f1 -d " " ` + +checkPod ${dbpod} ${namespace} +checkPodState ${dbpod} ${namespace} "1/1" +checkService oracle-db ${namespace} + +kubectl get po -n ${namespace} +kubectl get service -n ${namespace} + +kubectl cp ${scriptDir}/common/checkDbState.sh -n ${namespace} ${dbpod}:/home/oracle/ +kubectl exec -it ${dbpod} -n ${namespace} /bin/bash /home/oracle/checkDbState.sh +if [ $? != 0 ]; then + echo "######################"; + echo "[ERROR] Could not create Oracle DB Service, check the pod log for pod ${dbpod} in namespace ${namespace}"; + echo "######################"; + exit -3; +fi + +if [ ! "${nodeport}" = "none" ]; then + echo "Oracle DB Service is RUNNING with NodePort [${nodeport}]" +else + echo "Oracle DB Service is RUNNING and does not specify a public NodePort" +fi +echo "Oracle DB Service URL [oracle-db.${namespace}.svc.cluster.local:1521/devpdb.k8s]" + diff --git a/OracleAccessManagement/kubernetes/create-oracle-db-service/stop-db-service.sh b/OracleAccessManagement/kubernetes/create-oracle-db-service/stop-db-service.sh new file mode 100755 index 000000000..7ab14928c --- /dev/null +++ b/OracleAccessManagement/kubernetes/create-oracle-db-service/stop-db-service.sh @@ -0,0 +1,45 @@ +#!/bin/bash +# Copyright (c) 2020, 2021, Oracle and/or its affiliates. +# Licensed under the Universal Permissive License v 1.0 as shown at https://oss.oracle.com/licenses/upl. 
+# +# Drop the DB Service created by start-db-service.sh + +script="${BASH_SOURCE[0]}" +scriptDir="$( cd "$( dirname "${script}" )" && pwd )" +source ${scriptDir}/../common/utility.sh + +function usage { + echo "usage: ${script} -n namespace [-h]" + echo " -n Kubernetes NameSpace for Oracle DB Service to be Stopped (optional)" + echo " (default: default) " + echo " -h Help" + exit $1 +} + +while getopts ":h:n:" opt; do + case $opt in + n) namespace="${OPTARG}" + ;; + h) usage 0 + ;; + *) usage 1 + ;; + esac +done + + +if [ -z ${namespace} ]; then + namespace=default +fi + + +dbpod=`kubectl get po -n ${namespace} | grep oracle-db | cut -f1 -d " " ` +kubectl delete -f ${scriptDir}/common/oracle.db.yaml --ignore-not-found + +if [ -z ${dbpod} ]; then + echo "Couldn't find oracle-db pod in [${namespace}] namespace" +else + checkPodDelete ${dbpod} ${namespace} + kubectl delete svc/oracle-db -n ${namespace} --ignore-not-found +fi + diff --git a/OracleAccessManagement/kubernetes/create-rcu-credentials/README.md b/OracleAccessManagement/kubernetes/create-rcu-credentials/README.md new file mode 100755 index 000000000..843f53f8f --- /dev/null +++ b/OracleAccessManagement/kubernetes/create-rcu-credentials/README.md @@ -0,0 +1,56 @@ +# Creating RCU credentials for an OracleAccessManagement domain + +This sample demonstrates how to create a Kubernetes secret containing the +RCU credentials for an OracleAccessManagement domain. The operator expects this secret to be +named following the pattern `domainUID-rcu-credentials`, where `domainUID` +is the unique identifier of the domain. It must be in the same namespace +that the domain will run in. + +To use the sample, run the command: + +``` +$ ./create-rcu-credentials.sh \ + -u username \ + -p password \ + -a sys_username \ + -q sys_password \ + -d domainUID \ + -n namespace \ + -s secretName +``` + +The parameters are as follows: + +``` + -u username for schema owner (regular user), must be specified. 
+ -p password for schema owner (regular user), must be specified. + -a username for SYSDBA user, must be specified. + -q password for SYSDBA user, must be specified. + -d domainUID, optional. The default value is accessinfra. If specified, the secret will be labeled with the domainUID unless the given value is an empty string. + -n namespace, optional. Use the accessns namespace if not specified. + -s secretName, optional. If not specified, the secret name will be determined based on the domainUID value. +``` + +This creates a `generic` secret containing the user name and password as literal values. + +You can check the secret with the `kubectl describe secret` command. An example is shown below, +including the output: + +``` +$ kubectl -n accessns describe secret accessinfra-rcu-credentials -o yaml +Name: accessinfra-rcu-credentials +Namespace: accessns +Labels: weblogic.domainName=accessinfra + weblogic.domainUID=accessinfra +Annotations: + +Type: Opaque + +Data +==== +password: 12 bytes +sys_password: 12 bytes +sys_username: 3 bytes +username: 4 bytes +``` + diff --git a/OracleAccessManagement/kubernetes/create-rcu-credentials/create-rcu-credentials.sh b/OracleAccessManagement/kubernetes/create-rcu-credentials/create-rcu-credentials.sh new file mode 100755 index 000000000..053af099e --- /dev/null +++ b/OracleAccessManagement/kubernetes/create-rcu-credentials/create-rcu-credentials.sh @@ -0,0 +1,139 @@ +#!/usr/bin/env bash +# Copyright (c) 2020, 2021, Oracle and/or its affiliates. +# Licensed under the Universal Permissive License v 1.0 as shown at https://oss.oracle.com/licenses/upl. +# +# Description +# This sample script creates a Kubernetes secret for RCU credentials. +# +# The following pre-requisites must be handled prior to running this script: +# * The kubernetes namespace must already be created +# +# Secret name determination +# 1) secretName - if specified +# 2) accessinfra-rcu-credentials - if secretName and domainUID are both not specified. 
This is the default out-of-the-box. +# 3) -rcu-credentials - if secretName is not specified, and domainUID is specified. +# 4) rcu-credentials - if secretName is not specified, and domainUID is specified as "". +# +# The generated secret will be labeled with +# weblogic.domainUID=$domainUID +# and +# weblogic.domainName=$domainUID +# Where the $domainUID is the value of the -d command line option, unless the value supplied is an empty String "" +# + +script="${BASH_SOURCE[0]}" + +# +# Function to exit and print an error message +# $1 - text of message +function fail { + echo [ERROR] $* + exit 1 +} + +# Try to execute kubectl to see whether kubectl is available +function validateKubectlAvailable { + if ! [ -x "$(command -v kubectl)" ]; then + fail "kubectl is not installed" + fi +} + +function usage { + echo usage: ${script} -u username -p password -a sysuser -q syspassword [-d domainUID] [-n namespace] [-s secretName] [-h] + echo " -u username for schema owner (regular user), must be specified." + echo " -p password for schema owner (regular user), must be specified." + echo " -a username for SYSDBA user, must be specified." + echo " -q password for SYSDBA user, must be specified." + echo " -d domainUID, optional. The default value is accessinfra. If specified, the secret will be labeled with the domainUID unless the given value is an empty string." + echo " -n namespace, optional. Use the accessns namespace if not specified" + echo " -s secretName, optional. 
If not specified, the secret name will be determined based on the domainUID value" + echo " -h Help" + exit $1 +} + +# +# Parse the command line options +# +domainUID=accessinfra +namespace=accessns +while getopts "hu:p:n:d:s:q:a:" opt; do + case $opt in + u) username="${OPTARG}" + ;; + p) password="${OPTARG}" + ;; + a) sys_username="${OPTARG}" + ;; + q) sys_password="${OPTARG}" + ;; + n) namespace="${OPTARG}" + ;; + d) domainUID="${OPTARG}" + ;; + s) secretName="${OPTARG}" + ;; + h) usage 0 + ;; + *) usage 1 + ;; + esac +done + +if [ -z $secretName ]; then + if [ -z $domainUID ]; then + secretName=rcu-credentials + else + secretName=$domainUID-rcu-credentials + fi +fi + +if [ -z ${username} ]; then + echo "${script}: -u must be specified." + missingRequiredOption="true" +fi + +if [ -z ${password} ]; then + echo "${script}: -p must be specified." + missingRequiredOption="true" +fi + +if [ -z ${sys_username} ]; then + echo "${script}: -a must be specified." + missingRequiredOption="true" +fi + +if [ -z ${sys_password} ]; then + echo "${script}: -q must be specified." + missingRequiredOption="true" +fi + +if [ "${missingRequiredOption}" == "true" ]; then + usage 1 +fi + +# check and see if the secret already exists +result=`kubectl get secret ${secretName} -n ${namespace} --ignore-not-found=true | grep ${secretName} | wc | awk ' { print $1; }'` +if [ "${result:=Error}" != "0" ]; then + fail "The secret ${secretName} already exists in namespace ${namespace}." +fi + +# create the secret +kubectl -n $namespace create secret generic $secretName \ + --from-literal=username=$username \ + --from-literal=password=$password \ + --from-literal=sys_username=$sys_username \ + --from-literal=sys_password=$sys_password + +# label the secret with domainUID if needed +if [ ! 
-z $domainUID ]; then + kubectl label secret ${secretName} -n $namespace weblogic.domainUID=$domainUID weblogic.domainName=$domainUID +fi + +# Verify the secret exists +SECRET=`kubectl get secret ${secretName} -n ${namespace} | grep ${secretName} | wc | awk ' { print $1; }'` +if [ "${SECRET}" != "1" ]; then + fail "The secret ${secretName} was not found in namespace ${namespace}" +fi + +echo "The secret ${secretName} has been successfully created in the ${namespace} namespace." + diff --git a/OracleAccessManagement/kubernetes/create-rcu-schema/README.md b/OracleAccessManagement/kubernetes/create-rcu-schema/README.md new file mode 100755 index 000000000..e17cd596e --- /dev/null +++ b/OracleAccessManagement/kubernetes/create-rcu-schema/README.md @@ -0,0 +1,219 @@ +# Managing RCU schema for a OracleAccessManagement domain + +The sample scripts in this directory demonstrate how to: +* Create an RCU schema in the Oracle DB that will be used by a OracleAccessManagement domain. +* Delete the RCU schema in the Oracle DB used by a OracleAccessManagement domain. + +## Start an Oracle Database service in a Kubernetes cluster + +Use the script ``samples/scripts/create-oracle-db-service/start-db-service.sh`` + +For creating a OracleAccessManagement domain, you can use the Database connection string, `oracle-db.default.svc.cluster.local:1521/devpdb.k8s`, as an `rcuDatabaseURL` parameter in the `domain.input.yaml` file. + +You can access the Database through the NodePort outside of the Kubernetes cluster, using the URL `:30011/devpdb.k8s`. + +**Note**: To create a OracleAccessManagement domain image, the domain-in-image model needs a public Database URL as an `rcuDatabaseURL` parameter. + + +## Create the RCU schema in the Oracle Database + +This script generates the RCU schema based `schemaPrefix` and `dburl`. + +The script assumes that either the image, `oracle/oam:12.2.1.4.0`, is available in the nodes or an `ImagePullSecret` is created to pull the image. 
To create a secret, see the script `create-image-pull-secret.sh`. + +``` +$ ./create-rcu-schema.sh -h +usage: ./create-rcu-schema.sh -s -t -d -i -u -p -n -q -r -o -c [-h] + -s RCU Schema Prefix (required) + -t RCU Schema Type (optional) + (supported values: oam) + -d RCU Oracle Database URL (optional) + (default: oracle-db.default.svc.cluster.local:1521/devpdb.k8s) + -p OracleAccessManagement ImagePullSecret (optional) + (default: none) + -i OracleAccessManagement Image (optional) + (default: oracle/oam:12.2.1.4.0) + -u OracleAccessManagement ImagePullPolicy (optional) + (default: IfNotPresent) + -n Namespace for RCU pod (optional) + (default: default) + -q password for database SYSDBA user. (optional) + (default: Oradoc_db1) + -r password for all schema owner (regular user). (optional) + (default: Oradoc_db1) + -o Output directory for the generated YAML file. (optional) + (default: rcuoutput) + -c Comma-separated variables in the format variablename=value. (optional). + (default: none) + -h Help + +$ ./create-rcu-schema.sh -s domain1 +ImagePullSecret[none] Image[oracle/oam:12.2.1.4.0] dburl[oracle-db.default.svc.cluster.local:1521/devpdb.k8s] rcuType[fmw] customVariables[none] +pod/rcu created +[rcu] already initialized .. +Checking Pod READY column for State [1/1] +Pod [rcu] Status is Ready Iter [1/60] +NAME READY STATUS RESTARTS AGE +rcu 1/1 Running 0 6s +NAME READY STATUS RESTARTS AGE +rcu 1/1 Running 0 11s +CLASSPATH=/u01/jdk/lib/tools.jar:/u01/oracle/wlserver/modules/features/wlst.wls.classpath.jar: + +PATH=/u01/oracle/wlserver/server/bin:/u01/oracle/wlserver/../oracle_common/modules/thirdparty/org.apache.ant/1.10.5.0.0/apache-ant-1.10.5/bin:/u01/jdk/jre/bin:/u01/jdk/bin:/usr/local/sbin:/usr/local/bin:/usr/sbin:/usr/bin:/sbin:/bin:/u01/jdk/bin:/u01/oracle/oracle_common/common/bin:/u01/oracle/wlserver/common/bin:/u01/oracle:/u01/oracle/wlserver/../oracle_common/modules/org.apache.maven_3.2.5/bin + +Your environment has been set. 
+Check if the DB Service is ready to accept request +DB Connection String [oracle-db.default.svc.cluster.local:1521/devpdb.k8s], schemaPrefix [accessinfra] rcuType [fmw] + +**** Success!!! **** + +You can connect to the database in your app using: + + java.util.Properties props = new java.util.Properties(); + props.put("user", "sys as sysdba"); + props.put("password", "Oradoc_db1"); + java.sql.Driver d = + Class.forName("oracle.jdbc.OracleDriver").newInstance(); + java.sql.Connection conn = + Driver.connect("sys as sysdba", props); +Creating RCU Schema for OracleAccessManagement Domain ... +Extra RCU Schema Component Choosen[] + +Processing command line .... + +Repository Creation Utility - Checking Prerequisites +Checking Component Prerequisites +Repository Creation Utility - Creating Tablespaces +Validating and Creating Tablespaces +Create tablespaces in the repository database +Repository Creation Utility - Create +Repository Create in progress. +Executing pre create operations + Percent Complete: 20 + Percent Complete: 20 + ..... + Percent Complete: 96 + Percent Complete: 100 + ..... 
+Executing post create operations + +Repository Creation Utility: Create - Completion Summary + +Database details: +----------------------------- +Host Name : oracle-db.default.svc.cluster.local +Port : 1521 +Service Name : DEVPDB.K8S +Connected As : sys +Prefix for (prefixable) Schema Owners : DOMAIN1 +RCU Logfile : /tmp/RCU2020-05-01_14-35_1160633335/logs/rcu.log + +Component schemas created: +----------------------------- +Component Status Logfile + +Common Infrastructure Services Success /tmp/RCU2020-05-01_14-35_1160633335/logs/stb.log +Oracle Platform Security Services Success /tmp/RCU2020-05-01_14-35_1160633335/logs/opss.log +Audit Services Success /tmp/RCU2020-05-01_14-35_1160633335/logs/iau.log +Audit Services Append Success /tmp/RCU2020-05-01_14-35_1160633335/logs/iau_append.log +Audit Services Viewer Success /tmp/RCU2020-05-01_14-35_1160633335/logs/iau_viewer.log +Metadata Services Success /tmp/RCU2020-05-01_14-35_1160633335/logs/mds.log +WebLogic Services Success /tmp/RCU2020-05-01_14-35_1160633335/logs/wls.log + +Repository Creation Utility - Create : Operation Completed +[INFO] Modify the domain.input.yaml to use [oracle-db.default.svc.cluster.local:1521/devpdb.k8s] as rcuDatabaseURL and [domain1] as rcuSchemaPrefix +``` + +## Drop the RCU schema from the Oracle Database + +Use this script to drop the RCU schema based `schemaPrefix` and `dburl`. + +``` +$ ./drop-rcu-schema.sh -h +usage: ./drop-rcu-schema.sh -s -d -n -q -r [-h] + -s RCU Schema Prefix (required) + -t RCU Schema Type (optional) + (supported values: oam) + -d Oracle Database URL (optional) + (default: oracle-db.default.svc.cluster.local:1521/devpdb.k8s) + -n Namespace where RCU pod is deployed (optional) + (default: default) + -q password for database SYSDBA user. (optional) + (default: Oradoc_db1) + -r password for all schema owner (regular user). (optional) + (default: Oradoc_db1) + -c Comma-separated variables in the format variablename=value. (optional). 
+ (default: none) + -h Help + +$ ./drop-rcu-schema.sh -s domain1 +CLASSPATH=/u01/jdk/lib/tools.jar:/u01/oracle/wlserver/modules/features/wlst.wls.classpath.jar: + +PATH=/u01/oracle/wlserver/server/bin:/u01/oracle/wlserver/../oracle_common/modules/thirdparty/org.apache.ant/1.10.5.0.0/apache-ant-1.10.5/bin:/u01/jdk/jre/bin:/u01/jdk/bin:/usr/local/sbin:/usr/local/bin:/usr/sbin:/usr/bin:/sbin:/bin:/u01/jdk/bin:/u01/oracle/oracle_common/common/bin:/u01/oracle/wlserver/common/bin:/u01/oracle:/u01/oracle/wlserver/../oracle_common/modules/org.apache.maven_3.2.5/bin + +Your environment has been set. +Check if the DB Service is ready to accept request +DB Connection String [oracle-db.default.svc.cluster.local:1521/devpdb.k8s] schemaPrefix [domain1] rcuType[fmw] + +**** Success!!! **** + +You can connect to the database in your app using: + + java.util.Properties props = new java.util.Properties(); + props.put("user", "sys as sysdba"); + props.put("password", "Oradoc_db1"); + java.sql.Driver d = + Class.forName("oracle.jdbc.OracleDriver").newInstance(); + java.sql.Connection conn = + Driver.connect("sys as sysdba", props); +Dropping RCU Schema for OracleAccessManagement Domain ... +Extra RCU Schema Component(s) Choosen[] + +Processing command line .... +Repository Creation Utility - Checking Prerequisites +Checking Global Prerequisites +Repository Creation Utility - Checking Prerequisites +Checking Component Prerequisites +Repository Creation Utility - Drop +Repository Drop in progress. + Percent Complete: 2 + Percent Complete: 14 + ..... + Percent Complete: 99 + Percent Complete: 100 + ..... 
+ +Repository Creation Utility: Drop - Completion Summary + +Database details: +----------------------------- +Host Name : oracle-db.default.svc.cluster.local +Port : 1521 +Service Name : DEVPDB.K8S +Connected As : sys +Prefix for (prefixable) Schema Owners : DOMAIN1 +RCU Logfile : /tmp/RCU2020-05-01_14-42_651700358/logs/rcu.log + +Component schemas dropped: +----------------------------- +Component Status Logfile + +Common Infrastructure Services Success /tmp/RCU2020-05-01_14-42_651700358/logs/stb.log +Oracle Platform Security Services Success /tmp/RCU2020-05-01_14-42_651700358/logs/opss.log +Audit Services Success /tmp/RCU2020-05-01_14-42_651700358/logs/iau.log +Audit Services Append Success /tmp/RCU2020-05-01_14-42_651700358/logs/iau_append.log +Audit Services Viewer Success /tmp/RCU2020-05-01_14-42_651700358/logs/iau_viewer.log +Metadata Services Success /tmp/RCU2020-05-01_14-42_651700358/logs/mds.log +WebLogic Services Success /tmp/RCU2020-05-01_14-42_651700358/logs/wls.log + +Repository Creation Utility - Drop : Operation Completed +pod "rcu" deleted +Checking Status for Pod [rcu] in namesapce [default] +Error from server (NotFound): pods "rcu" not found +Pod [rcu] removed from nameSpace [default] +``` + +## Stop an Oracle Database service in a Kubernetes cluster + +Use the script ``samples/scripts/create-oracle-db-service/stop-db-service.sh`` + diff --git a/OracleAccessManagement/kubernetes/create-rcu-schema/common/createRepository.sh b/OracleAccessManagement/kubernetes/create-rcu-schema/common/createRepository.sh new file mode 100755 index 000000000..fe91c1dcc --- /dev/null +++ b/OracleAccessManagement/kubernetes/create-rcu-schema/common/createRepository.sh @@ -0,0 +1,68 @@ +#!/bin/bash +# Copyright (c) 2020, 2021, Oracle and/or its affiliates. +# Licensed under the Universal Permissive License v 1.0 as shown at https://oss.oracle.com/licenses/upl. +# +. 
/u01/oracle/wlserver/server/bin/setWLSEnv.sh + +echo "Check if the DB Service is ready to accept request " +connectString=${1:-oracle-db.default.svc.cluster.local:1521/devpdb.k8s} +schemaPrefix=${2:-accessinfra} +rcuType=${3:-fmw} +sysPassword=${4:-Oradoc_db1} +customVariables=${5:-none} + +echo "DB Connection String [$connectString], schemaPrefix [${schemaPrefix}] rcuType [${rcuType}] customVariables [${customVariables}]" + +max=100 +counter=0 +while [ $counter -le ${max} ] +do + java utils.dbping ORACLE_THIN "sys as sysdba" ${sysPassword} ${connectString} > dbping.err 2>&1 + [[ $? == 0 ]] && break; + ((counter++)) + echo "[$counter/${max}] Retrying the DB Connection ..." + sleep 10 +done + +if [ $counter -gt ${max} ]; then + echo "Error output from 'java utils.dbping ORACLE_THIN \"sys as sysdba\" SYSPASSWORD ${connectString}' from '$(pwd)/dbping.err':" + cat dbping.err + echo "[ERROR] Oracle DB Service is not ready after [${max}] iterations ..." + exit -1 +else + java utils.dbping ORACLE_THIN "sys as sysdba" ${sysPassword} ${connectString} +fi + +if [ $customVariables != "none" ]; then + extVariables="-variables $customVariables" +else + extVariables="" +fi +case $rcuType in + +oam) + extComponents="-component OAM" + echo "Creating RCU Schema for OracleAccessManagement Domain ..." 
+ ;; + * ) + echo "[ERROR] Unknown RCU Schema Type [$rcuType]" + echo "Supported values: oam" + exit -1 + ;; +esac + +echo "Extra RCU Schema Component Choosen[${extComponents}]" +echo "Extra RCU Schema Variable Choosen[${extVariables}]" + +#Debug +#export DISPLAY=0.0 +#/u01/oracle/oracle_common/bin/rcu -listComponents + +/u01/oracle/oracle_common/bin/rcu -silent -createRepository \ + -databaseType ORACLE -connectString ${connectString} \ + -dbUser sys -dbRole sysdba -useSamePasswordForAllSchemaUsers true \ + -selectDependentsForComponents true \ + -schemaPrefix ${schemaPrefix} ${extComponents} ${extVariables} \ + -component MDS -component IAU -component IAU_APPEND -component IAU_VIEWER \ + -component OPSS -component WLS -component STB < /u01/oracle/pwd.txt + diff --git a/OracleAccessManagement/kubernetes/create-rcu-schema/common/dropRepository.sh b/OracleAccessManagement/kubernetes/create-rcu-schema/common/dropRepository.sh new file mode 100755 index 000000000..6b02d8804 --- /dev/null +++ b/OracleAccessManagement/kubernetes/create-rcu-schema/common/dropRepository.sh @@ -0,0 +1,62 @@ +#!/bin/bash +# Copyright (c) 2020, 2021, Oracle and/or its affiliates. +# Licensed under the Universal Permissive License v 1.0 as shown at https://oss.oracle.com/licenses/upl. +# +. /u01/oracle/wlserver/server/bin/setWLSEnv.sh + +echo "Check if the DB Service is ready to accept request " +connectString=${1:-oracle-db.default.svc.cluster.local:1521/devpdb.k8s} +schemaPrefix=${2:-accessinfra} +rcuType=${3:-fmw} +sysPassword=${4:-Oradoc_db1} +customVariables=${5:-none} + +echo "DB Connection String [$connectString] schemaPrefix [${schemaPrefix}] rcuType[${rcuType}] customVariables[${customVariables}]" + +max=20 +counter=0 +while [ $counter -le ${max} ] +do + java utils.dbping ORACLE_THIN "sys as sysdba" ${sysPassword} ${connectString} > dbping.err 2>&1 + [[ $? == 0 ]] && break; + ((counter++)) + echo "[$counter/${max}] Retrying the DB Connection ..." 
+ sleep 10 +done + +if [ $counter -gt ${max} ]; then + echo "[ERROR] Oracle DB Service is not ready after [${max}] iterations ..." + exit -1 +else + java utils.dbping ORACLE_THIN "sys as sysdba" ${sysPassword} ${connectString} +fi + +if [ $customVariables != "none" ]; then + extVariables="-variables $customVariables" +else + extVariables="" +fi + +case $rcuType in +oam) + extComponents="-component OAM" + echo "Dropping RCU Schema for OracleAccessManagement Domain ..." + ;; + * ) + echo "[ERROR] Unknown RCU Schema Type [$rcuType]" + echo "Supported values: oam" + exit -1 + ;; +esac + +echo "Extra RCU Schema Component(s) Choosen[${extComponents}]" +echo "Extra RCU Schema Variable(s) Choosen[${extVariables}]" + +/u01/oracle/oracle_common/bin/rcu -silent -dropRepository \ + -databaseType ORACLE -connectString ${connectString} \ + -dbUser sys -dbRole sysdba \ + -selectDependentsForComponents true \ + -schemaPrefix ${schemaPrefix} ${extComponents} ${extVariables} \ + -component MDS -component IAU -component IAU_APPEND -component IAU_VIEWER \ + -component OPSS -component WLS -component STB < /u01/oracle/pwd.txt + diff --git a/OracleAccessManagement/kubernetes/create-rcu-schema/common/rcu.yaml b/OracleAccessManagement/kubernetes/create-rcu-schema/common/rcu.yaml new file mode 100755 index 000000000..f10996c99 --- /dev/null +++ b/OracleAccessManagement/kubernetes/create-rcu-schema/common/rcu.yaml @@ -0,0 +1,20 @@ +# Copyright (c) 2020, 2021, Oracle and/or its affiliates. +# Licensed under the Universal Permissive License v 1.0 as shown at https://oss.oracle.com/licenses/upl. 
+# +apiVersion: v1 +kind: Pod +metadata: + labels: + run: rcu + name: rcu + namespace: default +spec: + containers: + - args: + - sleep + - infinity + image: oracle/oam:12.2.1.4.0 + imagePullPolicy: IfNotPresent + name: rcu + imagePullSecrets: + - name: docker-store diff --git a/OracleAccessManagement/kubernetes/create-rcu-schema/common/template/rcu.yaml.template b/OracleAccessManagement/kubernetes/create-rcu-schema/common/template/rcu.yaml.template new file mode 100755 index 000000000..7832ee662 --- /dev/null +++ b/OracleAccessManagement/kubernetes/create-rcu-schema/common/template/rcu.yaml.template @@ -0,0 +1,22 @@ +# Copyright (c) 2020, 2021, Oracle and/or its affiliates. +# Licensed under the Universal Permissive License v 1.0 as shown at https://oss.oracle.com/licenses/upl. +# +# This is a template for RCU Pod +# +apiVersion: v1 +kind: Pod +metadata: + labels: + run: rcu + name: rcu + namespace: %NAMESPACE% +spec: + containers: + - args: + - sleep + - infinity + image: oracle/oam:12.2.1.4.0 + imagePullPolicy: %WEBLOGIC_IMAGE_PULL_POLICY% + name: rcu + %WEBLOGIC_IMAGE_PULL_SECRET_PREFIX%imagePullSecrets: + %WEBLOGIC_IMAGE_PULL_SECRET_PREFIX%- name: %WEBLOGIC_IMAGE_PULL_SECRET_NAME% diff --git a/OracleAccessManagement/kubernetes/create-rcu-schema/create-image-pull-secret.sh b/OracleAccessManagement/kubernetes/create-rcu-schema/create-image-pull-secret.sh new file mode 100755 index 000000000..006c90331 --- /dev/null +++ b/OracleAccessManagement/kubernetes/create-rcu-schema/create-image-pull-secret.sh @@ -0,0 +1,59 @@ +#!/bin/bash +# Copyright (c) 2020, 2021, Oracle and/or its affiliates. +# Licensed under the Universal Permissive License v 1.0 as shown at https://oss.oracle.com/licenses/upl. 
+# +# Create ImagePullSecret to pull Oracle DB and OracleAccessManagement Image + +script="${BASH_SOURCE[0]}" +scriptDir="$( cd "$( dirname "${script}" )" && pwd )" + +function usage { + echo "usage: ${script} -u -p -e -s [-h]" + echo " -u Oracle Container Registry User Name (needed)" + echo " -p Oracle Container Registry Password (needed)" + echo " -e email (needed)" + echo " -s Generated Secret (optional) " + echo " (default: docker-store) " + echo " -h Help" + exit $1 +} + +while getopts ":u:p:s:e:h" opt; do + case $opt in + u) username="${OPTARG}" + ;; + p) password="${OPTARG}" + ;; + e) email="${OPTARG}" + ;; + s) secret="${OPTARG}" + ;; + h) usage 0 + ;; + *) usage 1 + ;; + esac +done + +if [ -z ${username} ]; then + echo "${script}: -u must be specified." + usage 1 +fi + +if [ -z ${password} ]; then + echo "${script}: -p must be specified." + usage 1 +fi + +if [ -z ${email} ]; then + echo "${script}: -e must be specified." + usage 1 +fi + +if [ -z ${secret} ]; then + secret="docker-store" +fi + +kubectl delete secret/${secret} --ignore-not-found +echo "Creating ImagePullSecret on container-registry.oracle.com" +kubectl create secret docker-registry ${secret} --docker-server=container-registry.oracle.com --docker-username=${username} --docker-password=${password} --docker-email=${email} diff --git a/OracleAccessManagement/kubernetes/create-rcu-schema/create-rcu-schema.sh b/OracleAccessManagement/kubernetes/create-rcu-schema/create-rcu-schema.sh new file mode 100755 index 000000000..7e225c901 --- /dev/null +++ b/OracleAccessManagement/kubernetes/create-rcu-schema/create-rcu-schema.sh @@ -0,0 +1,204 @@ +#!/bin/bash +# Copyright (c) 2020, 2021, Oracle and/or its affiliates. +# Licensed under the Universal Permissive License v 1.0 as shown at https://oss.oracle.com/licenses/upl. 
+# +# Configure RCU schema based on schemaPreifix and rcuDatabaseURL + +script="${BASH_SOURCE[0]}" +scriptDir="$( cd "$( dirname "${script}" )" && pwd )" +source ${scriptDir}/../common/utility.sh + +function usage { + echo "usage: ${script} -s -t -d -i -u -p -n -q -r -o -c [-l] [-h] " + echo " -s RCU Schema Prefix (required)" + echo " -t RCU Schema Type (optional)" + echo " (supported values: oam)" + echo " -d RCU Oracle Database URL (optional) " + echo " (default: oracle-db.default.svc.cluster.local:1521/devpdb.k8s) " + echo " -p OracleAccessManagement ImagePullSecret (optional) " + echo " (default: none) " + echo " -i OracleAccessManagement Image (optional) " + echo " (default: oracle/oam:12.2.1.4.0) " + echo " -u OracleAccessManagement ImagePullPolicy (optional) " + echo " (default: IfNotPresent) " + echo " -n Namespace for RCU pod (optional)" + echo " (default: default)" + echo " -q password for database SYSDBA user. (optional)" + echo " (default: Oradoc_db1)" + echo " -r password for all schema owner (regular user). (optional)" + echo " (default: Oradoc_db1)" + echo " -o Output directory for the generated YAML file. (optional)" + echo " (default: rcuoutput)" + echo " -c Comma-separated custom variables in the format variablename=value. (optional)." + echo " (default: none)" + echo " -l Timeout limit in seconds. (optional)." 
+ echo "       (default: 300)"
+ echo " -h Help"
+ exit $1
+}
+
+# Checks if all container(s) in a pod are running state based on READY column using given timeout limit
+# NAME READY STATUS RESTARTS AGE
+# domain1-adminserver 1/1 Running 0 4m
+function checkPodStateUsingCustomTimeout(){
+
+ status="NotReady"
+ count=1
+
+ pod=$1
+ ns=$2
+ state=${3:-1/1}
+ timeoutLimit=${4:-300}
+ max=`expr ${timeoutLimit} / 5`
+
+ echo "Checking Pod READY column for State [$state]"
+ pname=`kubectl get po -n ${ns} | grep -w ${pod} | awk '{print $1}'`
+ if [ -z ${pname} ]; then
+  echo "No such pod [$pod] exists in NameSpace [$ns] "
+  exit -1
+ fi
+
+ rcode=`kubectl get po ${pname} -n ${ns} | grep -w ${pod} | awk '{print $2}'`
+ # String comparison: '-eq' arithmetically evaluates "1/1" (1/1 == 1) and so
+ # would wrongly treat e.g. "2/2" as equal to "1/1".
+ [[ "${rcode}" == "${state}" ]] && status="Ready"
+
+ while [ ${status} != "Ready" -a $count -le $max ] ; do
+  sleep 5
+  rcode=`kubectl get po/$pod -n ${ns} | grep -v NAME | awk '{print $2}'`
+  # Compare against the requested state, not a hard-coded "1/1".
+  [[ "${rcode}" == "${state}" ]] && status="Ready"
+  echo "Pod [$1] Status is ${status} Iter [$count/$max]"
+  count=`expr $count + 1`
+ done
+ if [ $count -gt $max ] ; then
+  # Report the limit actually used by this call, not the global 'timeout'.
+  echo "[ERROR] Unable to start the Pod [$pod] after ${timeoutLimit}s ";
+  exit 1
+ fi
+
+ pname=`kubectl get po -n ${ns} | grep -w ${pod} | awk '{print $1}'`
+ kubectl -n ${ns} get po ${pname}
+}
+
+timeout=300
+
+# 'h' takes no argument; listing it without a colon lets a bare -h reach the h) branch.
+while getopts ":s:d:p:i:t:n:q:r:o:u:c:l:h" opt; do
+  case $opt in
+    s) schemaPrefix="${OPTARG}"
+    ;;
+    t) rcuType="${OPTARG}"
+    ;;
+    d) dburl="${OPTARG}"
+    ;;
+    p) pullsecret="${OPTARG}"
+    ;;
+    i) fmwimage="${OPTARG}"
+    ;;
+    n) namespace="${OPTARG}"
+    ;;
+    q) sysPassword="${OPTARG}"
+    ;;
+    r) schemaPassword="${OPTARG}"
+    ;;
+    o) rcuOutputDir="${OPTARG}"
+    ;;
+    u) imagePullPolicy="${OPTARG}"
+    ;;
+    c) customVariables="${OPTARG}"
+    ;;
+    l) timeout="${OPTARG}"
+    ;;
+    h) usage 0
+    ;;
+    *) usage 1
+    ;;
+  esac
+done
+
+if [ -z ${schemaPrefix} ]; then
+  echo "${script}: -s must be specified."
+ usage 1 +fi + +if [ -z ${dburl} ]; then + dburl="oracle-db.default.svc.cluster.local:1521/devpdb.k8s" +fi + +if [ -z ${rcuType} ]; then + rcuType="fmw" +fi + +if [ -z ${pullsecret} ]; then + pullsecret="none" + pullsecretPrefix="#" +fi + +if [ -z ${fmwimage} ]; then + fmwimage="oracle/oam:12.2.1.4.0" +fi + +if [ -z ${imagePullPolicy} ]; then + imagePullPolicy="IfNotPresent" +fi + +if [ -z ${namespace} ]; then + namespace="default" +fi + +if [ -z ${sysPassword} ]; then + sysPassword="Oradoc_db1" +fi + +if [ -z ${schemaPassword} ]; then + schemaPassword="Oradoc_db1" +fi + +if [ -z ${rcuOutputDir} ]; then + rcuOutputDir="rcuoutput" +fi + +if [ -z ${customVariables} ]; then + customVariables="none" +fi + +if [ -z ${timeout} ]; then + timeout=300 +fi + +echo "ImagePullSecret[$pullsecret] Image[${fmwimage}] dburl[${dburl}] rcuType[${rcuType}] customVariables[${customVariables}]" + +mkdir -p ${rcuOutputDir} +rcuYaml=${rcuOutputDir}/rcu.yaml +rm -f ${rcuYaml} +rcuYamlTemp=${scriptDir}/common/template/rcu.yaml.template +cp $rcuYamlTemp $rcuYaml + +# Modify the ImagePullSecret based on input +sed -i -e "s:%NAMESPACE%:${namespace}:g" $rcuYaml +sed -i -e "s:%WEBLOGIC_IMAGE_PULL_POLICY%:${imagePullPolicy}:g" $rcuYaml +sed -i -e "s:%WEBLOGIC_IMAGE_PULL_SECRET_NAME%:${pullsecret}:g" $rcuYaml +sed -i -e "s:%WEBLOGIC_IMAGE_PULL_SECRET_PREFIX%:${pullsecretPrefix}:g" $rcuYaml +sed -i -e "s?image:.*?image: ${fmwimage}?g" $rcuYaml +kubectl apply -f $rcuYaml + +# Make sure the rcu deployment Pod is RUNNING +checkPod rcu $namespace +checkPodStateUsingCustomTimeout rcu $namespace "1/1" ${timeout} +sleep 5 +kubectl get po/rcu -n $namespace + +# Generate the default password files for rcu command +echo "$sysPassword" > pwd.txt +echo "$schemaPassword" >> pwd.txt + +kubectl exec -n $namespace -i rcu -- bash -c 'cat > /u01/oracle/createRepository.sh' < ${scriptDir}/common/createRepository.sh +kubectl exec -n $namespace -i rcu -- bash -c 'cat > /u01/oracle/pwd.txt' < pwd.txt +rm -rf 
createRepository.sh pwd.txt + +kubectl exec -n $namespace -i rcu /bin/bash /u01/oracle/createRepository.sh ${dburl} ${schemaPrefix} ${rcuType} ${sysPassword} ${customVariables} +if [ $? != 0 ]; then + echo "######################"; + echo "[ERROR] Could not create the RCU Repository"; + echo "######################"; + exit -3; +fi + +echo "[INFO] Modify the domain.input.yaml to use [$dburl] as rcuDatabaseURL and [${schemaPrefix}] as rcuSchemaPrefix " + diff --git a/OracleAccessManagement/kubernetes/create-rcu-schema/drop-rcu-schema.sh b/OracleAccessManagement/kubernetes/create-rcu-schema/drop-rcu-schema.sh new file mode 100755 index 000000000..0f29946b7 --- /dev/null +++ b/OracleAccessManagement/kubernetes/create-rcu-schema/drop-rcu-schema.sh @@ -0,0 +1,108 @@ +#!/bin/bash +# Copyright (c) 2020, 2021, Oracle and/or its affiliates. +# Licensed under the Universal Permissive License v 1.0 as shown at https://oss.oracle.com/licenses/upl. +# +# Drop the RCU schema based on schemaPreifix and Database URL + +script="${BASH_SOURCE[0]}" +scriptDir="$( cd "$( dirname "${script}" )" && pwd )" +source ${scriptDir}/../common/utility.sh + +function usage { + echo "usage: ${script} -s -d -n -q -r -c [-h]" + echo " -s RCU Schema Prefix (required)" + echo " -t RCU Schema Type (optional)" + echo " (supported values: oam) " + echo " -d Oracle Database URL (optional)" + echo " (default: oracle-db.default.svc.cluster.local:1521/devpdb.k8s) " + echo " -n Namespace where RCU pod is deployed (optional)" + echo " (default: default) " + echo " -q password for database SYSDBA user. (optional)" + echo " (default: Oradoc_db1)" + echo " -r password for all schema owner (regular user). (optional)" + echo " (default: Oradoc_db1)" + echo " -c Comma-separated custom variables in the format variablename=value. (optional)." 
+ echo " (default: none)" + echo " -h Help" + exit $1 +} + +while getopts ":h:s:d:t:n:q:r:c:" opt; do + case $opt in + s) schemaPrefix="${OPTARG}" + ;; + t) rcuType="${OPTARG}" + ;; + d) dburl="${OPTARG}" + ;; + n) namespace="${OPTARG}" + ;; + q) sysPassword="${OPTARG}" + ;; + r) schemaPassword="${OPTARG}" + ;; + c) customVariables="${OPTARG}" + ;; + h) usage 0 + ;; + *) usage 1 + ;; + esac +done + +if [ -z ${schemaPrefix} ]; then + echo "${script}: -s must be specified." + usage 1 +fi + +if [ -z ${dburl} ]; then + dburl="oracle-db.default.svc.cluster.local:1521/devpdb.k8s" +fi + +if [ -z ${rcuType} ]; then + rcuType="fmw" +fi + +if [ -z ${namespace} ]; then + namespace="default" +fi + +if [ -z ${sysPassword} ]; then + sysPassword="Oradoc_db1" +fi + +if [ -z ${schemaPassword} ]; then + schemaPassword="Oradoc_db1" +fi + +if [ -z ${customVariables} ]; then + customVariables="none" +fi + +rcupod=`kubectl get po -n ${namespace} | grep rcu | cut -f1 -d " " ` +if [ -z ${rcupod} ]; then + echo "RCU deployment pod not found in [$namespace] Namespace" + exit -2 +fi + +#fmwimage=`kubectl get pod/rcu -o jsonpath="{..image}"` +echo "DB Connection String [$dbUrl], schemaPrefix [${schemaPrefix}] rcuType [${rcuType}] schemaProfileType [${customVariables}]" + +echo "${sysPassword}" > pwd.txt +echo "${schemaPassword}" >> pwd.txt + +kubectl exec -n $namespace -i rcu -- bash -c 'cat > /u01/oracle/dropRepository.sh' < ${scriptDir}/common/dropRepository.sh +kubectl exec -n $namespace -i rcu -- bash -c 'cat > /u01/oracle/pwd.txt' < pwd.txt +rm -rf dropRepository.sh pwd.txt + +kubectl exec -n $namespace -i rcu /bin/bash /u01/oracle/dropRepository.sh ${dburl} ${schemaPrefix} ${rcuType} ${sysPassword} ${customVariables} +if [ $? 
!= 0 ]; then + echo "######################"; + echo "[ERROR] Could not drop the RCU Repository based on dburl[${dburl}] schemaPrefix[${schemaPrefix}] "; + echo "######################"; + exit -3; +fi + +kubectl delete pod rcu -n ${namespace} +checkPodDelete rcu ${namespace} + diff --git a/OracleAccessManagement/kubernetes/create-weblogic-domain-credentials/README.md b/OracleAccessManagement/kubernetes/create-weblogic-domain-credentials/README.md new file mode 100755 index 000000000..5e4c6fb26 --- /dev/null +++ b/OracleAccessManagement/kubernetes/create-weblogic-domain-credentials/README.md @@ -0,0 +1,50 @@ +# Creating credentials for a WebLogic domain + +This sample demonstrates how to create a Kubernetes secret containing the +credentials for a WebLogic domain. The operator expects this secret to be +named following the pattern `domainUID-weblogic-credentials`, where `domainUID` +is the unique identifier of the domain. It must be in the same namespace +that the domain will run in. + +To use the sample, run the command: + +``` +$ ./create-weblogic-credentials.sh -u username -p password -d domainUID -n namespace -s secretName +``` + +The parameters are as follows: + +``` + -u user name, must be specified. + -p password, must be specified. + -d domainUID, optional. The default value is accessinfra. If specified, the secret will be labeled with the domainUID unless the given value is an empty string. + -n namespace, optional. Use the accessns namespace if not specified. + -s secretName, optional. If not specified, the secret name will be determined based on the domainUID value. +``` + +This creates a `generic` secret containing the user name and password as literal values. + +You can check the secret with the `kubectl get secret` command. 
An example is shown below, +including the output: + +``` +$ kubectl -n accessns get secret accessinfra-weblogic-credentials -o yaml +apiVersion: v1 +data: + password: d2VsY29tZTE= + username: d2VibG9naWM= +kind: Secret +metadata: + creationTimestamp: 2018-12-12T20:25:20Z + labels: + weblogic.domainName: accessinfra + weblogic.domainUID: accessinfra + name: accessinfra-weblogic-credentials + namespace: accessns + resourceVersion: "5680" + selfLink: /api/v1/namespaces/accessns/secrets/accessinfra-weblogic-credentials + uid: 0c2b3510-fe4c-11e8-994d-00001700101d +type: Opaque + +``` + diff --git a/OracleAccessManagement/kubernetes/create-weblogic-domain-credentials/create-weblogic-credentials.sh b/OracleAccessManagement/kubernetes/create-weblogic-domain-credentials/create-weblogic-credentials.sh new file mode 100755 index 000000000..ad11f1853 --- /dev/null +++ b/OracleAccessManagement/kubernetes/create-weblogic-domain-credentials/create-weblogic-credentials.sh @@ -0,0 +1,121 @@ +#!/usr/bin/env bash +# Copyright (c) 2020, 2021, Oracle and/or its affiliates. +# Licensed under the Universal Permissive License v 1.0 as shown at https://oss.oracle.com/licenses/upl. +# +# Description +# This sample script creates a Kubernetes secret for WebLogic domain admin credentials. +# +# The following pre-requisites must be handled prior to running this script: +# * The kubernetes namespace must already be created +# +# Secret name determination +# 1) secretName - if specified +# 2) accessinfra-weblogic-credentials - if secretName and domainUID are both not specified. This is the default out-of-the-box. +# 3) -weblogic-credentials - if secretName is not specified, and domainUID is specified. +# 4) weblogic-credentials - if secretName is not specified, and domainUID is specified as "". 
+# +# The generated secret will be labeled with +# weblogic.domainUID=$domainUID +# and +# weblogic.domainName=$domainUID +# Where the $domainUID is the value of the -d command line option, unless the value supplied is an empty String "" +# + +script="${BASH_SOURCE[0]}" + +# +# Function to exit and print an error message +# $1 - text of message +function fail { + echo [ERROR] $* + exit 1 +} + +# Try to execute kubectl to see whether kubectl is available +function validateKubectlAvailable { + if ! [ -x "$(command -v kubectl)" ]; then + fail "kubectl is not installed" + fi +} + +function usage { + echo usage: ${script} -u username -p password [-d domainUID] [-n namespace] [-s secretName] [-h] + echo " -u username, must be specified." + echo " -p password, must be specified." + echo " -d domainUID, optional. The default value is accessinfra. If specified, the secret will be labeled with the domainUID unless the given value is an empty string." + echo " -n namespace, optional. Use the accessns namespace if not specified" + echo " -s secretName, optional. If not specified, the secret name will be determined based on the domainUID value" + echo " -h Help" + exit $1 +} + +# +# Parse the command line options +# +domainUID=accessinfra +namespace=accessns +while getopts "hu:p:n:d:s:" opt; do + case $opt in + u) username="${OPTARG}" + ;; + p) password="${OPTARG}" + ;; + n) namespace="${OPTARG}" + ;; + d) domainUID="${OPTARG}" + ;; + s) secretName="${OPTARG}" + ;; + h) usage 0 + ;; + *) usage 1 + ;; + esac +done + +if [ -z $secretName ]; then + if [ -z $domainUID ]; then + secretName=weblogic-credentials + else + secretName=$domainUID-weblogic-credentials + fi +fi + +if [ -z ${username} ]; then + echo "${script}: -u must be specified." + missingRequiredOption="true" +fi + +if [ -z ${password} ]; then + echo "${script}: -p must be specified." 
+ missingRequiredOption="true" +fi + +if [ "${missingRequiredOption}" == "true" ]; then + usage 1 +fi + +# check and see if the secret already exists +result=`kubectl get secret ${secretName} -n ${namespace} --ignore-not-found=true | grep ${secretName} | wc | awk ' { print $1; }'` +if [ "${result:=Error}" != "0" ]; then + fail "The secret ${secretName} already exists in namespace ${namespace}." +fi + +# create the secret +kubectl -n $namespace create secret generic $secretName \ + --from-literal=username=$username \ + --from-literal=password=$password + +# label the secret with domainUID if needed +if [ ! -z $domainUID ]; then + kubectl label secret ${secretName} -n $namespace weblogic.domainUID=$domainUID weblogic.domainName=$domainUID +fi + +# Verify the secret exists +SECRET=`kubectl get secret ${secretName} -n ${namespace} | grep ${secretName} | wc | awk ' { print $1; }'` +if [ "${SECRET}" != "1" ]; then + fail "The secret ${secretName} was not found in namespace ${namespace}" +fi + +echo "The secret ${secretName} has been successfully created in the ${namespace} namespace." + diff --git a/OracleAccessManagement/kubernetes/create-weblogic-domain-pv-pvc/README.md b/OracleAccessManagement/kubernetes/create-weblogic-domain-pv-pvc/README.md new file mode 100755 index 000000000..222519bd0 --- /dev/null +++ b/OracleAccessManagement/kubernetes/create-weblogic-domain-pv-pvc/README.md @@ -0,0 +1,222 @@ +# Sample persistent volume and persistent volume claim + +The sample scripts demonstrate the creation of a Kubernetes persistent volume (PV) and persistent volume claim (PVC), which can then be used in a domain resource as a persistent storage for the WebLogic domain home or log files. + +A PV and PVC can be shared by multiple WebLogic domains or dedicated to a particular domain. + +## Prerequisites + +Please read the [Persistent Volumes](../../../../site/persistent-volumes.md) guide before proceeding. 
+ +## Using the scripts to create a PV and PVC + +Prior to running the `create-pv-pvc.sh` script, make a copy of the `create-pv-pvc-inputs.yaml` file, and uncomment and explicitly configure the `weblogicDomainStoragePath` property in the inputs file. + +Run the create script, pointing it at your inputs file and an output directory: + +``` +$ ./create-pv-pvc.sh \ + -i create-pv-pvc-inputs.yaml \ + -o /path/to/output-directory +``` + +The `create-pv-pvc.sh` script will create a subdirectory `pv-pvcs` under the given `/path/to/output-directory` directory. By default, the script generates two YAML files, namely `weblogic-sample-pv.yaml` and `weblogic-sample-pvc.yaml`, in the `/path/to/output-directory/pv-pvcs`. These two YAML files can be used to create the Kubernetes resources using the `kubectl create -f` command. + +``` +$ kubectl create -f accessinfra-domain-pv.yaml +$ kubectl create -f accessinfra-domain-pvc.yaml + +``` + +As a convenience, the script can optionally create the PV and PVC resources using the `-e` option. + +The usage of the create script is as follows: + +``` +$ sh create-pv-pvc.sh -h +usage: create-pv-pvc.sh -i file -o dir [-e] [-h] + -i Parameter inputs file, must be specified. + -o Output directory for the generated yaml files, must be specified. + -e Also create the Kubernetes objects using the generated yaml files + -h Help +``` + +If you copy the sample scripts to a different location, make sure that you copy everything in the `/kubernetes/samples/scripts` directory together into the target directory, maintaining the original directory hierarchy. + +## Configuration parameters + +The PV and PVC creation inputs can be customized by editing the `create-pv-pvc-inputs.yaml` file. + +| Parameter | Definition | Default | +| --- | --- | --- | +| `domainUID` | ID of the domain resource to which the generated PV and PVC will be dedicated. Leave it empty if the PV and PVC are going to be shared by multiple domains. 
| no default | +| `namespace` | Kubernetes namespace to create the PVC. | `default` | +| `baseName` | Base name of the PV and PVC. The generated PV and PVC will be `-pv` and `-pvc` respectively. | `weblogic-sample` | +| `weblogicDomainStoragePath` | Physical path of the storage for the PV. When `weblogicDomainStorageType` is set to `HOST_PATH`, this value should be set the to path to the domain storage on the Kubernetes host. When `weblogicDomainStorageType` is set to NFS, then `weblogicDomainStorageNFSServer` should be set to the IP address or name of the DNS server, and this value should be set to the exported path on that server. Note that the path where the domain is mounted in the WebLogic containers is not affected by this setting, that is determined when you create your domain. | no default | +| `weblogicDomainStorageReclaimPolicy` | Kubernetes PVC policy for the persistent storage. The valid values are: `Retain`, `Delete`, and `Recycle`. | `Retain` | +| `weblogicDomainStorageSize` | Total storage allocated for the PVC. | `10Gi` | +| `weblogicDomainStorageType` | Type of storage. Legal values are `NFS` and `HOST_PATH`. If using `NFS`, `weblogicDomainStorageNFSServer` must be specified. | `HOST_PATH` | +| `weblogicDomainStorageNFSServer`| Name or IP address of the NFS server. This setting only applies if `weblogicDomainStorateType` is `NFS`. | no default | + +## Shared versus dedicated PVC + +By default, the `domainUID` is left empty in the inputs file, which means the generated PV and PVC will not be associated with a particular domain, but can be shared by multiple domain resources in the same Kubernetes namespaces as the PV and PVC. + +For the use cases where dedicated PV and PVC are desired for a particular domain, the `domainUID` needs to be set in the `create-pv-pvc-inputs.yaml` file. The presence of a non-empty `domainUID` in the inputs file will cause the generated PV and PVC to be associated with the specified `domainUID`. 
The association includes that the names of the generated YAML files and the Kubernetes PV and PVC objects are decorated with the `domainUID`, and the PV and PVC objects are also labeled with the `domainUID`. + +## Verify the results + +The create script will verify that the PV and PVC were created, and will report a failure if there was any error. However, it may be desirable to manually verify the PV and PVC, even if just to gain familiarity with the various Kubernetes objects that were created by the script. + +### Generated YAML files with the default inputs + +The content of the generated `accessinfra-domain-pvc.yaml`: + +``` +# Copyright (c) 2020, 2021, Oracle and/or its affiliates. +# Licensed under the Universal Permissive License v 1.0 as shown at https://oss.oracle.com/licenses/upl. +# +kind: PersistentVolumeClaim +apiVersion: v1 +metadata: + name: accessinfra-domain-pvc + namespace: default + + storageClassName: accessinfra-domain-storage-class + accessModes: + - ReadWriteMany + resources: + requests: + storage: 10Gi +``` + +The content of the generated `accessinfra-domain-pv.yaml`: +``` +# Copyright (c) 2020, 2021, Oracle and/or its affiliates. +# Licensed under the Universal Permissive License v 1.0 as shown at https://oss.oracle.com/licenses/upl. +# +apiVersion: v1 +kind: PersistentVolume +metadata: + name: accessinfra-domain-pv + # labels: + # weblogic.domainUID: +spec: + storageClassName: accessinfra-domain-storage-class + capacity: + storage: 10Gi + accessModes: + - ReadWriteMany + # Valid values are Retain, Delete or Recycle + persistentVolumeReclaimPolicy: Retain + hostPath: + # nfs: + # server: %SAMPLE_STORAGE_NFS_SERVER% + path: "/scratch/k8s_dir" + +``` + +### Generated YAML files for dedicated PV and PVC + +The content of the generated `accessinfra-domain-pvc.yaml` when `domainUID` is set to `domain1`: + +``` +# Copyright (c) 2020, 2021, Oracle and/or its affiliates. 
+# Licensed under the Universal Permissive License v 1.0 as shown at https://oss.oracle.com/licenses/upl. +# +kind: PersistentVolumeClaim +apiVersion: v1 +metadata: + name: accessinfra-domain-pvc + namespace: default + labels: + weblogic.domainUID: accessinfra +spec: + storageClassName: accessinfra-domain-storage-class + accessModes: + - ReadWriteMany + resources: + requests: + storage: 10Gi +``` + +The content of the generated `accessinfra-domain-pv.yaml` when `domainUID` is set to `domain1`: +``` +# Copyright (c) 2020, 2021, Oracle and/or its affiliates. +# Licensed under the Universal Permissive License v 1.0 as shown at https://oss.oracle.com/licenses/upl. +# +apiVersion: v1 +kind: PersistentVolume +metadata: + name: accessinfra-domain-pv + labels: + weblogic.domainUID: accessinfra +spec: + storageClassName: accessinfra-domain-storage-class + capacity: + storage: 10Gi + accessModes: + - ReadWriteMany + # Valid values are Retain, Delete or Recycle + persistentVolumeReclaimPolicy: Retain + hostPath: + # nfs: + # server: %SAMPLE_STORAGE_NFS_SERVER% + path: "/scratch/k8s_dir" +``` + +### Verify the PV and PVC objects + +You can use this command to verify the persistent volume was created, note that the `Status` field +should have the value `Bound`, indicating the that persistent volume has been claimed: + +``` +$ kubectl describe pv accessinfra-domain-pv +Name: accessinfra-domain-pv +Annotations: pv.kubernetes.io/bound-by-controller=yes +StorageClass: accessinfra-domain-storage-class +Status: Bound +Claim: default/accessinfra-domain-pvc +Reclaim Policy: Retain +Access Modes: RWX +Capacity: 10Gi +Message: +Source: + Type: HostPath (bare host directory volume) + Path: /scratch/k8s_dir + HostPathType: +Events: + +``` + +You can use this command to verify the persistent volume claim was created: + +``` +$ kubectl describe pvc accessinfra-domain-pvc +Name: accessinfra-domain-pvc +Namespace: default +StorageClass: accessinfra-domain-storage-class +Status: Bound +Volume: 
accessinfra-domain-pv +Annotations: pv.kubernetes.io/bind-completed=yes + pv.kubernetes.io/bound-by-controller=yes +Finalizers: [] +Capacity: 10Gi +Access Modes: RWX +Events: + +``` + +## Troubleshooting + +* Message: `[ERROR] The weblogicDomainStoragePath parameter in kubernetes/samples/scripts/create-weblogic-domain-pv-pvc/create-pv-pvc-inputs.yaml is missing, null or empty` +Edit the file and set the value of the field. This value must be a directory that is world writable. +Optionally, follow these steps to tighten permissions on the named directory after you run the sample the first time: + + * Become the root user. + * `ls -nd $value-of-weblogicDomainStoragePath` + * Note the values of the third and fourth field of the output. + * `chown $third-field:$fourth-field $value-of-weblogicDomainStoragePath` + * `chmod 755 $value-of-weblogicDomainStoragePath` + * Return to your normal user ID. + diff --git a/OracleAccessManagement/kubernetes/create-weblogic-domain-pv-pvc/create-pv-pvc-inputs.yaml b/OracleAccessManagement/kubernetes/create-weblogic-domain-pv-pvc/create-pv-pvc-inputs.yaml new file mode 100755 index 000000000..1fa458b8b --- /dev/null +++ b/OracleAccessManagement/kubernetes/create-weblogic-domain-pv-pvc/create-pv-pvc-inputs.yaml @@ -0,0 +1,44 @@ +# Copyright (c) 2020, 2021, Oracle and/or its affiliates. +# Licensed under the Universal Permissive License v 1.0 as shown at https://oss.oracle.com/licenses/upl. +# +# The version of this inputs file. Do not modify. +version: create-accessinfra-pv-pvc-inputs-v1 + +# The base name of the pv and pvc +baseName: domain + +# Unique ID identifying a domain. +# If left empty, the generated pv can be shared by multiple domains +# This ID must not contain an underscope ("_"), and must be lowercase and unique across all domains in a Kubernetes cluster. +domainUID: accessinfra + +# Name of the namespace for the persistent volume claim +namespace: accessns + +# Persistent volume type for the persistent storage. 
+# The value must be 'HOST_PATH' or 'NFS'.
+# If using 'NFS', weblogicDomainStorageNFSServer must be specified.
+weblogicDomainStorageType: HOST_PATH
+
+# The server name or ip address of the NFS server to use for the persistent storage.
+# The following line must be uncommented and customized if weblogicDomainStorageType is NFS:
+#weblogicDomainStorageNFSServer: nfsServer
+
+# Physical path of the persistent storage.
+# When weblogicDomainStorageType is set to HOST_PATH, this value should be set to the path to the
+# domain storage on the Kubernetes host.
+# When weblogicDomainStorageType is set to NFS, then weblogicDomainStorageNFSServer should be set
+# to the IP address or name of the DNS server, and this value should be set to the exported path
+# on that server.
+# Note that the path where the domain is mounted in the WebLogic containers is not affected by this
+# setting, that is determined when you create your domain.
+# The following line must be uncommented and customized:
+weblogicDomainStoragePath: /scratch/k8s_dir
+
+# Reclaim policy of the persistent storage
+# The valid values are: 'Retain', 'Delete', and 'Recycle'
+weblogicDomainStorageReclaimPolicy: Retain
+
+# Total storage allocated to the persistent storage.
+weblogicDomainStorageSize: 10Gi
+
diff --git a/OracleAccessManagement/kubernetes/create-weblogic-domain-pv-pvc/create-pv-pvc.sh b/OracleAccessManagement/kubernetes/create-weblogic-domain-pv-pvc/create-pv-pvc.sh
new file mode 100755
index 000000000..ba3de3459
--- /dev/null
+++ b/OracleAccessManagement/kubernetes/create-weblogic-domain-pv-pvc/create-pv-pvc.sh
@@ -0,0 +1,267 @@
+#!/usr/bin/env bash
+# Copyright (c) 2020, 2021, Oracle and/or its affiliates.
+# Licensed under the Universal Permissive License v 1.0 as shown at https://oss.oracle.com/licenses/upl.
+#
+# Description
+# This sample script generates the Kubernetes yaml files for a persistent volume and persistent volume claim
+# that can be used by a domain custom resource. 
+# +# The creation inputs can be customized by editing create-pv-pvc-inputs.yaml +# +# The following pre-requisites must be handled prior to running this script: +# * The Kubernetes namespace must already be created +# + +# Initialize +script="${BASH_SOURCE[0]}" +scriptDir="$( cd "$( dirname "${script}" )" && pwd )" +source ${scriptDir}/../common/utility.sh +source ${scriptDir}/../common/validate.sh + +function usage { + echo usage: ${script} -i file -o dir [-e] [-h] + echo " -i Parameter inputs file, must be specified." + echo " -o Output directory for the generated yaml files, must be specified." + echo " -e Also create the Kubernetes objects using the generated yaml files" + echo " -h Help" + exit $1 +} + +# +# Parse the command line options +# +executeIt=false +while getopts "ehi:o:" opt; do + case $opt in + i) valuesInputFile="${OPTARG}" + ;; + o) outputDir="${OPTARG}" + ;; + e) executeIt=true + ;; + h) usage 0 + ;; + *) usage 1 + ;; + esac +done + +if [ -z ${valuesInputFile} ]; then + echo "${script}: -i must be specified." + missingRequiredOption="true" +fi + +if [ -z ${outputDir} ]; then + echo "${script}: -o must be specified." + missingRequiredOption="true" +fi + +if [ "${missingRequiredOption}" == "true" ]; then + usage 1 +fi + +# +# Function to initialize and validate the output directory +# for the generated yaml files for this domain. 
+# +function initOutputDir { + pvOutputDir="$outputDir/pv-pvcs" + + if [ -z ${domainUID} ]; then + pvOutput="${pvOutputDir}/${baseName}-pv.yaml" + pvcOutput="${pvOutputDir}/${baseName}-pvc.yaml" + persistentVolumeName=${baseName}-pv + persistentVolumeClaimName=${baseName}-pvc + else + pvOutput="${pvOutputDir}/${domainUID}-${baseName}-pv.yaml" + pvcOutput="${pvOutputDir}/${domainUID}-${baseName}-pvc.yaml" + persistentVolumeName=${domainUID}-${baseName}-pv + persistentVolumeClaimName=${domainUID}-${baseName}-pvc + fi + removeFileIfExists ${pvOutputDir}/{valuesInputFile} + removeFileIfExists ${pvOutputDir}/{pvOutput} + removeFileIfExists ${pvOutputDir}/{pvcOutput} + removeFileIfExists ${pvOutputDir}/create-pv-pvc-inputs.yaml +} + +# +# Function to setup the environment to run the create domain job +# +function initialize { + + # Validate the required files exist + validateErrors=false + + if [ -z "${valuesInputFile}" ]; then + validationError "You must use the -i option to specify the name of the inputs parameter file (a modified copy of kubernetes/samples/scripts/create-weblogic-domain-pv-pvc/create-pv-pvc-inputs.yaml)." + else + if [ ! -f ${valuesInputFile} ]; then + validationError "Unable to locate the input parameters file ${valuesInputFile}" + fi + fi + + if [ -z "${outputDir}" ]; then + validationError "You must use the -o option to specify the name of an existing directory to store the generated yaml files in." + fi + + domainPVInput="${scriptDir}/pv-template.yaml" + if [ ! -f ${domainPVInput} ]; then + validationError "The template file ${domainPVInput} for generating a persistent volume was not found" + fi + + domainPVCInput="${scriptDir}/pvc-template.yaml" + if [ ! 
-f ${domainPVCInput} ]; then + validationError "The template file ${domainPVCInput} for generating a persistent volume claim was not found" + fi + + failIfValidationErrors + + # Parse the commonn inputs file + parseCommonInputs + validateInputParamsSpecified \ + weblogicDomainStoragePath \ + weblogicDomainStorageSize \ + baseName \ + namespace \ + version + + export requiredInputsVersion="create-weblogic-sample-domain-pv-pvc-inputs-v1" + validateDomainUid + validateNamespace + validateWeblogicDomainStorageType + validateWeblogicDomainStorageReclaimPolicy + initOutputDir + failIfValidationErrors +} + + +# +# Function to generate the yaml files for creating a domain +# +function createYamlFiles { + + # Create a directory for this domain's output files + mkdir -p ${pvOutputDir} + + # Make sure the output directory has a copy of the inputs file. + # The user can either pre-create the output directory, put the inputs + # file there, and create the domain from it, or the user can put the + # inputs file some place else and let this script create the output directory + # (if needed) and copy the inputs file there. 
+ copyInputsFileToOutputDirectory ${valuesInputFile} "${pvOutputDir}/create-pv-pvc-inputs.yaml" + + enabledPrefix="" # uncomment the feature + disabledPrefix="# " # comment out the feature + + echo Generating ${pvOutput} + + cp ${domainPVInput} ${pvOutput} + if [ "${weblogicDomainStorageType}" == "NFS" ]; then + hostPathPrefix="${disabledPrefix}" + nfsPrefix="${enabledPrefix}" + sed -i -e "s:%SAMPLE_STORAGE_NFS_SERVER%:${weblogicDomainStorageNFSServer}:g" ${pvOutput} + else + hostPathPrefix="${enabledPrefix}" + nfsPrefix="${disabledPrefix}" + fi + + sed -i -e "s:%NAMESPACE%:$namespace:g" ${pvOutput} + if [ -z ${domainUID} ]; then + domainUIDLabelPrefix="${disabledPrefix}" + separator="" + else + domainUIDLabelPrefix="${enabledPrefix}" + separator="-" + fi + sed -i -e "s:%DOMAIN_UID%:$domainUID:g" ${pvOutput} + sed -i -e "s:%SEPARATOR%:$separator:g" ${pvOutput} + sed -i -e "s:%DOMAIN_UID_LABEL_PREFIX%:${domainUIDLabelPrefix}:g" ${pvOutput} + + sed -i -e "s:%BASE_NAME%:$baseName:g" ${pvOutput} + sed -i -e "s:%SAMPLE_STORAGE_PATH%:${weblogicDomainStoragePath}:g" ${pvOutput} + sed -i -e "s:%SAMPLE_STORAGE_RECLAIM_POLICY%:${weblogicDomainStorageReclaimPolicy}:g" ${pvOutput} + sed -i -e "s:%SAMPLE_STORAGE_SIZE%:${weblogicDomainStorageSize}:g" ${pvOutput} + sed -i -e "s:%HOST_PATH_PREFIX%:${hostPathPrefix}:g" ${pvOutput} + sed -i -e "s:%NFS_PREFIX%:${nfsPrefix}:g" ${pvOutput} + + # Generate the yaml to create the persistent volume claim + echo Generating ${pvcOutput} + + cp ${domainPVCInput} ${pvcOutput} + sed -i -e "s:%NAMESPACE%:$namespace:g" ${pvcOutput} + sed -i -e "s:%BASE_NAME%:${baseName}:g" ${pvcOutput} + + sed -i -e "s:%DOMAIN_UID%:$domainUID:g" ${pvcOutput} + sed -i -e "s:%SEPARATOR%:$separator:g" ${pvcOutput} + sed -i -e "s:%DOMAIN_UID_LABEL_PREFIX%:${domainUIDLabelPrefix}:g" ${pvcOutput} + + sed -i -e "s:%SAMPLE_STORAGE_SIZE%:${weblogicDomainStorageSize}:g" ${pvcOutput} + + # Remove any "...yaml-e" files left over from running sed + rm -f 
${pvOutputDir}/*.yaml-e +} + +# +# Function to create the domain's persistent volume +# +function createDomainPV { + # Check if the persistent volume is already available + checkPvExists ${persistentVolumeName} + if [ "${PV_EXISTS}" = "false" ]; then + echo Creating the persistent volume ${persistentVolumeName} + kubectl create -f ${pvOutput} + checkPvState ${persistentVolumeName} Available + fi +} + +# +# Function to create the domain's persistent volume claim +# Must be called after createDomainPV since it relies on +# createDomainPV defining persistentVolumeName +# +function createDomainPVC { + # Check if the persistent volume claim is already available + checkPvcExists ${persistentVolumeClaimName} ${namespace} + if [ "${PVC_EXISTS}" = "false" ]; then + echo Creating the persistent volume claim ${persistentVolumeClaimName} + kubectl create -f ${pvcOutput} + checkPvState ${persistentVolumeName} Bound + fi +} + +# +# Function to output to the console a summary of the work completed +# +function printSummary { + echo "The following files were generated:" + echo " ${pvOutput}" + echo " ${pvcOutput}" +} + +# +# Perform the following sequence of steps to create a domain +# + +# Setup the environment for running this script and perform initial validation checks +initialize + +# Generate the yaml files for creating the domain +createYamlFiles + +# All done if the generate only option is true +if [ "${executeIt}" = true ]; then + + # Create the domain's persistent volume + createDomainPV + + # Create the domain's persistent volume claim + createDomainPVC +fi + +# Output a job summary +printSummary + +echo +echo Completed + + diff --git a/OracleAccessManagement/kubernetes/create-weblogic-domain-pv-pvc/pv-template.yaml b/OracleAccessManagement/kubernetes/create-weblogic-domain-pv-pvc/pv-template.yaml new file mode 100755 index 000000000..49e33a22f --- /dev/null +++ b/OracleAccessManagement/kubernetes/create-weblogic-domain-pv-pvc/pv-template.yaml @@ -0,0 +1,21 @@ +# 
Copyright (c) 2020, 2021, Oracle and/or its affiliates. +# Licensed under the Universal Permissive License v 1.0 as shown at https://oss.oracle.com/licenses/upl. +# +apiVersion: v1 +kind: PersistentVolume +metadata: + name: %DOMAIN_UID%%SEPARATOR%%BASE_NAME%-pv + labels: + %DOMAIN_UID_LABEL_PREFIX%weblogic.domainUID: %DOMAIN_UID% +spec: + storageClassName: %DOMAIN_UID%%SEPARATOR%%BASE_NAME%-storage-class + capacity: + storage: %SAMPLE_STORAGE_SIZE% + accessModes: + - ReadWriteMany + # Valid values are Retain, Delete or Recycle + persistentVolumeReclaimPolicy: %SAMPLE_STORAGE_RECLAIM_POLICY% + %HOST_PATH_PREFIX%hostPath: + %NFS_PREFIX%nfs: + %NFS_PREFIX%server: %SAMPLE_STORAGE_NFS_SERVER% + path: "%SAMPLE_STORAGE_PATH%" diff --git a/OracleAccessManagement/kubernetes/create-weblogic-domain-pv-pvc/pvc-template.yaml b/OracleAccessManagement/kubernetes/create-weblogic-domain-pv-pvc/pvc-template.yaml new file mode 100755 index 000000000..49e8d5afb --- /dev/null +++ b/OracleAccessManagement/kubernetes/create-weblogic-domain-pv-pvc/pvc-template.yaml @@ -0,0 +1,17 @@ +# Copyright (c) 2020, 2021, Oracle and/or its affiliates. +# Licensed under the Universal Permissive License v 1.0 as shown at https://oss.oracle.com/licenses/upl. 
+# +kind: PersistentVolumeClaim +apiVersion: v1 +metadata: + name: %DOMAIN_UID%%SEPARATOR%%BASE_NAME%-pvc + namespace: %NAMESPACE% + labels: + %DOMAIN_UID_LABEL_PREFIX%weblogic.domainUID: %DOMAIN_UID% +spec: + storageClassName: %DOMAIN_UID%%SEPARATOR%%BASE_NAME%-storage-class + accessModes: + - ReadWriteMany + resources: + requests: + storage: %SAMPLE_STORAGE_SIZE% diff --git a/OracleAccessManagement/kubernetes/delete-domain/README.md b/OracleAccessManagement/kubernetes/delete-domain/README.md new file mode 100755 index 000000000..800c7094a --- /dev/null +++ b/OracleAccessManagement/kubernetes/delete-domain/README.md @@ -0,0 +1,27 @@ +# Deleting domain resources created while executing the samples + +After running the sample, you will need to release domain resources that +can then be used for other purposes. The script in this sample demonstrates one approach to releasing +domain resources. + +## Using the script to delete domain resources + +```shell +$ ./delete-weblogic-domain-resources.sh \ + -d domain-uid[,domain-uid...] \ + [-s max-seconds] \ + [-t] +``` +The required option `-d` takes `domain-uid` values (separated + by commas and no spaces) to identify the domain resources that should be deleted. + +To limit the amount of time spent on attempting to delete domain resources, use `-s`. +The option must be followed by an integer that represents the total number of seconds +that will be spent attempting to delete resources. The default number of seconds is 120. + +The optional `-t` option shows what the script will delete without executing the deletion. 
+ +To see the help associated with the script: +```shell +$ ./delete-weblogic-domain-resources.sh -h +``` diff --git a/OracleAccessManagement/kubernetes/delete-domain/delete-weblogic-domain-resources.sh b/OracleAccessManagement/kubernetes/delete-domain/delete-weblogic-domain-resources.sh new file mode 100755 index 000000000..fd54d1ea0 --- /dev/null +++ b/OracleAccessManagement/kubernetes/delete-domain/delete-weblogic-domain-resources.sh @@ -0,0 +1,283 @@ +#!/bin/bash +# Copyright (c) 2019, 2021, Oracle and/or its affiliates. +# Licensed under the Universal Permissive License v 1.0 as shown at https://oss.oracle.com/licenses/upl. +# +# Description: +# Use this script to delete all kubernetes resources associated +# with a set of given domains. Alternatively, run the script +# in a test mode to show what would be deleted without actually +# performing the deletes. +# +# Usage: +# See "function usage" below or call this script with no parameters. +# + +script="${BASH_SOURCE[0]}" + +function usage { +cat << EOF + Usage: + + $(basename $0) -d domain-uid,domain-uid,... [-s max-seconds] [-t] + $(basename $0) -h + + Perform a best-effort delete of the kubernetes resources for + the given domain(s), and retry until either max-seconds is reached + or all resources were deleted (default $default_maxwaitsecs seconds). + + The domains can be specified as a comma-separated list of + domain-uids (no spaces). The domains can be located in any + kubernetes namespace. + + Specify '-t' to run the script in a test mode which will + show kubernetes commands but not actually perform them. + + The script runs in phases: + + Phase 1: Set the serverStartPolicy of each domain to NEVER if + it's not already NEVER. This should cause each + domain's operator to initiate a controlled shutdown + of the domain. Immediately proceed to phase 2. + + Phase 2: Wait up to half of max-seconds for WebLogic + Server pods to exit normally, and then proceed + to phase 3. 
+ + Phase 3: Periodically delete any remaining kubernetes resources + for the specified domains, including any pods + leftover from previous phases. Exit and fail if + max-seconds is exceeded and there are any leftover + kubernetes resources. + + This script exits with a zero status on success, and a + non-zero status on failure. +EOF +} + +# +# getDomainResources domain(s) outfilename +# +# Usage: +# getDomainResources domainA,domainB,... outfilename +# +# Internal helper function +# +# File output is all domain related resources for the given domain uids, one per line, +# in the form: 'kind name [-n namespace]'. For example: +# PersistentVolumeClaim domain1-pv-claim -n default +# PersistentVolume domain1-pv +# +function getDomainResources { + local domain_regex='' + LABEL_SELECTOR="weblogic.domainUID in ($1)" + IFS=',' read -ra UIDS <<< "$1" + for i in "${!UIDS[@]}"; do + if [ $i -gt 0 ]; then + domain_regex="$domain_regex|" + fi + domain_regex="$domain_regex^Domain ${UIDS[$i]} " + done + + # clean the output file + if [ -e $2 ]; then + rm $2 + fi + + # first, let's get all namespaced types with -l $LABEL_SELECTOR + NAMESPACED_TYPES="pod,job,deploy,rs,service,pvc,ingress,cm,serviceaccount,role,rolebinding,secret" + + kubectl get $NAMESPACED_TYPES \ + -l "$LABEL_SELECTOR" \ + -o=jsonpath='{range .items[*]}{.kind}{" "}{.metadata.name}{" -n "}{.metadata.namespace}{"\n"}{end}' \ + --all-namespaces=true >> $2 + + # if domain crd exists, look for domains too: + kubectl get crd domains.weblogic.oracle > /dev/null 2>&1 + if [ $? 
-eq 0 ]; then + kubectl get domain \ + -o=jsonpath='{range .items[*]}{.kind}{" "}{.metadata.name}{" -n "}{.metadata.namespace}{"\n"}{end}' \ + --all-namespaces=true | egrep "$domain_regex" >> $2 + fi + + # now, get all non-namespaced types with -l $LABEL_SELECTOR + + NOT_NAMESPACED_TYPES="pv,clusterroles,clusterrolebindings" + + kubectl get $NOT_NAMESPACED_TYPES \ + -l "$LABEL_SELECTOR" \ + -o=jsonpath='{range .items[*]}{.kind}{" "}{.metadata.name}{"\n"}{end}' \ + --all-namespaces=true >> $2 +} + +# +# deleteDomains domain(s) maxwaitsecs +# +# Usage: +# deleteDomains domainA,domainB,... maxwaitsecs +# +# Internal helper function +# This function first sets the serverStartPolicy of each Domain to NEVER +# and waits up to half of $2 for pods to 'self delete'. It then performs +# a helm delete on $1, and finally it directly deletes +# any remaining k8s resources for domain $1 (including any remaining pods) +# and retries these direct deletes up to $2 seconds. +# +# If global $test_mode is true, it shows candidate actions but doesn't +# actually perform them +# +function deleteDomains { + + if [ "$test_mode" = "true" ]; then + echo @@ Test mode! Displaying commands for deleting kubernetes resources with label weblogic.domainUID \'$1\' without actually deleting them. + else + echo @@ Deleting kubernetes resources with label weblogic.domainUID \'$1\'. 
+ fi + + local maxwaitsecs=${2:-$default_maxwaitsecs} + local tempfile="/tmp/$(basename $0).tmp.$$" # == /tmp/[script-file-name].tmp.[pid] + local mstart=`date +%s` + local phase=1 + + while : ; do + # get all k8s resources with matching domain-uid labels and put them in $tempfile + getDomainResources $1 $tempfile + + # get a count of all k8s resources with matching domain-uid labels + local allcount=`wc -l $tempfile | awk '{ print $1 }'` + + # get a count of all WLS pods (any pod with a matching domain-uid label that doesn't have 'traefik' or 'apache' embedded in its name) + local podcount=`grep "^Pod" $tempfile | grep -v traefik | grep -v apache | wc -l | awk '{ print $1 }'` + + local mnow=`date +%s` + + echo @@ $allcount resources remaining after $((mnow - mstart)) seconds, including $podcount WebLogic Server pods. Max wait is $maxwaitsecs seconds. + + # Exit if all k8s resources deleted or max wait seconds exceeded. + + if [ $allcount -eq 0 ]; then + echo @@ Success. + rm -f $tempfile + exit 0 + elif [ $((mnow - mstart)) -gt $maxwaitsecs ]; then + echo @@ Error! Max wait of $maxwaitsecs seconds exceeded with $allcount resources remaining, including $podcount WebLogic Server pods. Giving up. Remaining resources: + cat $tempfile + rm -f $tempfile + exit $allcount + fi + + # In phase 1, set the serverStartPolicy of each domain to NEVER and then immediately + # proceed to phase 2. If there are no domains or WLS pods, we also immediately go to phase 2. 
+ + if [ $phase -eq 1 ]; then + phase=2 + if [ $podcount -gt 0 ]; then + echo @@ "Setting serverStartPolicy to NEVER on each domain (this should cause operator(s) to initiate a controlled shutdown of the domain's pods.)" + cat $tempfile | grep "^Domain" | while read line; do + local name="`echo $line | awk '{ print $2 }'`" + local namespace="`echo $line | awk '{ print $4 }'`" + if [ "$test_mode" = "true" ]; then + echo "kubectl patch domain $name -n $namespace -p '{\"spec\":{\"serverStartPolicy\":\"NEVER\"}}' --type merge" + else + kubectl patch domain $name -n $namespace -p '{"spec":{"serverStartPolicy":"NEVER"}}' --type merge + fi + done + fi + fi + + # In phase 2, wait for the WLS pod count to go down to 0 for at most half + # of 'maxwaitsecs'. Otherwise proceed immediately to phase 3. + + if [ $phase -eq 2 ]; then + if [ $podcount -eq 0 ]; then + echo @@ All pods shutdown, about to directly delete remaining resources. + phase=3 + elif [ $((mnow - mstart)) -gt $((maxwaitsecs / 2)) ]; then + echo @@ Warning! $podcount WebLogic Server pods remaining but wait time exceeds half of max wait seconds. About to directly delete all remaining resources, including the leftover pods. + phase=3 + else + echo @@ "Waiting for operator to shutdown pods (will wait for no more than half of max wait seconds before directly deleting them)." + sleep 3 + continue + fi + fi + + # In phase 3, directly delete remaining k8s resources for the given domainUids + # (including any leftover WLS pods from previous phases). + + # for each namespace with leftover resources, try delete them + cat $tempfile | awk '{ print $4 }' | grep -v "^$" | sort -u | while read line; do + if [ "$test_mode" = "true" ]; then + echo kubectl -n $line delete $NAMESPACED_TYPES -l "$LABEL_SELECTOR" + else + kubectl -n $line delete $NAMESPACED_TYPES -l "$LABEL_SELECTOR" + fi + done + + # if there are any non-namespaced types left, try delete them + local no_namespace_count=`grep -c -v " -n " $tempfile` + if [ ! 
"$no_namespace_count" = "0" ]; then + if [ "$test_mode" = "true" ]; then + echo kubectl delete $NOT_NAMESPACED_TYPES -l "$LABEL_SELECTOR" + else + kubectl delete $NOT_NAMESPACED_TYPES -l "$LABEL_SELECTOR" + fi + fi + + # Delete domains, if any + cat $tempfile | grep "^Domain " | while read line; do + if [ "$test_mode" = "true" ]; then + echo kubectl delete $line + else + kubectl delete $line + fi + done + + sleep 3 + done +} + +# main entry point + +# default when to stop retrying (override via command line) +default_maxwaitsecs=120 + +# optional test mode that lists what would be deleted without +# actually deleting (override via command line) +test_mode=false + +domains="" + +# parse command line options +while getopts ":d:s:th" opt; do + case $opt in + d) domains="${OPTARG}" + ;; + + s) maxwaitsecs="${OPTARG}" + ;; + + t) test_mode="true" + ;; + + h) usage + exit 0 + ;; + + *) usage + exit 9999 + ;; + esac +done + +if [ "$domains" = "" ]; then + usage + exit 9999 +fi + +if [ ! -x "$(command -v kubectl)" ]; then + echo "@@ Error! kubectl is not installed." + exit 9999 +fi + +deleteDomains "${domains}" "${maxwaitsecs:-$default_maxwaitsecs}" + diff --git a/OracleAccessManagement/kubernetes/domain-lifecycle/README.md b/OracleAccessManagement/kubernetes/domain-lifecycle/README.md new file mode 100755 index 000000000..0eeab95d5 --- /dev/null +++ b/OracleAccessManagement/kubernetes/domain-lifecycle/README.md @@ -0,0 +1,186 @@ +### Domain life cycle sample scripts + +The operator provides sample scripts to start up or shut down a specific Managed Server or cluster in a deployed domain, or the entire deployed domain. + +**Note**: Prior to running these scripts, you must have previously created and deployed the domain. These scripts make use of [jq](https://stedolan.github.io/jq/) for processing JSON. You must have `jq 1.5 or higher` installed in order to run these scripts. See the installation options on the [jq downlod](https://stedolan.github.io/jq/download/) page. 
+ +These scripts can be helpful when scripting the life cycle of a WebLogic Server domain. For information on how to start, stop, restart, and scale WebLogic Server instances in your domain, see [Domain Life Cycle](https://oracle.github.io/weblogic-kubernetes-operator/userguide/managing-domains/domain-lifecycle). + +#### Scripts to start and stop a WebLogic Server +The `startServer.sh` script starts a WebLogic Server in a domain. For clustered Managed Servers, either it increases the `spec.clusters[].replicas` value for the Managed Server's cluster by `1` or updates the `spec.managedServers[].serverStartPolicy` attribute of the domain resource or both as necessary. For the Administration Server, it updates the value of the `spec.adminServer.serverStartPolicy` attribute of the domain resource. For non-clustered Managed Servers, it updates the `spec.managedServers[].serverStartPolicy` attribute of the domain resource. The script provides an option to keep the `spec.clusters[].replicas` value constant for clustered servers. See the script `usage` information by using the `-h` option. + +Use the following command to start the server either by increasing the replica count or by updating the server start policy: +``` +$ startServer.sh -d domain1 -n weblogic-domain-1 -s managed-server1 +[INFO] Updating replica count for cluster 'cluster-1' to 1. +domain.weblogic.oracle/domain1 patched +[INFO] Successfully updated replica count for cluster 'cluster-1' to 1. +``` + +Use the following command to start the server without increasing the replica count: +``` +$ startServer.sh -d domain1 -n weblogic-domain-1 -s managed-server2 -k +[INFO] Patching start policy for 'managed-server2' to 'ALWAYS'. +domain.weblogic.oracle/domain1 patched +[INFO] Successfully patched server 'managed-server2' with 'ALWAYS' start policy. +``` + +The `stopServer.sh` script shuts down a running WebLogic Server in a domain. 
For clustered Managed Servers, either it decreases the `spec.clusters[].replicas` value for the Managed Server's cluster by `1` or updates the `spec.managedServers[].serverStartPolicy` attribute of the domain resource or both as necessary. For the Administration Server, it updates the value of the `spec.adminServer.serverStartPolicy` attribute of the domain resource. For non-clustered Managed Servers, it updates the `spec.managedServers[].serverStartPolicy` attribute of the domain resource. The script provides an option to keep the `spec.clusters[].replicas` value constant for clustered servers. See the script `usage` information by using the `-h` option. + +Use the following command to stop the server either by decreasing the replica count or by updating the server start policy: +``` +$ stopServer.sh -d domain1 -n weblogic-domain-1 -s managed-server1 +[INFO] Updating replica count for cluster cluster-1 to 0. +domain.weblogic.oracle/domain1 patched +[INFO] Successfully updated replica count for cluster 'cluster-1' to 0. +``` + +Use the following command to stop the server without decreasing the replica count: +``` +$ stopServer.sh -d domain1 -n weblogic-domain-1 -s managed-server2 -k +[INFO] Unsetting the current start policy 'ALWAYS' for 'managed-server2'. +domain.weblogic.oracle/domain1 patched +[INFO] Successfully unset policy 'ALWAYS'. +``` + +### Scripts to start and stop a cluster + +The `startCluster.sh` script starts a cluster by patching the `spec.clusters[].serverStartPolicy` attribute of the domain resource to `IF_NEEDED`. The operator will start the WebLogic Server instance Pods that are part of the cluster after the `serverStartPolicy` attribute is updated to `IF_NEEDED`. See the script `usage` information by using the `-h` option. +``` +$ startCluster.sh -d domain1 -n weblogic-domain-1 -c cluster-1 +[INFO]Patching start policy of cluster 'cluster-1' from 'NEVER' to 'IF_NEEDED'. 
+domain.weblogic.oracle/domain1 patched +[INFO] Successfully patched cluster 'cluster-1' with 'IF_NEEDED' start policy!. +``` +The `stopCluster.sh` script shuts down a cluster by patching the `spec.clusters[].serverStartPolicy` attribute of the domain resource to `NEVER`. The operator will shut down the WebLogic Server instance Pods that are part of the cluster after the `serverStartPolicy` attribute is updated to `NEVER`. See the script `usage` information by using the `-h` option. +``` +$ stopCluster.sh -d domain1 -n weblogic-domain-1 -c cluster-1 +[INFO] Patching start policy of cluster 'cluster-1' from 'IF_NEEDED' to 'NEVER'. +domain.weblogic.oracle/domain1 patched +[INFO] Successfully patched cluster 'cluster-1' with 'NEVER' start policy! +``` +### Scripts to start and stop a domain +The `startDomain.sh` script starts a deployed domain by patching the `spec.serverStartPolicy` attribute of the domain resource to `IF_NEEDED`. The operator will start the WebLogic Server instance Pods that are part of the domain after the `spec.serverStartPolicy` attribute of the domain resource is updated to `IF_NEEDED`. See the script `usage` information by using the `-h` option. +``` +$ startDomain.sh -d domain1 -n weblogic-domain-1 +[INFO] Patching domain 'domain1' from serverStartPolicy='NEVER' to 'IF_NEEDED'. +domain.weblogic.oracle/domain1 patched +[INFO] Successfully patched domain 'domain1' in namespace 'weblogic-domain-1' with 'IF_NEEDED' start policy! +``` + +The `stopDomain.sh` script shuts down a domain by patching the `spec.serverStartPolicy` attribute of the domain resource to `NEVER`. The operator will shut down the WebLogic Server instance Pods that are part of the domain after the `spec.serverStartPolicy` attribute is updated to `NEVER`. See the script `usage` information by using the `-h` option. +``` +$ stopDomain.sh -d domain1 -n weblogic-domain-1 +[INFO] Patching domain 'domain1' in namespace 'weblogic-domain-1' from serverStartPolicy='IF_NEEDED' to 'NEVER'. 
+domain.weblogic.oracle/domain1 patched +[INFO] Successfully patched domain 'domain1' in namespace 'weblogic-domain-1' with 'NEVER' start policy! +``` + +### Script to scale a WebLogic cluster + +The `scaleCluster.sh` script scales a WebLogic cluster by patching the `spec.clusters[].replicas` attribute of the domain resource to the specified value. The operator will perform the scaling operation for the WebLogic cluster based on the specified value of the `replicas` attribute after its value is updated. See the script `usage` information by using the `-h` option. +``` +$ scaleCluster.sh -d domain1 -n weblogic-domain-1 -c cluster-1 -r 3 +[2021-02-26T19:04:14.335000Z][INFO] Patching replicas for cluster 'cluster-1' to '3'. +domain.weblogic.oracle/domain1 patched +[2021-02-26T19:04:14.466000Z][INFO] Successfully patched replicas for cluster 'cluster-1'! +``` + +### Script to view the status of a WebLogic cluster + +The `clusterStatus.sh` script can be used to view the status of a WebLogic cluster in the WebLogic domain managed by the operator. The WebLogic Cluster Status contains information about the minimum, maximum, goal, current, and ready replica count for a WebLogic cluster. This script displays a table containing the status for WebLogic clusters in one or more domains across one or more namespaces. See the script `usage` information by using the `-h` option. + +Use the following command to view the status of all WebLogic clusters in all domains across all namespaces. +```shell +$ clusterStatus.sh + +WebLogic Cluster Status -n "" -d "" -c "": + +namespace domain cluster min max goal current ready +--------- ------ ------- --- --- ---- ------- ----- +ns-kvmt mii-domain1 cluster-1 1 5 5 5 5 +weblogic-domain-1 domain1 cluster-1 0 4 2 2 2 +weblogic-domain-1 domain1 cluster-2 0 4 0 0 0 +``` + +Use the following command to view the status of all WebLogic clusters in 'domain1' in 'weblogic-domain-1' namespace. 
+``` +$ clusterStatus.sh -d domain1 -n weblogic-domain-1 + +WebLogic Cluster Status -n "weblogic-domain-1" -d "domain1" -c "": + +namespace domain cluster min max goal current ready +--------- ------ ------- --- --- ---- ------- ----- +weblogic-domain-1 domain1 cluster-1 0 4 2 2 2 +weblogic-domain-1 domain1 cluster-2 0 4 0 0 0 +``` + +### Scripts to initiate a rolling restart of a WebLogic domain or cluster + +The `rollDomain.sh` script can be used to initiate a rolling restart of the WebLogic Server Pods in a domain managed by the operator. Similarly, the `rollCluster.sh` script can be used to initiate a rolling restart of the WebLogic Server Pods belonging to a WebLogic cluster in a domain managed by the operator. + +The `rollDomain.sh` script updates the value of the `spec.restartVersion` attribute of the domain resource. Then, the operator will do a rolling restart of the Server Pods in the WebLogic domain after the value of the `spec.restartVersion` is updated. You can provide the new value for `spec.restartVersion` as a parameter to the script or the script will automatically generate a new value to trigger the rolling restart. See the script `usage` information by using the `-h` option. + +``` +$ rollDomain.sh -d domain1 -n weblogic-domain-1 +[2021-03-24T04:01:19.733000Z][INFO] Patching restartVersion for domain 'domain1' to '1'. +domain.weblogic.oracle/domain1 patched +[2021-03-24T04:01:19.850000Z][INFO] Successfully patched restartVersion for domain 'domain1'! +``` + +Use the following command to roll the Server Pods in a WebLogic domain with a specific `restartVersion`: +``` +$ rollDomain.sh -r v1 -d domain1 -n weblogic-domain-1 +[2021-03-24T13:43:47.586000Z][INFO] Patching restartVersion for domain 'domain1' to 'v1'. +domain.weblogic.oracle/domain1 patched +[2021-03-24T13:43:47.708000Z][INFO] Successfully patched restartVersion for domain 'domain1'! 
+``` + +The `rollCluster.sh` script updates the value of the `spec.clusters[].restartVersion` attribute of the domain resource. Then, the operator will do a rolling restart of the WebLogic cluster Server Pods after the value of the `spec.clusters[].restartVersion` is updated. You can provide the new value of the `restartVersion` as a parameter to the script or the script will automatically generate a new value to trigger the rolling restart. See the script `usage` information by using the `-h` option. + +``` +$ rollCluster.sh -c cluster-1 -d domain1 -n weblogic-domain-1 +[2021-03-24T04:03:27.521000Z][INFO] Patching restartVersion for cluster 'cluster-1' to '2'. +domain.weblogic.oracle/domain1 patched +[2021-03-24T04:03:27.669000Z][INFO] Successfully patched restartVersion for cluster 'cluster-1'! +``` + +Use the following command to roll the WebLogic Cluster Servers with a specific `restartVersion`: +``` +$ rollCluster.sh -r v2 -c cluster-1 -d domain1 -n weblogic-domain-1 +[2021-03-24T13:46:16.833000Z][INFO] Patching restartVersion for cluster 'cluster-1' to 'v2'. +domain.weblogic.oracle/domain1 patched +[2021-03-24T13:46:16.975000Z][INFO] Successfully patched restartVersion for cluster 'cluster-1'! +``` + +### Scripts to restart a WebLogic Server in a domain +The `restartServer.sh` script can be used to restart a WebLogic Server in a domain. This script restarts the Server by deleting the Server Pod for the WebLogic Server instance. +``` +$ restartServer.sh -s managed-server1 -d domain1 -n weblogic-domain-1 +[2021-03-24T22:20:22.498000Z][INFO] Initiating restart of 'managed-server1' by deleting server pod 'domain1-managed-server1'. +[2021-03-24T22:20:37.614000Z][INFO] Server restart succeeded ! +``` + +### Scripts to explicitly initiate introspection of a WebLogic domain + +The `introspectDomain.sh` script can be used to rerun a WebLogic domain's introspect job by explicitly initiating the introspection. 
This script updates the value of the `spec.introspectVersion` attribute of the domain resource. The resulting behavior depends on your domain home source type and other factors, see [Initiating introspection](https://oracle.github.io/weblogic-kubernetes-operator/userguide/managing-domains/domain-lifecycle/introspection/#initiating-introspection) for details. You can provide the new value of the `introspectVersion` as a parameter to the script or the script will automatically generate a new value to trigger the introspection. See the script `usage` information by using the `-h` option. + +Use the following command to rerun a domain's introspect job with the `introspectVersion` value generated by the script. +``` +$ introspectDomain.sh -d domain1 -n weblogic-domain-1 +[2021-03-24T21:37:55.989000Z][INFO] Patching introspectVersion for domain 'domain1' to '1'. +domain.weblogic.oracle/domain1 patched +[2021-03-24T21:37:56.110000Z][INFO] Successfully patched introspectVersion for domain 'domain1'! +``` + +Use the following command to rerun a domain's introspect job with a specific `introspectVersion` value. +``` +$ introspectDomain.sh -i v1 -d domain1 -n weblogic-domain-1 +[2021-03-24T21:38:34.369000Z][INFO] Patching introspectVersion for domain 'domain1' to 'v1'. +domain.weblogic.oracle/domain1 patched +[2021-03-24T21:38:34.488000Z][INFO] Successfully patched introspectVersion for domain 'domain1'! +``` + +### Watching the Pods after executing life cycle scripts + +After executing the lifecycle scripts described above for a domain or a cluster or a Server, you can manually run the `kubectl -n MYNS get pods --watch=true --show-labels` command to watch the effect of running the scripts and monitor the status and labels of various Pods. You will need to do 'Ctrl-C' to stop watching the Pods and exit. 
diff --git a/OracleAccessManagement/kubernetes/domain-lifecycle/clusterStatus.sh b/OracleAccessManagement/kubernetes/domain-lifecycle/clusterStatus.sh new file mode 100755 index 000000000..8bfeb45f3 --- /dev/null +++ b/OracleAccessManagement/kubernetes/domain-lifecycle/clusterStatus.sh @@ -0,0 +1,130 @@ +# !/bin/sh +# Copyright (c) 2021, Oracle and/or its affiliates. +# Licensed under the Universal Permissive License v 1.0 as shown at https://oss.oracle.com/licenses/upl. + +set -eu +set -o pipefail + +function usage() { +cat< ${kubernetesCli} patch domain ${domainUid} \ + -n ${domainNamespace} --type=merge --patch \"${patchJson}\"" + fi + ${kubernetesCli} patch domain ${domainUid} -n ${domainNamespace} --type=merge --patch "${patchJson}" +} + +# timestamp +# purpose: echo timestamp in the form yyyy-mm-ddThh:mm:ss.nnnnnnZ +# example: 2018-10-01T14:00:00.000001Z +function timestamp() { + local timestamp="`date --utc '+%Y-%m-%dT%H:%M:%S.%NZ' 2>&1`" + if [ ! "${timestamp/illegal/xyz}" = "${timestamp}" ]; then + # old shell versions don't support %N or --utc + timestamp="`date -u '+%Y-%m-%dT%H:%M:%S.000000Z' 2>&1`" + fi + echo "${timestamp}" +} + +# +# Function to note that a validate error has occurred +# +function validationError { + printError $* + validateErrors=true +} + +# +# Function to cause the script to fail if there were any validation errors +# +function failIfValidationErrors { + if [ "$validateErrors" = true ]; then + printError 'The errors listed above must be resolved before the script can continue. Please see usage information below.' + usage 1 + fi +} + +# +# Function to lowercase a value and make it a legal DNS1123 name +# $1 - value to convert to DNS legal name +# $2 - return value containing DNS legal name. 
+function toDNS1123Legal { + local name=$1 + local __result=$2 + local val=`echo "${name}" | tr "[:upper:]" "[:lower:]"` + val=${val//"_"/"-"} + eval $__result="'$val'" +} + diff --git a/OracleAccessManagement/kubernetes/domain-lifecycle/introspectDomain.sh b/OracleAccessManagement/kubernetes/domain-lifecycle/introspectDomain.sh new file mode 100755 index 000000000..120eccec7 --- /dev/null +++ b/OracleAccessManagement/kubernetes/domain-lifecycle/introspectDomain.sh @@ -0,0 +1,105 @@ +# !/bin/sh +# Copyright (c) 2021, Oracle and/or its affiliates. +# Licensed under the Universal Permissive License v 1.0 as shown at https://oss.oracle.com/licenses/upl. +# + +script="${BASH_SOURCE[0]}" +scriptDir="$( cd "$( dirname "${script}" )" && pwd )" +source ${scriptDir}/helper.sh +if [ "${debug}" == "true" ]; then set -x; fi; + +function usage() { + + cat << EOF + + This script initiates introspection of a WebLogic domain by updating + the value of 'spec.introspectVersion' attribute of the domain resource. + + Usage: + + $(basename $0) [-n mynamespace] [-d mydomainuid] [-i introspectVersion] [-m kubecli] + + -d : Domain unique-id. Default is 'sample-domain1'. + + -n : Domain namespace. Default is 'sample-domain1-ns'. + + -i : Introspect version. If this parameter is not provided, + then the script will generate the 'introspectVersion' by + incrementing the existing value. If the 'spec.introspectVersion' + doesn't exist or its value is non-numeric, then the script + will set the 'spec.introspectVersion' value to '1'. + + -m : Kubernetes command line interface. Default is 'kubectl' + if KUBERNETES_CLI env variable is not set. Otherwise + the default is the value of KUBERNETES_CLI env variable. + + -v : Enables verbose mode. Default is 'false'. + + -h : This help. 
+ +EOF +exit $1 +} + +kubernetesCli=${KUBERNETES_CLI:-kubectl} +clusterName="" +domainUid="sample-domain1" +domainNamespace="sample-domain1-ns" +verboseMode=false +patchJson="" +introspectVersion="" + +while getopts "vc:n:m:d:i:h" opt; do + case $opt in + n) domainNamespace="${OPTARG}" + ;; + d) domainUid="${OPTARG}" + ;; + i) introspectVersion="${OPTARG}" + ;; + m) kubernetesCli="${OPTARG}" + ;; + v) verboseMode=true; + ;; + h) usage 0 + ;; + *) usage 1 + ;; + esac +done + +set -eu + +# +# Function to perform validations, read files and initialize workspace +# +function initialize { + + validateErrors=false + + validateKubernetesCliAvailable + validateJqAvailable + + failIfValidationErrors +} + +initialize + +# Get the domain in json format +domainJson=$(${kubernetesCli} get domain ${domainUid} -n ${domainNamespace} -o json --ignore-not-found) +if [ -z "${domainJson}" ]; then + printError "Unable to get domain resource for domain '${domainUid}' in namespace '${domainNamespace}'. Please make sure the 'domain_uid' and 'namespace' specified by the '-d' and '-n' arguments are correct. Exiting." + exit 1 +fi + +# if the introspectVersion is not provided, generate the value of introspectVersion +if [ -z "${introspectVersion}" ]; then + generateDomainIntrospectVersion "${domainJson}" introspectVersion +fi + +printInfo "Patching introspectVersion for domain '${domainUid}' to '${introspectVersion}'." +createPatchJsonToUpdateDomainIntrospectVersion "${introspectVersion}" patchJson + +executePatchCommand "${kubernetesCli}" "${domainUid}" "${domainNamespace}" "${patchJson}" "${verboseMode}" + +printInfo "Successfully patched introspectVersion for domain '${domainUid}'!" 
diff --git a/OracleAccessManagement/kubernetes/domain-lifecycle/restartServer.sh b/OracleAccessManagement/kubernetes/domain-lifecycle/restartServer.sh new file mode 100755 index 000000000..0c7405aba --- /dev/null +++ b/OracleAccessManagement/kubernetes/domain-lifecycle/restartServer.sh @@ -0,0 +1,106 @@ +# !/bin/sh +# Copyright (c) 2020, 2021, Oracle and/or its affiliates. +# Licensed under the Universal Permissive License v 1.0 as shown at https://oss.oracle.com/licenses/upl. +# + +script="${BASH_SOURCE[0]}" +scriptDir="$( cd "$( dirname "${script}" )" && pwd )" +source ${scriptDir}/helper.sh +if [ "${debug}" == "true" ]; then set -x; fi; +set -eu + +function usage() { + + cat << EOF + + This script restarts a running WebLogic server in a domain by deleting the server pod. + + Usage: + + $(basename $0) -s myserver [-n mynamespace] [-d mydomainuid] [-m kubecli] + + -s : The WebLogic server name (not the pod name). + This parameter is required. + + -d : Domain unique-id. Default is 'sample-domain1'. + + -n : Domain namespace. Default is 'sample-domain1-ns'. + + -m : Kubernetes command line interface. Default is 'kubectl' + if KUBERNETES_CLI env variable is not set. Otherwise the + default is the value of KUBERNETES_CLI env variable. + + -h : This help. + +EOF +exit $1 +} + +kubernetesCli=${KUBERNETES_CLI:-kubectl} +serverName="" +clusterName="" +domainUid="sample-domain1" +domainNamespace="sample-domain1-ns" +podName="" +legalDNSPodName="" + +while getopts "s:m:n:d:h" opt; do + case $opt in + s) serverName="${OPTARG}" + ;; + n) domainNamespace="${OPTARG}" + ;; + m) kubernetesCli="${OPTARG}" + ;; + d) domainUid="${OPTARG}" + ;; + h) usage 0 + ;; + *) usage 1 + ;; + esac +done + +# +# Function to perform validations, read files and initialize workspace +# +function initialize { + + validateErrors=false + + validateKubernetesCliAvailable + validateJqAvailable + + # Validate that server name parameter is specified. 
+ if [ -z "${serverName}" ]; then + validationError "Please specify the server name using '-s' parameter e.g. '-s managed-server1'." + fi + + failIfValidationErrors +} + +initialize + +# Get the domain in json format +domainJson=$(${kubernetesCli} get domain ${domainUid} -n ${domainNamespace} -o json --ignore-not-found) +if [ -z "${domainJson}" ]; then + printError "Unable to get domain resource for domain '${domainUid}' in namespace '${domainNamespace}'. Please make sure the 'domain_uid' and 'namespace' specified by the '-d' and '-n' arguments are correct. Exiting." + exit 1 +fi + +# Validate that specified server is either part of a cluster or is an independent managed server +validateServerAndFindCluster "${domainUid}" "${domainNamespace}" "${serverName}" isValidServer clusterName isAdminServer +if [ "${isValidServer}" != 'true' ]; then + printError "Server ${serverName} is not part of any cluster and it's not an independent managed server. Please make sure that server name specified is correct." + exit 1 +fi + +podName=${domainUid}-${serverName} +toDNS1123Legal ${podName} legalDNSPodName +printInfo "Initiating restart of '${serverName}' by deleting server pod '${legalDNSPodName}'." +result=$(${kubernetesCli} -n ${domainNamespace} delete pod ${legalDNSPodName} --ignore-not-found) +if [ -z "${result}" ]; then + printError "Server '${serverName}' is not running." +else + printInfo "Server restart succeeded !" +fi diff --git a/OracleAccessManagement/kubernetes/domain-lifecycle/rollCluster.sh b/OracleAccessManagement/kubernetes/domain-lifecycle/rollCluster.sh new file mode 100755 index 000000000..858e41706 --- /dev/null +++ b/OracleAccessManagement/kubernetes/domain-lifecycle/rollCluster.sh @@ -0,0 +1,123 @@ +# !/bin/sh +# Copyright (c) 2021, Oracle and/or its affiliates. +# Licensed under the Universal Permissive License v 1.0 as shown at https://oss.oracle.com/licenses/upl. 
+# + +script="${BASH_SOURCE[0]}" +scriptDir="$( cd "$( dirname "${script}" )" && pwd )" +source ${scriptDir}/helper.sh +if [ "${debug}" == "true" ]; then set -x; fi; + +function usage() { + + cat << EOF + + This script initiates a rolling restart of the WebLogic cluster server pods in a domain by updating + the value of the 'spec.clusters[].restartVersion' attribute of the domain resource. + + Usage: + + $(basename $0) -c mycluster [-n mynamespace] [-d mydomainuid] [-r restartVersion] [-m kubecli] + + -c : Cluster name (required parameter). + + -d : Domain unique-id. Default is 'sample-domain1'. + + -n : Domain namespace. Default is 'sample-domain1-ns'. + + -r : Restart version. If this parameter is not provided, + then the script will generate the 'restartVersion' + value of the cluster by incrementing the existing + value. If the 'restartVersion' value doesn't exist + for the cluster then it will use the incremented value of + domain 'restartVersion'. If the domain 'restartVersion' also + doesn't exist or effective value is non-numeric, then + the script will set the 'restartVersion' value to '1'. + + -m : Kubernetes command line interface. Default is 'kubectl' + if KUBERNETES_CLI env variable is not set. Otherwise + the default is the value of the KUBERNETES_CLI env variable. + + -v : Enables verbose mode. Default is 'false'. + + -h : This help. 
+ +EOF +exit $1 +} + +kubernetesCli=${KUBERNETES_CLI:-kubectl} +clusterName="" +domainUid="sample-domain1" +domainNamespace="sample-domain1-ns" +verboseMode=false +patchJson="" +restartVersion="" + +while getopts "vc:n:m:d:r:h" opt; do + case $opt in + c) clusterName="${OPTARG}" + ;; + n) domainNamespace="${OPTARG}" + ;; + d) domainUid="${OPTARG}" + ;; + r) restartVersion="${OPTARG}" + ;; + m) kubernetesCli="${OPTARG}" + ;; + v) verboseMode=true; + ;; + h) usage 0 + ;; + *) usage 1 + ;; + esac +done + +set -eu + +# +# Function to perform validations, read files and initialize workspace +# +function initialize { + + validateErrors=false + + validateKubernetesCliAvailable + validateJqAvailable + + if [ -z "${clusterName}" ]; then + validationError "Please specify cluster name using '-c' parameter e.g. '-c cluster-1'." + fi + + failIfValidationErrors +} + +initialize + +# Get the domain in json format +domainJson=$(${kubernetesCli} get domain ${domainUid} -n ${domainNamespace} -o json --ignore-not-found) +if [ -z "${domainJson}" ]; then + printError "Unable to get domain resource for domain '${domainUid}' in namespace '${domainNamespace}'. Please make sure the 'domain_uid' and 'namespace' specified by the '-d' and '-n' arguments are correct. Exiting." + exit 1 +fi + +isValidCluster="" +validateClusterName "${domainUid}" "${domainNamespace}" "${clusterName}" isValidCluster +if [ "${isValidCluster}" != 'true' ]; then + printError "cluster ${clusterName} is not part of domain ${domainUid} in namespace ${domainNamespace}. Please make sure that cluster name is correct." + exit 1 +fi + +# if the restartVersion is not provided, generate the value of restartVersion +if [ -z "${restartVersion}" ]; then + generateClusterRestartVersion "${domainJson}" "${clusterName}" restartVersion +fi + +printInfo "Patching restartVersion for cluster '${clusterName}' to '${restartVersion}'." 
+createPatchJsonToUpdateClusterRestartVersion "${domainJson}" "${clusterName}" "${restartVersion}" patchJson + +executePatchCommand "${kubernetesCli}" "${domainUid}" "${domainNamespace}" "${patchJson}" "${verboseMode}" + +printInfo "Successfully patched restartVersion for cluster '${clusterName}'!" diff --git a/OracleAccessManagement/kubernetes/domain-lifecycle/rollDomain.sh b/OracleAccessManagement/kubernetes/domain-lifecycle/rollDomain.sh new file mode 100755 index 000000000..4c821d8c8 --- /dev/null +++ b/OracleAccessManagement/kubernetes/domain-lifecycle/rollDomain.sh @@ -0,0 +1,105 @@ +# !/bin/sh +# Copyright (c) 2021, Oracle and/or its affiliates. +# Licensed under the Universal Permissive License v 1.0 as shown at https://oss.oracle.com/licenses/upl. +# + +script="${BASH_SOURCE[0]}" +scriptDir="$( cd "$( dirname "${script}" )" && pwd )" +source ${scriptDir}/helper.sh +if [ "${debug}" == "true" ]; then set -x; fi; + +function usage() { + + cat << EOF + + This script initiates a rolling restart of pods in a WebLogic domain by updating + the value of the 'spec.restartVersion' attribute of the domain resource. + + Usage: + + $(basename $0) [-n mynamespace] [-d mydomainuid] [-r restartVersion] [-m kubecli] + + -d : Domain unique-id. Default is 'sample-domain1'. + + -n : Domain namespace. Default is 'sample-domain1-ns'. + + -r : Restart version. If this parameter is not provided, + then the script will generate the 'restartVersion' by + incrementing the existing value. If the 'spec.restartVersion' + doesn't exist or its value is non-numeric, then the script + will set the 'spec.restartVersion' value to '1'. + + -m : Kubernetes command line interface. Default is 'kubectl' + if KUBERNETES_CLI env variable is not set. Otherwise + the default is the value of the KUBERNETES_CLI env variable. + + -v : Enables verbose mode. Default is 'false'. + + -h : This help. 
+ +EOF +exit $1 +} + +kubernetesCli=${KUBERNETES_CLI:-kubectl} +clusterName="" +domainUid="sample-domain1" +domainNamespace="sample-domain1-ns" +verboseMode=false +patchJson="" +restartVersion="" + +while getopts "vc:n:m:d:r:h" opt; do + case $opt in + n) domainNamespace="${OPTARG}" + ;; + d) domainUid="${OPTARG}" + ;; + r) restartVersion="${OPTARG}" + ;; + m) kubernetesCli="${OPTARG}" + ;; + v) verboseMode=true; + ;; + h) usage 0 + ;; + *) usage 1 + ;; + esac +done + +set -eu + +# +# Function to perform validations, read files and initialize workspace +# +function initialize { + + validateErrors=false + + validateKubernetesCliAvailable + validateJqAvailable + + failIfValidationErrors +} + +initialize + +# Get the domain in json format +domainJson=$(${kubernetesCli} get domain ${domainUid} -n ${domainNamespace} -o json --ignore-not-found) +if [ -z "${domainJson}" ]; then + printError "Unable to get domain resource for domain '${domainUid}' in namespace '${domainNamespace}'. Please make sure the 'domain_uid' and 'namespace' specified by the '-d' and '-n' arguments are correct. Exiting." + exit 1 +fi + +# if the restartVersion is not provided, generate the value of restartVersion +if [ -z "${restartVersion}" ]; then + generateDomainRestartVersion "${domainJson}" restartVersion +fi + +printInfo "Patching restartVersion for domain '${domainUid}' to '${restartVersion}'." +createPatchJsonToUpdateDomainRestartVersion "${restartVersion}" patchJson + +executePatchCommand "${kubernetesCli}" "${domainUid}" "${domainNamespace}" "${patchJson}" "${verboseMode}" + +printInfo "Successfully patched restartVersion for domain '${domainUid}'!" 
diff --git a/OracleAccessManagement/kubernetes/domain-lifecycle/scaleCluster.sh b/OracleAccessManagement/kubernetes/domain-lifecycle/scaleCluster.sh new file mode 100755 index 000000000..947dd871c --- /dev/null +++ b/OracleAccessManagement/kubernetes/domain-lifecycle/scaleCluster.sh @@ -0,0 +1,122 @@ +# !/bin/sh +# Copyright (c) 2021, Oracle and/or its affiliates. +# Licensed under the Universal Permissive License v 1.0 as shown at https://oss.oracle.com/licenses/upl. +# + +script="${BASH_SOURCE[0]}" +scriptDir="$( cd "$( dirname "${script}" )" && pwd )" +source ${scriptDir}/helper.sh +if [ "${debug}" == "true" ]; then set -x; fi; + +function usage() { + + cat << EOF + + This script scales a WebLogic cluster in a domain by patching the + 'spec.clusters[].replicas' attribute of the domain + resource. This change will cause the operator to perform a scaling + operation for the WebLogic cluster based on the value of replica count. + + Usage: + + $(basename $0) -c mycluster -r replicas [-n mynamespace] [-d mydomainuid] [-m kubecli] + + -c : Cluster name parameter is required. + + -r : Replica count, parameter is required. + + -d : Domain unique-id. Default is 'sample-domain1'. + + -n : Domain namespace. Default is 'sample-domain1-ns'. + + -m : Kubernetes command line interface. Default is 'kubectl' if KUBERNETES_CLI env + variable is not set. Otherwise default is the value of KUBERNETES_CLI env variable. + + -v : Enables verbose mode. Default is 'false'. + + -h : This help. 
+ +EOF +exit $1 +} + +kubernetesCli=${KUBERNETES_CLI:-kubectl} +clusterName="" +domainUid="sample-domain1" +domainNamespace="sample-domain1-ns" +verboseMode=false +patchJson="" +replicas="" + +while getopts "vc:n:m:d:r:h" opt; do + case $opt in + c) clusterName="${OPTARG}" + ;; + n) domainNamespace="${OPTARG}" + ;; + d) domainUid="${OPTARG}" + ;; + r) replicas="${OPTARG}" + ;; + m) kubernetesCli="${OPTARG}" + ;; + v) verboseMode=true; + ;; + h) usage 0 + ;; + *) usage 1 + ;; + esac +done + +set -eu + +# +# Function to perform validations, read files and initialize workspace +# +function initialize { + + validateErrors=false + + validateKubernetesCliAvailable + validateJqAvailable + + if [ -z "${clusterName}" ]; then + validationError "Please specify cluster name using '-c' parameter e.g. '-c cluster-1'." + fi + + if [ -z "${replicas}" ]; then + validationError "Please specify replica count using '-r' parameter e.g. '-r 3'." + fi + + failIfValidationErrors +} + +initialize + +# Get the domain in json format +domainJson=$(${kubernetesCli} get domain ${domainUid} -n ${domainNamespace} -o json --ignore-not-found) +if [ -z "${domainJson}" ]; then + printError "Unable to get domain resource for domain '${domainUid}' in namespace '${domainNamespace}'. Please make sure the 'domain_uid' and 'namespace' specified by the '-d' and '-n' arguments are correct. Exiting." + exit 1 +fi + +isValidCluster="" +validateClusterName "${domainUid}" "${domainNamespace}" "${clusterName}" isValidCluster +if [ "${isValidCluster}" != 'true' ]; then + printError "cluster ${clusterName} is not part of domain ${domainUid} in namespace ${domainNamespace}. Please make sure that cluster name is correct." + exit 1 +fi + +isReplicasInAllowedRange "${domainJson}" "${clusterName}" "${replicas}" replicasInAllowedRange range +if [ "${replicasInAllowedRange}" == 'false' ]; then + printError "Replicas value is not in the allowed range of ${range}. Exiting." 
+ exit 1 +fi + +printInfo "Patching replicas for cluster '${clusterName}' to '${replicas}'." +createPatchJsonToUpdateReplicas "${domainJson}" "${clusterName}" "${replicas}" patchJson + +executePatchCommand "${kubernetesCli}" "${domainUid}" "${domainNamespace}" "${patchJson}" "${verboseMode}" + +printInfo "Successfully patched replicas for cluster '${clusterName}'!" diff --git a/OracleAccessManagement/kubernetes/domain-lifecycle/startCluster.sh b/OracleAccessManagement/kubernetes/domain-lifecycle/startCluster.sh new file mode 100755 index 000000000..5c8bf034c --- /dev/null +++ b/OracleAccessManagement/kubernetes/domain-lifecycle/startCluster.sh @@ -0,0 +1,129 @@ +#!/bin/sh +# Copyright (c) 2020, 2021, Oracle and/or its affiliates. +# Licensed under the Universal Permissive License v 1.0 as shown at https://oss.oracle.com/licenses/upl. +# + +script="${BASH_SOURCE[0]}" +scriptDir="$( cd "$( dirname "${script}" )" && pwd )" +source ${scriptDir}/helper.sh +if [ "${debug}" == "true" ]; then set -x; fi; + +function usage() { + + cat << EOF + + This script starts a WebLogic cluster in a domain by patching + 'spec.clusters[].serverStartPolicy' attribute of the domain + resource to 'IF_NEEDED'. This change will cause the operator to initiate + startup of cluster's WebLogic server instance pods if the pods are not + already running and the spec.replicas or + 'spec.clusters[<cluster-name>].replicas' is set higher than zero. + + Usage: + + $(basename $0) -c mycluster [-n mynamespace] [-d mydomainuid] [-m kubecli] + + -c : Cluster name (required parameter). + + -d : Domain unique-id. Default is 'sample-domain1'. + + -n : Domain namespace. Default is 'sample-domain1-ns'. + + -m : Kubernetes command line interface. Default is 'kubectl' if KUBERNETES_CLI env + variable is not set. Otherwise default is the value of KUBERNETES_CLI env variable. + + -v : Enables verbose mode. Default is 'false'. + + -h : This help. 
+ +EOF +exit $1 +} + +set -eu + +kubernetesCli=${KUBERNETES_CLI:-kubectl} +clusterName="" +domainUid="sample-domain1" +domainNamespace="sample-domain1-ns" +verboseMode=false +patchJson="" + +while getopts "vc:n:m:d:h" opt; do + case $opt in + c) clusterName="${OPTARG}" + ;; + n) domainNamespace="${OPTARG}" + ;; + d) domainUid="${OPTARG}" + ;; + m) kubernetesCli="${OPTARG}" + ;; + v) verboseMode=true; + ;; + h) usage 0 + ;; + *) usage 1 + ;; + esac +done + +# +# Function to perform validations, read files and initialize workspace +# +function initialize { + + validateErrors=false + + validateKubernetesCliAvailable + validateJqAvailable + + if [ -z "${clusterName}" ]; then + validationError "Please specify cluster name using '-c' parameter e.g. '-c cluster-1'." + fi + + failIfValidationErrors + +} + +initialize + +# Get the domain in json format +domainJson=$(${kubernetesCli} get domain ${domainUid} -n ${domainNamespace} -o json --ignore-not-found) +if [ -z "${domainJson}" ]; then + printError "Unable to get domain resource for domain '${domainUid}' in namespace '${domainNamespace}'. Please make sure the 'domain_uid' and 'namespace' specified by the '-d' and '-n' arguments are correct. Exiting." + exit 1 +fi + +isValidCluster="" +validateClusterName "${domainUid}" "${domainNamespace}" "${clusterName}" isValidCluster +if [ "${isValidCluster}" != 'true' ]; then + printError "cluster ${clusterName} is not part of domain ${domainUid} in namespace ${domainNamespace}. Please make sure that cluster name is correct." + exit 1 +fi + +getDomainPolicy "${domainJson}" domainStartPolicy +# Fail if effective start policy of domain is NEVER or ADMIN_ONLY +if [[ "${domainStartPolicy}" == 'NEVER' || "${domainStartPolicy}" == 'ADMIN_ONLY' ]]; then + printError "Cannot start cluster '${clusterName}', the domain is configured with a 'spec.serverStartPolicy' attribute on the domain resource of 'NEVER' or 'ADMIN_ONLY'." 
+ exit 1 +fi + +# Get server start policy for this cluster +getClusterPolicy "${domainJson}" "${clusterName}" startPolicy +if [ -z "${startPolicy}" ]; then + startPolicy=${domainStartPolicy} +fi + +if [ "${startPolicy}" == 'IF_NEEDED' ]; then + printInfo "No changes needed, exiting. The cluster '${clusterName}' is already started or starting. The effective value of 'spec.clusters[?(clusterName=\"${clusterName}\")].serverStartPolicy' attribute on the domain resource is 'IF_NEEDED'." + exit 0 +fi + +# Set policy value to IF_NEEDED +printInfo "Patching start policy of cluster '${clusterName}' from '${startPolicy}' to 'IF_NEEDED'." +createPatchJsonToUpdateClusterPolicy "${domainJson}" "${clusterName}" "IF_NEEDED" patchJson + +executePatchCommand "${kubernetesCli}" "${domainUid}" "${domainNamespace}" "${patchJson}" "${verboseMode}" + +printInfo "Successfully patched cluster '${clusterName}' with 'IF_NEEDED' start policy!" diff --git a/OracleAccessManagement/kubernetes/domain-lifecycle/startDomain.sh b/OracleAccessManagement/kubernetes/domain-lifecycle/startDomain.sh new file mode 100755 index 000000000..fea9cbbe5 --- /dev/null +++ b/OracleAccessManagement/kubernetes/domain-lifecycle/startDomain.sh @@ -0,0 +1,97 @@ +#!/bin/sh +# Copyright (c) 2020, 2021, Oracle and/or its affiliates. +# Licensed under the Universal Permissive License v 1.0 as shown at https://oss.oracle.com/licenses/upl. +# + +script="${BASH_SOURCE[0]}" +scriptDir="$( cd "$( dirname "${script}" )" && pwd )" +source ${scriptDir}/helper.sh + +function usage() { + + cat << EOF + + This script starts a deployed WebLogic domain by patching 'spec.serverStartPolicy' + attribute of the domain resource to 'IF_NEEDED'. This change will cause the operator + to initiate startup of domain's WebLogic server instance pods if the pods are not + already running. + + Usage: + + $(basename $0) [-n mynamespace] [-d mydomainuid] [-m kubecli] + + -d : Domain unique-id. Default is 'sample-domain1'. + + -n : Domain namespace. 
Default is 'sample-domain1-ns'. + + -m : Kubernetes command line interface. Default is 'kubectl' if KUBERNETES_CLI env + variable is not set. Otherwise default is the value of KUBERNETES_CLI env variable. + + -v : Enables verbose mode. Default is 'false'. + + -h : This help. + +EOF +exit $1 +} + +kubernetesCli=${KUBERNETES_CLI:-kubectl} +domainUid="sample-domain1" +domainNamespace="sample-domain1-ns" +verboseMode=false + +while getopts "vn:d:m:h" opt; do + case $opt in + n) domainNamespace="${OPTARG}" + ;; + d) domainUid="${OPTARG}" + ;; + m) kubernetesCli="${OPTARG}" + ;; + v) verboseMode=true; + ;; + h) usage 0 + ;; + *) usage 1 + ;; + esac +done + + +set -eu +set -o pipefail + +function initialize { + + validateErrors=false + + validateKubernetesCliAvailable + validateJqAvailable + + failIfValidationErrors +} + +initialize + +# Get the domain in json format +domainJson=$(${kubernetesCli} get domain ${domainUid} -n ${domainNamespace} -o json --ignore-not-found) + +if [ -z "${domainJson}" ]; then + printError "Domain resource for domain '${domainUid}' not found in namespace '${domainNamespace}'. Exiting." + exit 1 +fi + +getDomainPolicy "${domainJson}" serverStartPolicy + +if [ "${serverStartPolicy}" == 'IF_NEEDED' ]; then + printInfo "No changes needed, exiting. The domain '${domainUid}' is already started or starting. The effective value of 'spec.serverStartPolicy' attribute on the domain resource is 'IF_NEEDED'." + exit 0 +fi + +printInfo "Patching domain '${domainUid}' from serverStartPolicy='${serverStartPolicy}' to 'IF_NEEDED'." + +createPatchJsonToUpdateDomainPolicy "IF_NEEDED" patchJson + +executePatchCommand "${kubernetesCli}" "${domainUid}" "${domainNamespace}" "${patchJson}" "${verboseMode}" + +printInfo "Successfully patched domain '${domainUid}' in namespace '${domainNamespace}' with 'IF_NEEDED' start policy!" 
diff --git a/OracleAccessManagement/kubernetes/domain-lifecycle/startServer.sh b/OracleAccessManagement/kubernetes/domain-lifecycle/startServer.sh new file mode 100755 index 000000000..37b120d71 --- /dev/null +++ b/OracleAccessManagement/kubernetes/domain-lifecycle/startServer.sh @@ -0,0 +1,242 @@ +# !/bin/sh +# Copyright (c) 2020, 2021, Oracle and/or its affiliates. +# Licensed under the Universal Permissive License v 1.0 as shown at https://oss.oracle.com/licenses/upl. +# + +# This script starts a WebLogic managed server in a domain. +# Internal code notes :- +# - If server start policy is ALWAYS or policy is IF_NEEDED and the server is selected +# to start based on the replica count, it means that server is already started or is +# in the process of starting. In this case, script exits without making any changes. +# +# - If start policy of servers parent cluster or domain is 'NEVER', script +# fails as server can't be started. +# +# - If server is part of a cluster and keep_replica_constant option is false (the default) +# and the effective start policy of the server is IF_NEEDED and increasing replica count +# will naturally start the server, the script increases the replica count. +# +# - If server is part of a cluster and keep_replica_constant option is false (the default) +# and unsetting policy and increasing the replica count will start this server, script +# unsets the policy and increases replica count. For e.g. if replica count is 1 and +# start policy of server2 is NEVER, unsetting policy and increasing replica count will +# start server2. +# +# - If option to keep replica count constant ('-k') is selected and unsetting start policy +# will naturally start the server, script will unset the policy. For e.g. if replica count +# is 2 and start policy of server2 is NEVER, unsetting policy will start server2. 
+# +# - If above conditions are not true, it implies that either start policy is NEVER or policy +# is IF_NEEDED but server is not next in the order to start. In this case, script sets start +# policy to ALWAYS. For e.g. replica count is 3 and server10 needs to start. The script also +# increments the replica count by default. If option to keep replica count constant ('-k') +# is selected, it only sets the start policy to ALWAYS. +# + +script="${BASH_SOURCE[0]}" +scriptDir="$( cd "$( dirname "${script}" )" && pwd )" +source ${scriptDir}/helper.sh +if [ "${debug}" == "true" ]; then set -x; fi; +set -eu + +function usage() { + + cat << EOF + + This script starts a WebLogic server in a domain. For the managed servers, it either + increases the value of 'spec.clusters[].replicas' by '1' or updates the + 'spec.managedServers[].serverStartPolicy' attribute of the domain + resource or both as necessary for starting the server. For the administration server, it + updates the value of 'spec.adminServer.serverStartPolicy' attribute of the domain resource. + The 'spec.clusters[].replicas' value can be kept constant by using '-k' option. + Please see README.md for more details. + + Usage: + + $(basename $0) -s myserver [-n mynamespace] [-d mydomainuid] [-k] [-m kubecli] [-v] + + -s : The WebLogic server name (not the pod name). + This parameter is required. + + -d : Domain unique-id. Default is 'sample-domain1'. + + -n : Domain namespace. Default is 'sample-domain1-ns'. + + -k : Keep replica count constant for the clustered servers. The default behavior + is to increment the replica count for the clustered servers. This parameter + is ignored for the administration and non-clustered managed servers. + + -m : Kubernetes command line interface. Default is 'kubectl' if KUBERNETES_CLI env + variable is not set. Otherwise default is the value of KUBERNETES_CLI env variable. + + -v : Enables verbose mode. Default is 'false'. + + -h : This help. 
+ +EOF +exit $1 +} + +kubernetesCli=${KUBERNETES_CLI:-kubectl} +serverName="" +clusterName="" +domainUid="sample-domain1" +domainNamespace="sample-domain1-ns" +keepReplicaConstant=false +verboseMode=false +withReplicas="CONSTANT" +withPolicy="CONSTANT" +managedServerPolicy="" +effectivePolicy="" +isValidServer="" +patchJson="" +serverStarted="" +startsByPolicyUnset="" +startsByReplicaIncreaseAndPolicyUnset="" +isAdminServer=false + +while getopts "vkd:n:m:s:h" opt; do + case $opt in + s) serverName="${OPTARG}" + ;; + n) domainNamespace="${OPTARG}" + ;; + d) domainUid="${OPTARG}" + ;; + k) keepReplicaConstant=true; + ;; + m) kubernetesCli="${OPTARG}" + ;; + v) verboseMode=true; + ;; + h) usage 0 + ;; + *) usage 1 + ;; + esac +done + +# +# Function to perform validations, read files and initialize workspace +# +function initialize { + + # Validate the required files exist + validateErrors=false + + validateKubernetesCliAvailable + validateJqAvailable + + # Validate that server name parameter is specified. + if [ -z "${serverName}" ]; then + validationError "Please specify a server name using '-s' parameter e.g. '-s managed-server1'." + fi + + failIfValidationErrors +} + +initialize + +# Get the domain in json format +domainJson=$(${kubernetesCli} get domain ${domainUid} -n ${domainNamespace} -o json --ignore-not-found) +if [ -z "${domainJson}" ]; then + printError "Unable to get domain resource for domain '${domainUid}' in namespace '${domainNamespace}'. Please make sure the 'domain_uid' and 'namespace' specified by the '-d' and '-n' arguments are correct. Exiting." + exit 1 +fi + +# Validate that specified server is either part of a cluster or is an independent managed server +validateServerAndFindCluster "${domainUid}" "${domainNamespace}" "${serverName}" isValidServer clusterName isAdminServer +if [ "${isValidServer}" != 'true' ]; then + printError "Server ${serverName} is not part of any cluster and it's not an independent managed server. 
Please make sure that server name specified is correct." + exit 1 +fi + +getClusterPolicy "${domainJson}" "${clusterName}" clusterPolicy +if [ "${clusterPolicy}" == 'NEVER' ]; then + printError "Cannot start server '${serverName}', the server's parent cluster '.spec.clusters[?(clusterName=\"${clusterName}\")].serverStartPolicy' in the domain resource is set to 'NEVER'." + exit 1 +fi + +getDomainPolicy "${domainJson}" domainPolicy +if [ "${domainPolicy}" == 'NEVER' ] || [[ "${domainPolicy}" == 'ADMIN_ONLY' && "${isAdminServer}" != 'true' ]]; then + printError "Cannot start server '${serverName}', the .spec.serverStartPolicy in the domain resource is set to 'NEVER' or 'ADMIN_ONLY'." + exit 1 +fi + +getEffectivePolicy "${domainJson}" "${serverName}" "${clusterName}" effectivePolicy +if [ "${isAdminServer}" == 'true' ]; then + getEffectiveAdminPolicy "${domainJson}" effectivePolicy + if [[ "${effectivePolicy}" == "IF_NEEDED" || "${effectivePolicy}" == "ALWAYS" ]]; then + printInfo "No changes needed, exiting. Server should be already starting or started because effective server start policy is '${effectivePolicy}'." + exit 0 + fi +fi + +if [ -n "${clusterName}" ]; then + # Server is part of a cluster, check currently started servers + checkStartedServers "${domainJson}" "${serverName}" "${clusterName}" "${withReplicas}" "${withPolicy}" serverStarted + if [[ ${effectivePolicy} == "IF_NEEDED" && ${serverStarted} == "true" ]]; then + printInfo "No changes needed, exiting. The server should be already started or it's in the process of starting. The start policy for server ${serverName} is ${effectivePolicy} and server is chosen to be started based on current replica count." + exit 0 + elif [[ "${effectivePolicy}" == "ALWAYS" && ${serverStarted} == "true" ]]; then + printInfo "No changes needed, exiting. The server should be already started or it's in the process of starting. The start policy for server ${serverName} is ${effectivePolicy}." 
+ exit 0 + fi +else + # Server is an independent managed server. + if [[ "${effectivePolicy}" == "ALWAYS" || "${effectivePolicy}" == "IF_NEEDED" ]]; then + printInfo "No changes needed, exiting. The server should be already started or it's in the process of starting. The start policy for server ${serverName} is ${effectivePolicy}." + exit 0 + fi +fi + +getServerPolicy "${domainJson}" "${serverName}" managedServerPolicy +createServerStartPolicyPatch "${domainJson}" "${serverName}" "ALWAYS" alwaysStartPolicyPatch + +# if server is part of a cluster and replica count will increase +if [[ -n ${clusterName} && "${keepReplicaConstant}" != 'true' ]]; then + #check if server starts by increasing replicas and unsetting policy + withReplicas="INCREASED" + withPolicy="UNSET" + checkStartedServers "${domainJson}" "${serverName}" "${clusterName}" "${withReplicas}" "${withPolicy}" startsByReplicaIncreaseAndPolicyUnset + createReplicaPatch "${domainJson}" "${clusterName}" "INCREMENT" incrementReplicaPatch replicaCount + if [[ -n ${managedServerPolicy} && ${startsByReplicaIncreaseAndPolicyUnset} == "true" ]]; then + # Server starts by increasing replicas and policy unset, increment and unset + printInfo "Unsetting the current start policy '${managedServerPolicy}' for '${serverName}' and incrementing replica count ${replicaCount}." + createPatchJsonToUnsetPolicyAndUpdateReplica "${domainJson}" "${serverName}" "${incrementReplicaPatch}" patchJson + elif [[ -z ${managedServerPolicy} && ${startsByReplicaIncreaseAndPolicyUnset} == "true" ]]; then + # Start policy is not set, server starts by increasing replicas based on effective policy, increment replicas + printInfo "Updating replica count for cluster '${clusterName}' to ${replicaCount}." 
+ createPatchJsonToUpdateReplica "${incrementReplicaPatch}" patchJson + else + # Patch server policy to always and increment replicas + printInfo "Patching start policy of server '${serverName}' from '${effectivePolicy}' to 'ALWAYS' and \ +incrementing replica count for cluster '${clusterName}' to ${replicaCount}." + createPatchJsonToUpdateReplicaAndPolicy "${incrementReplicaPatch}" "${alwaysStartPolicyPatch}" patchJson + fi +elif [[ -n ${clusterName} && "${keepReplicaConstant}" == 'true' ]]; then + # Replica count needs to stay constant, check if server starts by unsetting policy + withReplicas="CONSTANT" + withPolicy="UNSET" + checkStartedServers "${domainJson}" "${serverName}" "${clusterName}" "${withReplicas}" "${withPolicy}" startsByPolicyUnset + if [[ "${effectivePolicy}" == "NEVER" && ${startsByPolicyUnset} == "true" ]]; then + # Server starts by unsetting policy, unset policy + printInfo "Unsetting the current start policy '${effectivePolicy}' for '${serverName}'." + createPatchJsonToUnsetPolicy "${domainJson}" "${serverName}" patchJson + else + # Patch server policy to always + printInfo "Patching start policy for '${serverName}' to 'ALWAYS'." + createPatchJsonToUpdatePolicy "${alwaysStartPolicyPatch}" patchJson + fi +elif [ "${isAdminServer}" == 'true' ]; then + printInfo "Patching start policy of '${serverName}' from '${effectivePolicy}' to 'IF_NEEDED'." + createPatchJsonToUpdateAdminPolicy "${domainJson}" "IF_NEEDED" patchJson +else + # Server is an independent managed server + printInfo "Unsetting the current start policy '${effectivePolicy}' for '${serverName}'." + createPatchJsonToUnsetPolicy "${domainJson}" "${serverName}" patchJson +fi + +executePatchCommand "${kubernetesCli}" "${domainUid}" "${domainNamespace}" "${patchJson}" "${verboseMode}" + +printInfo "Patch command succeeded !" 
diff --git a/OracleAccessManagement/kubernetes/domain-lifecycle/stopCluster.sh b/OracleAccessManagement/kubernetes/domain-lifecycle/stopCluster.sh new file mode 100755 index 000000000..6e0410932 --- /dev/null +++ b/OracleAccessManagement/kubernetes/domain-lifecycle/stopCluster.sh @@ -0,0 +1,119 @@ +# !/bin/sh +# Copyright (c) 2020, 2021, Oracle and/or its affiliates. +# Licensed under the Universal Permissive License v 1.0 as shown at https://oss.oracle.com/licenses/upl. +# + +script="${BASH_SOURCE[0]}" +scriptDir="$( cd "$( dirname "${script}" )" && pwd )" +source ${scriptDir}/helper.sh +if [ "${debug}" == "true" ]; then set -x; fi; + +function usage() { + + cat << EOF + + This script stops a WebLogic cluster in a domain by patching + 'spec.clusters[].serverStartPolicy' attribute of the domain + resource to 'NEVER'. This change will cause the operator to initiate shutdown + of cluster's WebLogic server instance pods if the pods are running. + + Usage: + + $(basename $0) -c mycluster [-n mynamespace] [-d mydomainuid] [-m kubecli] + + -c : Cluster name (required parameter). + + -d : Domain unique-id. Default is 'sample-domain1'. + + -n : Domain namespace. Default is 'sample-domain1-ns'. + + -m : Kubernetes command line interface. Default is 'kubectl' if KUBERNETES_CLI env + variable is not set. Otherwise default is the value of KUBERNETES_CLI env variable. + + -v : Enables verbose mode. Default is 'false'. + + -h : This help. 
+ +EOF +exit $1 +} + +kubernetesCli=${KUBERNETES_CLI:-kubectl} +clusterName="" +domainUid="sample-domain1" +domainNamespace="sample-domain1-ns" +verboseMode=false +patchJson="" + +while getopts "vc:n:m:d:h" opt; do + case $opt in + c) clusterName="${OPTARG}" + ;; + n) domainNamespace="${OPTARG}" + ;; + d) domainUid="${OPTARG}" + ;; + m) kubernetesCli="${OPTARG}" + ;; + v) verboseMode=true; + ;; + h) usage 0 + ;; + *) usage 1 + ;; + esac +done + +set -eu + +# +# Function to perform validations, read files and initialize workspace +# +function initialize { + + validateErrors=false + + validateKubernetesCliAvailable + validateJqAvailable + + if [ -z "${clusterName}" ]; then + validationError "Please specify cluster name using '-c' parameter e.g. '-c cluster-1'." + fi + + failIfValidationErrors +} + +initialize + +# Get the domain in json format +domainJson=$(${kubernetesCli} get domain ${domainUid} -n ${domainNamespace} -o json --ignore-not-found) +if [ -z "${domainJson}" ]; then + printError "Unable to get domain resource for domain '${domainUid}' in namespace '${domainNamespace}'. Please make sure the 'domain_uid' and 'namespace' specified by the '-d' and '-n' arguments are correct. Exiting." + exit 1 +fi + +isValidCluster="" +validateClusterName "${domainUid}" "${domainNamespace}" "${clusterName}" isValidCluster +if [ "${isValidCluster}" != 'true' ]; then + printError "cluster ${clusterName} is not part of domain ${domainUid} in namespace ${domainNamespace}. Please make sure that cluster name is correct." + exit 1 +fi + +# Get server start policy for this server +getClusterPolicy "${domainJson}" "${clusterName}" startPolicy +if [ -z "${startPolicy}" ]; then + getDomainPolicy "${domainJson}" startPolicy +fi + +if [[ "${startPolicy}" == 'NEVER' || "${startPolicy}" == 'ADMIN_ONLY' ]]; then + printInfo "No changes needed, exiting. The cluster '${clusterName}' is already stopped or stopping. 
The effective value of spec.clusters[?(clusterName="${clusterName}"].serverStartPolicy attribute on the domain resource is 'NEVER' or 'ADMIN_ONLY'." + exit 0 +fi + +# Set policy value to NEVER +printInfo "Patching start policy of cluster '${clusterName}' from '${startPolicy}' to 'NEVER'." +createPatchJsonToUpdateClusterPolicy "${domainJson}" "${clusterName}" "NEVER" patchJson + +executePatchCommand "${kubernetesCli}" "${domainUid}" "${domainNamespace}" "${patchJson}" "${verboseMode}" + +printInfo "Successfully patched cluster '${clusterName}' with 'NEVER' start policy!" diff --git a/OracleAccessManagement/kubernetes/domain-lifecycle/stopDomain.sh b/OracleAccessManagement/kubernetes/domain-lifecycle/stopDomain.sh new file mode 100755 index 000000000..d62f6b280 --- /dev/null +++ b/OracleAccessManagement/kubernetes/domain-lifecycle/stopDomain.sh @@ -0,0 +1,95 @@ +# !/bin/sh +# Copyright (c) 2020, 2021, Oracle and/or its affiliates. +# Licensed under the Universal Permissive License v 1.0 as shown at https://oss.oracle.com/licenses/upl. +# + +script="${BASH_SOURCE[0]}" +scriptDir="$( cd "$( dirname "${script}" )" && pwd )" +source ${scriptDir}/helper.sh + +function usage() { + + cat << EOF + + This script stops a deployed WebLogic domain by patching + 'spec.serverStartPolicy' attribute of domain resource to 'NEVER'. + This change will cause the operator to initiate shutdown of the + domain's WebLogic server instance pods if the pods are running. + + Usage: + + $(basename $0) [-n mynamespace] [-d mydomainuid] [-m kubecli] + + -d : Domain unique-id. Default is 'sample-domain1'. + + -n : Domain namespace. Default is 'sample-domain1-ns'. + + -m : Kubernetes command line interface. Default is 'kubectl' if KUBERNETES_CLI env + variable is not set. Otherwise default is the value of KUBERNETES_CLI env variable. + + -v : Enables verbose mode. Default is 'false'. + + -h : This help. 
+ +EOF +exit $1 +} + +kubernetesCli=${KUBERNETES_CLI:-kubectl} +domainUid="sample-domain1" +domainNamespace="sample-domain1-ns" +verboseMode=false + +while getopts "vn:d:m:h" opt; do + case $opt in + n) domainNamespace="${OPTARG}" + ;; + d) domainUid="${OPTARG}" + ;; + m) kubernetesCli="${OPTARG}" + ;; + v) verboseMode=true; + ;; + h) usage 0 + ;; + *) usage 1 + ;; + esac +done + +set -eu +set -o pipefail + +function initialize { + + validateErrors=false + + validateKubernetesCliAvailable + validateJqAvailable + failIfValidationErrors +} + +initialize + +# Get the domain in json format +domainJson=$(${kubernetesCli} get domain ${domainUid} -n ${domainNamespace} -o json --ignore-not-found) + +if [ -z "${domainJson}" ]; then + printError "Domain resource for domain '${domainUid}' not found in namespace '${domainNamespace}'. Exiting." + exit 1 +fi + +getDomainPolicy "${domainJson}" serverStartPolicy + +if [ "${serverStartPolicy}" == 'NEVER' ]; then + printInfo "No changes needed, exiting. The domain '${domainUid}' is already stopped or stopping. The value of 'spec.serverStartPolicy' attribute on the domain resource is 'NEVER'." + exit 0 +fi + +printInfo "Patching domain '${domainUid}' in namespace '${domainNamespace}' from serverStartPolicy='${serverStartPolicy}' to 'NEVER'." + +createPatchJsonToUpdateDomainPolicy "NEVER" patchJson + +executePatchCommand "${kubernetesCli}" "${domainUid}" "${domainNamespace}" "${patchJson}" "${verboseMode}" + +printInfo "Successfully patched domain '${domainUid}' in namespace '${domainNamespace}' with 'NEVER' start policy!" diff --git a/OracleAccessManagement/kubernetes/domain-lifecycle/stopServer.sh b/OracleAccessManagement/kubernetes/domain-lifecycle/stopServer.sh new file mode 100755 index 000000000..ec35b4d97 --- /dev/null +++ b/OracleAccessManagement/kubernetes/domain-lifecycle/stopServer.sh @@ -0,0 +1,248 @@ +# !/bin/sh +# Copyright (c) 2020, 2021, Oracle and/or its affiliates. 
+# Licensed under the Universal Permissive License v 1.0 as shown at https://oss.oracle.com/licenses/upl. +# + +# This script stops a WebLogic managed server in a domain. +# Internal code notes :- +# - If server start policy is NEVER or policy is IF_NEEDED and the server is not +# selected to start based on the replica count, it means that server is already +# stopped or is in the process of stopping. In this case, script exits without +# making any changes. +# +# - If server is part of a cluster and keep_replica_constant option is false (the default) +# and the effective start policy of the server is IF_NEEDED and decreasing replica count +# will naturally stop the server, the script decreases the replica count. +# +# - If server is part of a cluster and keep_replica_constant option is false (the default) +# and unsetting policy and decreasing the replica count will stop the server, script +# unsets the policy and decreases replica count. For e.g. if replica count is 2 and +# start policy of server2 is ALWAYS, unsetting policy and decreasing replica count will +# stop server2. +# +# - If option to keep replica count constant ('-k') is selected and unsetting start policy +# will naturally stop the server, script will unset the policy. For e.g. if replica count +# is 1 and start policy of server2 is ALWAYS, unsetting policy will stop server2. +# +# - If above conditions are not true, it implies that server policy is IF_NEEDED and server +# is selected to start. In this case, script sets start policy to NEVER. For e.g. replica +# count is 2 and server1 needs to be stopped. The script also decrements the replica count +# by default. If option to keep replica count constant ('-k') is selected, it only sets the +# start policy to NEVER. 
+# + +script="${BASH_SOURCE[0]}" +scriptDir="$( cd "$( dirname "${script}" )" && pwd )" +source ${scriptDir}/helper.sh +if [ "${debug}" == "true" ]; then set -x; fi; +set -eu + +function usage() { + + cat << EOF + + This script stops a running WebLogic server in a domain. For managed servers, it either + decreases the value of 'spec.clusters[].replicas' or updates the + 'spec.managedServers[].serverStartPolicy' attribute of the domain + resource or both as necessary to stop the server. For the administration server, it updates + the value of 'spec.adminServer.serverStartPolicy' attribute of the domain resource. The + 'spec.clusters[].replicas' value can be kept constant by using '-k' option. + Please see README.md for more details. + + Usage: + + $(basename $0) -s myserver [-n mynamespace] [-d mydomainuid] [-k] [-m kubecli] [-v] + + -s : The WebLogic server name (not the pod name). + This parameter is required. + + -d : Domain unique-id. Default is 'sample-domain1'. + + -n : Domain namespace. Default is 'sample-domain1-ns'. + + -k : Keep replica count constant for the clustered servers. The default behavior + is to decrement the replica count for the clustered servers. This parameter + is ignored for the administration and non-clustered managed servers. + + -m : Kubernetes command line interface. Default is 'kubectl' if KUBERNETES_CLI env + variable is not set. Otherwise default is the value of KUBERNETES_CLI env variable. + + -v : Enables verbose mode. Default is 'false'. + + -h : This help. 
+ +EOF +exit $1 +} + +kubernetesCli=${KUBERNETES_CLI:-kubectl} +serverName="" +clusterName="" +domainUid="sample-domain1" +domainNamespace="sample-domain1-ns" +keepReplicaConstant=false +verboseMode=false +serverStartPolicy=NEVER +serverStarted="" +effectivePolicy="" +managedServerPolicy="" +stoppedWhenAlwaysPolicyReset="" +replicasEqualsMinReplicas="" +withReplicas="CONSTANT" +withPolicy="CONSTANT" +patchJson="" +isAdminServer=false + +while getopts "vks:m:n:d:h" opt; do + case $opt in + s) serverName="${OPTARG}" + ;; + n) domainNamespace="${OPTARG}" + ;; + m) kubernetesCli="${OPTARG}" + ;; + d) domainUid="${OPTARG}" + ;; + k) keepReplicaConstant=true; + ;; + v) verboseMode=true; + ;; + h) usage 0 + ;; + *) usage 1 + ;; + esac +done + +# +# Function to perform validations, read files and initialize workspace +# +function initialize { + + validateErrors=false + + validateKubernetesCliAvailable + validateJqAvailable + + # Validate that server name parameter is specified. + if [ -z "${serverName}" ]; then + validationError "Please specify the server name using '-s' parameter e.g. '-s managed-server1'." + fi + + failIfValidationErrors +} + +initialize + +# Get the domain in json format +domainJson=$(${kubernetesCli} get domain ${domainUid} -n ${domainNamespace} -o json --ignore-not-found) +if [ -z "${domainJson}" ]; then + printError "Unable to get domain resource for domain '${domainUid}' in namespace '${domainNamespace}'. Please make sure the 'domain_uid' and 'namespace' specified by the '-d' and '-n' arguments are correct. Exiting." + exit 1 +fi + +# Validate that specified server is either part of a cluster or is an independent managed server +validateServerAndFindCluster "${domainUid}" "${domainNamespace}" "${serverName}" isValidServer clusterName isAdminServer +if [ "${isValidServer}" != 'true' ]; then + printError "Server ${serverName} is not part of any cluster and it's not an independent managed server. Please make sure that server name specified is correct." 
+ exit 1 +fi + +getEffectivePolicy "${domainJson}" "${serverName}" "${clusterName}" effectivePolicy +if [ "${isAdminServer}" == 'true' ]; then + getEffectiveAdminPolicy "${domainJson}" effectivePolicy + if [ "${effectivePolicy}" == "NEVER" ]; then + printInfo "No changes needed, exiting. Server should be already stopping or stopped because effective sever start policy is 'NEVER'." + exit 0 + fi +fi + +if [ -n "${clusterName}" ]; then + # Server is part of a cluster, check currently started servers + checkStartedServers "${domainJson}" "${serverName}" "${clusterName}" "${withReplicas}" "${withPolicy}" serverStarted + if [[ "${effectivePolicy}" == "NEVER" || "${effectivePolicy}" == "ADMIN_ONLY" || "${serverStarted}" != "true" ]]; then + printInfo "No changes needed, exiting. Server should be already stopping or stopped. This is either because of the sever start policy or server is chosen to be stopped based on current replica count." + exit 0 + fi +else + # Server is an independent managed server. + if [ "${effectivePolicy}" == "NEVER" ] || [[ "${effectivePolicy}" == "ADMIN_ONLY" && "${isAdminServer}" != 'true' ]]; then + printInfo "No changes needed, exiting. Server should be already stopping or stopped because effective sever start policy is 'NEVER' or 'ADMIN_ONLY'." + exit 0 + fi +fi + +if [[ -n "${clusterName}" && "${keepReplicaConstant}" == 'false' ]]; then + # check if replica count can decrease below current value + isReplicaCountEqualToMinReplicas "${domainJson}" "${clusterName}" replicasEqualsMinReplicas + if [ "${replicasEqualsMinReplicas}" == 'true' ]; then + printInfo "Not decreasing the replica count value: it is at its minimum. \ + (See 'domain.spec.allowReplicasBelowMinDynClusterSize' and \ + 'domain.status.clusters[].minimumReplicas' for details)." 
+ keepReplicaConstant=true + fi +fi + +# Create server start policy patch with NEVER value +createServerStartPolicyPatch "${domainJson}" "${serverName}" "${serverStartPolicy}" neverStartPolicyPatch +getServerPolicy "${domainJson}" "${serverName}" managedServerPolicy +if [ -n "${managedServerPolicy}" ]; then + effectivePolicy=${managedServerPolicy} +fi +if [[ -n "${clusterName}" && "${effectivePolicy}" == "ALWAYS" ]]; then + # Server is part of a cluster and start policy is ALWAYS. + withReplicas="CONSTANT" + withPolicy="UNSET" + checkStartedServers "${domainJson}" "${serverName}" "${clusterName}" "${withReplicas}" "${withPolicy}" startedWhenAlwaysPolicyReset +fi + +if [[ -n "${clusterName}" && "${keepReplicaConstant}" != 'true' ]]; then + # server is part of a cluster and replica count will decrease + withReplicas="DECREASED" + withPolicy="UNSET" + checkStartedServers "${domainJson}" "${serverName}" "${clusterName}" "${withReplicas}" "${withPolicy}" startedWhenRelicaReducedAndPolicyReset + createReplicaPatch "${domainJson}" "${clusterName}" "DECREMENT" replicaPatch replicaCount + + if [[ -n ${managedServerPolicy} && "${startedWhenRelicaReducedAndPolicyReset}" != "true" ]]; then + # Server shuts down by unsetting start policy and decrementing replica count, unset and decrement + printInfo "Unsetting the current start policy '${managedServerPolicy}' for '${serverName}' \ + and decrementing replica count to ${replicaCount}." + createPatchJsonToUnsetPolicyAndUpdateReplica "${domainJson}" "${serverName}" "${replicaPatch}" patchJson + elif [[ -z ${managedServerPolicy} && "${startedWhenRelicaReducedAndPolicyReset}" != "true" ]]; then + # Start policy is not set, server shuts down by decrementing replica count, decrement replicas + printInfo "Updating replica count for cluster ${clusterName} to ${replicaCount}." 
+ createPatchJsonToUpdateReplica "${replicaPatch}" patchJson + elif [[ ${managedServerPolicy} == "ALWAYS" && "${startedWhenAlwaysPolicyReset}" != "true" ]]; then + # Server shuts down by unsetting the start policy, unset and decrement replicas + printInfo "Unsetting the current start policy '${managedServerPolicy}' for '${serverName}' \ + and decrementing replica count to ${replicaCount}." + createPatchJsonToUnsetPolicyAndUpdateReplica "${domainJson}" "${serverName}" "${replicaPatch}" patchJson + else + # Patch server start policy to NEVER and decrement replica count + printInfo "Patching start policy of server '${serverName}' from '${effectivePolicy}' to 'NEVER' \ + and decrementing replica count for cluster '${clusterName}' to ${replicaCount}." + createPatchJsonToUpdateReplicaAndPolicy "${replicaPatch}" "${neverStartPolicyPatch}" patchJson + fi +elif [[ -n ${clusterName} && "${keepReplicaConstant}" == 'true' ]]; then + # Server is part of a cluster and replica count needs to stay constant + if [[ ${managedServerPolicy} == "ALWAYS" && "${startedWhenAlwaysPolicyReset}" != "true" ]]; then + # Server start policy is AlWAYS and server shuts down by unsetting the policy, unset policy + printInfo "Unsetting the current start policy '${effectivePolicy}' for '${serverName}'." + createPatchJsonToUnsetPolicy "${domainJson}" "${serverName}" patchJson + else + # Patch server start policy to NEVER + printInfo "Patching start policy of '${serverName}' from '${effectivePolicy}' to 'NEVER'." + createPatchJsonToUpdatePolicy "${neverStartPolicyPatch}" patchJson + fi +elif [ "${isAdminServer}" == 'true' ]; then + printInfo "Patching start policy of '${serverName}' from '${effectivePolicy}' to 'NEVER'." + createPatchJsonToUpdateAdminPolicy "${domainJson}" "${serverStartPolicy}" patchJson +else + # Server is an independent managed server, patch server start policy to NEVER + printInfo "Patching start policy of '${serverName}' from '${effectivePolicy}' to 'NEVER'." 
+ createPatchJsonToUpdatePolicy "${neverStartPolicyPatch}" patchJson +fi + +executePatchCommand "${kubernetesCli}" "${domainUid}" "${domainNamespace}" "${patchJson}" "${verboseMode}" + +printInfo "Patch command succeeded !" diff --git a/OracleAccessManagement/kubernetes/elasticsearch-and-kibana/README.md b/OracleAccessManagement/kubernetes/elasticsearch-and-kibana/README.md new file mode 100755 index 000000000..bd62bcc56 --- /dev/null +++ b/OracleAccessManagement/kubernetes/elasticsearch-and-kibana/README.md @@ -0,0 +1,31 @@ +# Sample to deploy Elasticsearch and Kibana + + +When you install the WebLogic operator Helm chart, you can set +`elkIntegrationEnabled` to `true` in your `values.yaml` file to tell the operator to send the contents of the operator's logs to Elasticsearch. + +Typically, you would have already configured Elasticsearch and Kibana in the +Kubernetes cluster, and also would have specified `elasticSearchHost` and `elasticSearchPort` in your `values.yaml` file to point to where Elasticsearch is already running. + +This sample configures the Elasticsearch and Kibana deployments and services. +It's useful for trying out the operator in a Kubernetes cluster that doesn't already +have them configured. + +It runs the Elastic Stack on the same host and port that the operator's Helm chart defaults +to, therefore, you only need to set `elkIntegrationEnabled` to `true` in your +`values.yaml` file. + +To control Elasticsearch memory parameters (Heap allocation and Enabling/Disabling swapping) please open the file `elasticsearch_and_kibana.yaml`, search for env variables of the elasticsearch container and change the values of the following. 
+ +* ES_JAVA_OPTS: value may contain for example -Xms512m -Xmx512m to lower the default memory usage (please be aware that this value is only applicable for demo purpose and it is not the one recommended by Elasticsearch itself) +* bootstrap.memory_lock: value may contain true (enables the usage of mlockall to try to lock the process address space into RAM, preventing any Elasticsearch memory from being swapped out) or false (disables the usage of mlockall to try to lock the process address space into RAM, preventing any Elasticsearch memory from being swapped out). + +To install Elasticsearch and Kibana, use: +```shell +$ kubectl apply -f kubernetes/samples/scripts/elasticsearch-and-kibana/elasticsearch_and_kibana.yaml +``` + +To remove them, use: +```shell +$ kubectl delete -f kubernetes/samples/scripts/elasticsearch-and-kibana/elasticsearch_and_kibana.yaml +``` diff --git a/OracleAccessManagement/kubernetes/elasticsearch-and-kibana/elasticsearch_and_kibana.yaml b/OracleAccessManagement/kubernetes/elasticsearch-and-kibana/elasticsearch_and_kibana.yaml new file mode 100755 index 000000000..97b0b9186 --- /dev/null +++ b/OracleAccessManagement/kubernetes/elasticsearch-and-kibana/elasticsearch_and_kibana.yaml @@ -0,0 +1,117 @@ +# Copyright (c) 2018, 2021, Oracle and/or its affiliates. +# Licensed under the Universal Permissive License v 1.0 as shown at https://oss.oracle.com/licenses/upl. + +# When a user installs the WebLogic operator Helm chart, the user can set +# elkIntegrationEnabled to true in their values.yaml to tell the operator to send the +# contents of the operator's log to Elasticsearch. +# +# Typically, a user would have already configured Elasticsearch and Kibana in the +# Kubernetes cluster, and also would specify elasticSearchHost and elasticSearchPort +# in their values.yaml file to point to where Elasticsearch is already running. +# +# This sample configures the Elasticsearch and Kibana deployments and services. 
+# It's useful for trying out the operator in a Kubernetes cluster that doesn't already +# have them configured. +# +# It runs Elasticstack on the same host and port that the operator's Helm chart defaults +# to, therefore, the customer only needs to set elkIntegrationEnabled to true in their +# values.yaml file. +# +# To configure them, use: +# kubectl apply -f kubernetes/samples/scripts/elasticsearch_and_kibana.yaml +# +# To remove them, use: +# kubectl delete -f kubernetes/samples/scripts/elasticsearch_and_kibana.yaml + +--- +apiVersion: apps/v1 +kind: Deployment +metadata: + namespace: "default" + name: "elasticsearch" + labels: + app: "elasticsearch" +spec: + replicas: 1 + selector: + matchLabels: + app: "elasticsearch" + template: + metadata: + labels: + app: "elasticsearch" + spec: + initContainers: + - name: set-vm-max-map-count + image: busybox + imagePullPolicy: IfNotPresent + command: ['sysctl', '-w', 'vm.max_map_count=262144'] + securityContext: + privileged: true + containers: + - name: "elasticsearch" + image: "elasticsearch:6.8.0" + ports: + - containerPort: 9200 + - containerPort: 9300 + env: + - name: ES_JAVA_OPTS + value: -Xms1024m -Xmx1024m + +--- +kind: "Service" +apiVersion: "v1" +metadata: + namespace: "default" + name: "elasticsearch" +spec: + ports: + - name: "http" + protocol: "TCP" + port: 9200 + targetPort: 9200 + - name: "https" + protocol: "TCP" + port: 9300 + targetPort: 9300 + selector: + app: "elasticsearch" + +--- +apiVersion: "apps/v1" +kind: "Deployment" +metadata: + namespace: "default" + name: "kibana" + labels: + app: "kibana" +spec: + replicas: 1 + selector: + matchLabels: + app: "kibana" + template: + metadata: + labels: + app: "kibana" + spec: + containers: + - name: "kibana" + image: "kibana:6.8.0" + ports: + - containerPort: 5601 + +--- +apiVersion: "v1" +kind: "Service" +metadata: + namespace: "default" + name: "kibana" + labels: + app: "kibana" +spec: + type: "NodePort" + ports: + - port: 5601 + selector: + app: "kibana" 
diff --git a/OracleAccessManagement/kubernetes/3.0.1/grafana/weblogic_dashboard.json b/OracleAccessManagement/kubernetes/grafana/weblogic_dashboard.json old mode 100644 new mode 100755 similarity index 100% rename from OracleAccessManagement/kubernetes/3.0.1/grafana/weblogic_dashboard.json rename to OracleAccessManagement/kubernetes/grafana/weblogic_dashboard.json diff --git a/OracleAccessManagement/kubernetes/logging-services/logstash/README.md b/OracleAccessManagement/kubernetes/logging-services/logstash/README.md new file mode 100755 index 000000000..72572c557 --- /dev/null +++ b/OracleAccessManagement/kubernetes/logging-services/logstash/README.md @@ -0,0 +1,59 @@ +## Publish OracleAccessManagement server and diagnostics logs into Elasticsearch + +## Prerequisites +See [here](https://oracle.github.io/weblogic-kubernetes-operator/samples/simple/elastic-stack/) for the steps to integrate Elasticsearch for the WebLogic Kubernetes operator. + +Before deploying the WebLogic Kubernetes operator edit `values.yaml` in weblogic-kubernetes-operator/kubernetes/charts/weblogic-operator/ to enable elastic search integration. +Configure the following variables: +```bash +# elkIntegrationEnabled specifies whether or not ELK integration is enabled. +elkIntegrationEnabled: true +# logStashImage specifies the docker image containing logstash. +# This parameter is ignored if 'elkIntegrationEnabled' is false. +logStashImage: "logstash:6.6.0" + +# elasticSearchHost specifies the hostname of where Elasticsearch is running. +# This parameter is ignored if 'elkIntegrationEnabled' is false. +elasticSearchHost: "elasticsearch.default.svc.cluster.local" + +# elasticSearchPort specifies the port number of where Elasticsearch is running. +# This parameter is ignored if 'elkIntegrationEnabled' is false. +elasticSearchPort: 9200 +``` +Deployment of WebLogic Kubernetes operator with above changes, will create an additional logstash container as sidecar. 
This logstash container will push the operator logs to the configured Elasticsearch server. + +### WebLogic Server logs + +The WebLogic server logs or diagnostics logs can be pushed to the Elasticsearch server using a logstash pod. The logstash pod should have access to the shared domain home or the log location. The persistent volume of the domain home can be used in the logstash pod. + +### Create the logstash pod + +1. Get Domain home persistence volume claim details +Get the persistent volume details of the domain home of the WebLogic server(s). + + ```bash + $ kubectl get pvc -n accessns + ``` + +1. Create logstash configuration. +Create logstash configuration file. The logstash configuration file can be loaded from a volume. + ```bash + $ kubectl cp logstash.conf accessns/accessinfra-adminserver:/u01/oracle/user_projects/domains --namespace accessns + ``` + + You can use the sample logstash configuration file generated to push server and diagnostic logs of all servers available at DOMAIN_HOME/servers/<server_name>/logs/<server_name>-diagnostic.log + +1. Copy the logstash.conf into, say, /u01/oracle/user_projects/domains so that it can be used for logstash deployment, using the Administration Server pod + +1. Create deployment YAML for logstash pod. +You can use the sample logstash.yaml file generated to create the deployment for the logstash pod. The mounted persistent volume of the domain home will provide access to the WebLogic server logs to the logstash pod. +Make sure to point the logstash configuration file to the correct location and also the correct domain home persistence volume claim. + +1. Deploy logstash to start publishing logs to Elasticsearch: + + ```bash + $ kubectl create -f logstash.yaml + ``` + +1. Now, you can view the diagnostics logs using Kibana with index pattern `logstash-*`. 
+ diff --git a/OracleAccessManagement/kubernetes/logging-services/logstash/logstash.conf b/OracleAccessManagement/kubernetes/logging-services/logstash/logstash.conf new file mode 100755 index 000000000..37567b0a2 --- /dev/null +++ b/OracleAccessManagement/kubernetes/logging-services/logstash/logstash.conf @@ -0,0 +1,25 @@ +# Copyright (c) 2021, Oracle and/or its affiliates. +# Licensed under the Universal Permissive License v 1.0 as shown at https://oss.oracle.com/licenses/upl. +# + +input { + file { + path => "/u01/oracle/user_projects/domains/accessinfra/servers/**/logs/*-diagnostic.log" + start_position => beginning + } + file { + path => "/u01/oracle/user_projects/domains/logs/accessinfra/*.log" + start_position => beginning + } +} + +filter { + grok { + match => [ "message", "<%{DATA:log_timestamp}> <%{WORD:log_level}> <%{WORD:thread}> <%{HOSTNAME:hostname}> <%{HOSTNAME:servername}> <%{DATA:timer}> <<%{DATA:kernel}>> <> <%{DATA:uuid}> <%{NUMBER:timestamp}> <%{DATA:misc}> <%{DATA:log_number}> <%{DATA:log_message}>" ] + } +} +output { + elasticsearch { + hosts => ["elasticsearch.default.svc.cluster.local:9200"] + } +} diff --git a/OracleAccessManagement/kubernetes/logging-services/logstash/logstash.yaml b/OracleAccessManagement/kubernetes/logging-services/logstash/logstash.yaml new file mode 100755 index 000000000..5a7d449b0 --- /dev/null +++ b/OracleAccessManagement/kubernetes/logging-services/logstash/logstash.yaml @@ -0,0 +1,39 @@ +# Copyright (c) 2021, Oracle and/or its affiliates. +# Licensed under the Universal Permissive License v 1.0 as shown at https://oss.oracle.com/licenses/upl. 
+# + +apiVersion: apps/v1 +kind: Deployment +metadata: + name: logstash + namespace: accessns +spec: + selector: + matchLabels: + app: logstash + template: # create pods using pod definition in this template + metadata: + labels: + app: logstash + spec: + volumes: + - name: domain-storage-volume + persistentVolumeClaim: + claimName: domain-pvc + - name: shared-logs + emptyDir: {} + containers: + - name: logstash + image: logstash:6.6.0 + command: ["/bin/sh"] + args: ["/usr/share/logstash/bin/logstash", "-f", "/u01/oracle/user_projects/domains/logstash.conf"] + imagePullPolicy: IfNotPresent + volumeMounts: + - mountPath: /u01/oracle/user_projects + name: domain-storage-volume + - name: shared-logs + mountPath: /shared-logs + ports: + - containerPort: 5044 + name: logstash + diff --git a/OracleAccessManagement/kubernetes/logging-services/weblogic-logging-exporter/README.md b/OracleAccessManagement/kubernetes/logging-services/weblogic-logging-exporter/README.md new file mode 100755 index 000000000..9389d3827 --- /dev/null +++ b/OracleAccessManagement/kubernetes/logging-services/weblogic-logging-exporter/README.md @@ -0,0 +1,131 @@ +## Publish WebLogic Server logs into Elasticsearch + +The WebLogic Logging Exporter adds a log event handler to WebLogic Server, such that WebLogic Server logs can be integrated into Elastic Stack in Kubernetes directly, by using the Elasticsearch REST API. + +## Prerequisite + +This document assumes that you have already deployed Elasticsearch/Kibana environment. If you have not, please use a sample/demo deployment of Elasticsearch/Kibana from WebLogic Kubernetes operator. 
+ +To deploy Elasticsearch and Kibana on the Kubernetes cluster: +```bash +$ kubectl create -f https://raw.githubusercontent.com/oracle/weblogic-kubernetes-operator/master/kubernetes/samples/scripts/elasticsearch-and-kibana/elasticsearch_and_kibana.yaml +``` + +Follow these steps to setup WebLogic Logging Exporter in a WebLogic operator environment and push the WebLogic server logs to Elasticsearch/Kibana + +1. Download WebLogic logging exporter binaries + + The WebLogic logging exporter pre-built binaries are available in the github releases page: [Release 1.0.1](https://github.com/oracle/weblogic-logging-exporter/releases) + + ```bash + $ wget https://github.com/oracle/weblogic-logging-exporter/releases/download/v1.0.1/weblogic-logging-exporter.jar + ``` + + Download weblogic-logging-exporter.jar from the github release link above. Also download dependency jar - snakeyaml-1.27.jar from Maven Central. + + ```bash + $ wget -O snakeyaml-1.27.jar https://search.maven.org/remotecontent?filepath=org/yaml/snakeyaml/1.27/snakeyaml-1.27.jar + ``` +1. Copy JAR files into the Kubernetes WebLogic Administration Server Pod + + Copy weblogic-logging-exporter.jar and snakeyaml-1.27.jar to the domain home folder in the Administration server pod. + + ```bash + $ kubectl cp weblogic-logging-exporter.jar accessns/accessinfra-adminserver:/u01/oracle/user_projects/domains/accessinfra/ + $ kubectl cp snakeyaml-1.27.jar accessns/accessinfra-adminserver:/u01/oracle/user_projects/domains/accessinfra/ + ``` + +1. Add a startup class to the domain configuration + + In this step, we configure weblogic-logging-exporter JAR as a startup class in the WebLogic servers where we intend to collect the logs. + + a) In the Administration Console, navigate to `Environment` then `Startup and Shutdown classes` in the main menu. + + b) Add a new Startup class. You may choose any descriptive name and the class name must be `weblogic.logging.exporter.Startup`. 
+ + c) Target the startup class to each server that you want to export logs from. + + You can verify this by checking for the update in your config.xml which should be similar to this example: + + ```xml + <startup-class> + <name>LoggingExporterStartupClass</name> + <target>AdminServer</target> + <class-name>weblogic.logging.exporter.Startup</class-name> + </startup-class> + ``` + +1. Update WebLogic Server CLASS Path. + + In this step, we set the class path for weblogic-logging-exporter and its dependencies. + + a) Copy setDomainEnv.sh from the pod to local folder. + ```bash + $ kubectl cp accessns/accessinfra-adminserver:/u01/oracle/user_projects/domains/accessinfra/bin/setDomainEnv.sh setDomainEnv.sh + ``` + b) Modify setDomainEnv.sh to update the Server Class path. + ```bash + CLASSPATH=/u01/oracle/user_projects/domains/accessinfra/weblogic-logging-exporter.jar:/u01/oracle/user_projects/domains/accessinfra/snakeyaml-1.27.jar:${CLASSPATH} + export CLASSPATH + ``` + + c) Copy back the modified setDomainEnv.sh to the pod. + ```bash + $ kubectl cp setDomainEnv.sh accessns/accessinfra-adminserver:/u01/oracle/user_projects/domains/accessinfra/bin/setDomainEnv.sh + ``` + +1. Create configuration file for the WebLogic Logging Exporter. +Copy WebLogicLoggingExporter.yaml to the domain folder in the WebLogic server pod. The YAML file specifies the Elasticsearch server host and port number. + ```bash + $ kubectl cp WebLogicLoggingExporter.yaml accessns/accessinfra-adminserver:/u01/oracle/user_projects/domains/accessinfra/config/ + ``` + +1. Restart WebLogic Servers + + Now we can restart the WebLogic servers for the weblogic-logging-exporter to get loaded in the servers. + + To restart the servers, use stopDomain.sh and startDomain.sh scripts from https://github.com/oracle/weblogic-kubernetes-operator/tree/master/kubernetes/samples/scripts/domain-lifecycle + + The stopDomain.sh script shuts down a domain by patching the `spec.serverStartPolicy` attribute of the domain resource to `NEVER`.
The operator will shut down the WebLogic Server instance Pods that are part of the domain after the `spec.serverStartPolicy` attribute is updated to `NEVER`. See the script usage information by using the -h option. + + ```bash + $ stopDomain.sh -d accessinfra -n accessns + ``` + Sample output: + ```bash + [INFO] Patching domain 'accessinfra' in namespace 'accessns' from serverStartPolicy='IF_NEEDED' to 'NEVER'. + domain.weblogic.oracle/accessinfra patched + [INFO] Successfully patched domain 'accessinfra' in namespace 'accessns' with 'NEVER' start policy! + ``` + + Verify servers by checking the pod status. + ```bash + $ kubectl get pods -n accessns + ``` + + After all the servers are shutdown, run startDomain.sh script to start again. + + The startDomain.sh script starts a deployed domain by patching the `spec.serverStartPolicy` attribute of the domain resource to `IF_NEEDED`. The operator will start the WebLogic Server instance Pods that are part of the domain after the `spec.serverStartPolicy` attribute of the domain resource is updated to `IF_NEEDED`. See the script usage information by using the -h option. + + ```bash + $ startDomain.sh -d accessinfra -n accessns + ``` + Sample output: + ```bash + [INFO] Patching domain 'accessinfra' from serverStartPolicy='NEVER' to 'IF_NEEDED'. + domain.weblogic.oracle/accessinfra patched + [INFO] Successfully patched domain 'accessinfra' in namespace 'accessns' with 'IF_NEEDED' start policy! + ``` + + Verify servers by checking the pod status. Pod status will be RUNNING. + ```bash + $ kubectl get pods -n accessns + ``` + In the server logs, you will be able to see the weblogic-logging-exporter class being called. + +1. Create an index pattern in Kibana + + We need to create an index pattern in Kibana for the logs to be available in the dashboard. + + Create an index pattern `wls*` in `Kibana` > `Management`. 
After the server starts, you will be able to see the log data from the WebLogic servers in the Kibana dashboard, + diff --git a/OracleAccessManagement/kubernetes/logging-services/weblogic-logging-exporter/WebLogicLoggingExporter.yaml b/OracleAccessManagement/kubernetes/logging-services/weblogic-logging-exporter/WebLogicLoggingExporter.yaml new file mode 100755 index 000000000..08ee215d8 --- /dev/null +++ b/OracleAccessManagement/kubernetes/logging-services/weblogic-logging-exporter/WebLogicLoggingExporter.yaml @@ -0,0 +1,13 @@ +# Copyright (c) 2021, Oracle and/or its affiliates. +# Licensed under the Universal Permissive License v 1.0 as shown at https://oss.oracle.com/licenses/upl. +# + +weblogicLoggingIndexName: wls +publishHost: elasticsearch.default.svc.cluster.local +publishPort: 9200 +domainUID: accessinfra +weblogicLoggingExporterEnabled: true +weblogicLoggingExporterSeverity: Notice +weblogicLoggingExporterBulkSize: 1 + + diff --git a/OracleAccessManagement/kubernetes/monitoring-service/README.md b/OracleAccessManagement/kubernetes/monitoring-service/README.md new file mode 100755 index 000000000..41938d9f1 --- /dev/null +++ b/OracleAccessManagement/kubernetes/monitoring-service/README.md @@ -0,0 +1,120 @@ +## Monitor the OracleAccessManagement instance using Prometheus and Grafana +Using the `WebLogic Monitoring Exporter` you can scrape runtime information from a running OracleAccessManagement instance and monitor them using Prometheus and Grafana. + +### Prerequisites + +- Have Docker and a Kubernetes cluster running and have `kubectl` installed and configured. +- Have Helm installed. +- An OracleAccessManagement domain deployed by `weblogic-operator` is running in the Kubernetes cluster. + +### Prepare to use the setup monitoring script + +The sample scripts for setup monitoring for OracleAccessManagement domain are available at `${WORKDIR}/monitoring-service`. 
+ +You must edit `monitoring-inputs.yaml` (or a copy of it) to provide the details of your domain. Refer to the configuration parameters below to understand the information that you must provide in this file. + +#### Configuration parameters + +The following parameters can be provided in the inputs file. + +| Parameter | Description | Default | +| --- | --- | --- | +| `domainUID` | domainUID of the OracleAccessManagement domain. | `accessinfra` | +| `domainNamespace` | Kubernetes namespace of the OracleAccessManagement domain. | `accessns` | +| `setupKubePrometheusStack` | Boolean value indicating whether kube-prometheus-stack (Prometheus, Grafana and Alertmanager) is to be installed | `true` | +| `additionalParamForKubePrometheusStack` | The script installs kube-prometheus-stack with `service.type` as NodePort and values for `service.nodePort` as per the parameters defined in `monitoring-inputs.yaml`. Use `additionalParamForKubePrometheusStack` parameter to further configure with additional parameters as per [values.yaml](https://github.com/prometheus-community/helm-charts/blob/main/charts/kube-prometheus-stack/values.yaml). Sample value to disable NodeExporter, Prometheus-Operator TLS support and Admission webhook support for PrometheusRules resources is `--set nodeExporter.enabled=false --set prometheusOperator.tls.enabled=false --set prometheusOperator.admissionWebhooks.enabled=false`| | +| `monitoringNamespace` | Kubernetes namespace for monitoring setup. | `monitoring` | +| `adminServerName` | Name of the Administration Server. | `AdminServer` | +| `adminServerPort` | Port number for the Administration Server inside the Kubernetes cluster. | `7001` | +| `oamClusterName` | Name of the oamCluster. | `oam_cluster` | +| `oamManagedServerPort` | Port number of the managed servers in the oamCluster. | `14100` | +| `wlsMonitoringExporterTooamCluster` | Boolean value indicating whether to deploy WebLogic Monitoring Exporter to oamCluster.
| `false` | +| `policyClusterName` | Name of the policyCluster. | `policy_cluster` | +| `policyManagedServerPort` | Port number of the managed servers in the policyCluster. | `15100` | +| `wlsMonitoringExporterTopolicyCluster` | Boolean value indicating whether to deploy WebLogic Monitoring Exporter to policyCluster. | `false` | +| `exposeMonitoringNodePort` | Boolean value indicating if the Monitoring Services (Prometheus, Grafana and Alertmanager) is exposed outside of the Kubernetes cluster. | `false` | +| `prometheusNodePort` | Port number of the Prometheus outside the Kubernetes cluster. | `32101` | +| `grafanaNodePort` | Port number of the Grafana outside the Kubernetes cluster. | `32100` | +| `alertmanagerNodePort` | Port number of the Alertmanager outside the Kubernetes cluster. | `32102` | +| `weblogicCredentialsSecretName` | Name of the Kubernetes secret which has Administration Server’s user name and password. | `accessinfra-domain-credentials` | + +Note that the values specified in the `monitoring-inputs.yaml` file will be used to install kube-prometheus-stack (Prometheus, Grafana and Alertmanager) and to deploy WebLogic Monitoring Exporter into the OracleAccessManagement domain. Hence, make the domain-specific values the same as those used during domain creation. + +### Run the setup monitoring script + +Update the values in `monitoring-inputs.yaml` as per your requirement and run the `setup-monitoring.sh` script, specifying your inputs file: + +```bash +$ cd ${WORKDIR}/monitoring-service +$ ./setup-monitoring.sh \ + -i monitoring-inputs.yaml +``` +The script will perform the following steps: + +- Helm install `prometheus-community/kube-prometheus-stack` of version "16.5.0" if `setupKubePrometheusStack` is set to `true`. +- Deploys WebLogic Monitoring Exporter to Administration Server. +- Deploys WebLogic Monitoring Exporter to `oamCluster` if `wlsMonitoringExporterTooamCluster` is set to `true`.
+- Exposes the Monitoring Services (Prometheus at `32101`, Grafana at `32100` and Alertmanager at `32102`) outside of the Kubernetes cluster if `exposeMonitoringNodePort` is set to `true`. +- Imports the WebLogic Server Grafana Dashboard if `setupKubePrometheusStack` is set to `true`. +- Deploys WebLogic Monitoring Exporter to Administration Server. +- Deploys WebLogic Monitoring Exporter to `policyCluster` if `wlsMonitoringExporterTopolicyCluster` is set to `true`. +- Exposes the Monitoring Services (Prometheus at `32101`, Grafana at `32100` and Alertmanager at `32102`) outside of the Kubernetes cluster if `exposeMonitoringNodePort` is set to `true`. +- Imports the WebLogic Server Grafana Dashboard if `setupKubePrometheusStack` is set to `true`. + +### Verify the results +The setup monitoring script will report failure if there was any error. However, verify that required resources were created by the script. + +#### Verify the kube-prometheus-stack + +To confirm that `prometheus-community/kube-prometheus-stack` was installed when `setupKubePrometheusStack` is set to `true`, run the following command: + +```bash +$ helm ls -n <monitoring-namespace> +``` +Replace `<monitoring-namespace>` with the value of the Kubernetes namespace used for monitoring. + +Sample output: +```bash +$ helm ls -n monitoring +NAME NAMESPACE REVISION UPDATED STATUS CHART APP VERSION +monitoring monitoring 1 2021-06-18 12:58:35.177221969 +0000 UTC deployed kube-prometheus-stack-16.5.0 0.48.0 +$ +``` + +#### Verify the Prometheus, Grafana and Alertmanager setup + +When `exposeMonitoringNodePort` was set to `true`, verify that monitoring services are accessible outside of the Kubernetes cluster: + +- `32100` is the external port for Grafana, with credentials `admin:admin` +- `32101` is the external port for Prometheus +- `32102` is the external port for Alertmanager + +#### Verify the service discovery of WebLogic Monitoring Exporter + +Verify whether Prometheus is able to discover wls-exporter and collect the metrics: + +1.
Access the Prometheus dashboard at http://mycompany.com:32101/ + +1. Navigate to Status to see the Service Discovery details. + +1. Verify that wls-exporter is listed in the discovered services. + +#### Verify the WebLogic Server dashboard + +You can access the Grafana dashboard at http://mycompany.com:32100/. + +1. Log in to Grafana dashboard with username: `admin` and password: `admin`. + +1. Navigate to "WebLogic Server Dashboard" under General and verify. + +### Delete the monitoring setup + +To delete the monitoring setup created by [Run the setup monitoring script](#run-the-setup-monitoring-script), run the following command: + +```bash +$ cd ${WORKDIR}/monitoring-service +$ ./delete-monitoring.sh \ + -i monitoring-inputs.yaml +``` + + diff --git a/OracleAccessManagement/kubernetes/monitoring-service/config/config.yml.template b/OracleAccessManagement/kubernetes/monitoring-service/config/config.yml.template new file mode 100755 index 000000000..792f64d27 --- /dev/null +++ b/OracleAccessManagement/kubernetes/monitoring-service/config/config.yml.template @@ -0,0 +1,64 @@ +# Copyright (c) 2021, Oracle and/or its affiliates. +# Licensed under the Universal Permissive License v 1.0 as shown at https://oss.oracle.com/licenses/upl.
+# +metricsNameSnakeCase: true +restPort: %PORT% +queries: +- key: name + keyName: location + prefix: wls_server_ + applicationRuntimes: + key: name + keyName: app + componentRuntimes: + prefix: wls_webapp_config_ + type: WebAppComponentRuntime + key: name + values: [deploymentState, contextRoot, sourceInfo, openSessionsHighCount, openSessionsCurrentCount, sessionsOpenedTotalCount, sessionCookieMaxAgeSecs, sessionInvalidationIntervalSecs, sessionTimeoutSecs, singleThreadedServletPoolSize, sessionIDLength, servletReloadCheckSecs, jSPPageCheckSecs] + servlets: + prefix: wls_servlet_ + key: servletName + +- JVMRuntime: + prefix: wls_jvm_ + key: name + +- executeQueueRuntimes: + prefix: wls_socketmuxer_ + key: name + values: [pendingRequestCurrentCount] + +- workManagerRuntimes: + prefix: wls_workmanager_ + key: name + values: [stuckThreadCount, pendingRequests, completedRequests] + +- threadPoolRuntime: + prefix: wls_threadpool_ + key: name + values: [executeThreadTotalCount, queueLength, stuckThreadCount, hoggingThreadCount] + +- JMSRuntime: + key: name + keyName: jmsruntime + prefix: wls_jmsruntime_ + JMSServers: + prefix: wls_jms_ + key: name + keyName: jmsserver + destinations: + prefix: wls_jms_dest_ + key: name + keyName: destination + +- persistentStoreRuntimes: + prefix: wls_persistentstore_ + key: name +- JDBCServiceRuntime: + JDBCDataSourceRuntimeMBeans: + prefix: wls_datasource_ + key: name +- JTARuntime: + prefix: wls_jta_ + key: name + diff --git a/OracleIdentityGovernance/kubernetes/3.0.1/weblogic_dashboard.json b/OracleAccessManagement/kubernetes/monitoring-service/config/weblogic-server-dashboard-import.json old mode 100644 new mode 100755 similarity index 100% rename from OracleIdentityGovernance/kubernetes/3.0.1/weblogic_dashboard.json rename to OracleAccessManagement/kubernetes/monitoring-service/config/weblogic-server-dashboard-import.json diff --git a/OracleAccessManagement/kubernetes/monitoring-service/config/weblogic-server-dashboard.json 
b/OracleAccessManagement/kubernetes/monitoring-service/config/weblogic-server-dashboard.json new file mode 100644 index 000000000..23961d230 --- /dev/null +++ b/OracleAccessManagement/kubernetes/monitoring-service/config/weblogic-server-dashboard.json @@ -0,0 +1,3315 @@ +{ + "dashboard": { + "__inputs": [], + "__requires": [ + { + "type": "grafana", + "id": "grafana", + "name": "Grafana", + "version": "5.2.4" + }, + { + "type": "panel", + "id": "graph", + "name": "Graph", + "version": "5.0.0" + }, + { + "type": "panel", + "id": "singlestat", + "name": "Singlestat", + "version": "5.0.0" + }, + { + "type": "panel", + "id": "table", + "name": "Table", + "version": "5.0.0" + } + ], + "annotations": { + "list": [ + { + "builtIn": 1, + "datasource": "-- Grafana --", + "enable": true, + "hide": true, + "iconColor": "rgba(0, 211, 255, 1)", + "name": "Annotations & Alerts", + "type": "dashboard" + } + ] + }, + "editable": true, + "gnetId": null, + "graphTooltip": 0, + "id": null, + "iteration": 1563266678971, + "links": [], + "panels": [ + { + "collapsed": false, + "gridPos": { + "h": 1, + "w": 24, + "x": 0, + "y": 0 + }, + "id": 32, + "panels": [], + "title": "Servers", + "type": "row" + }, + { + "cacheTimeout": null, + "colorBackground": false, + "colorValue": false, + "colors": [ + "#299c46", + "rgba(237, 129, 40, 0.89)", + "#d44a3a" + ], + "datasource": "$datasource", + "decimals": 0, + "format": "none", + "gauge": { + "maxValue": 100, + "minValue": 0, + "show": false, + "thresholdLabels": false, + "thresholdMarkers": true + }, + "gridPos": { + "h": 3, + "w": 13, + "x": 0, + "y": 1 + }, + "hideTimeOverride": true, + "id": 16, + "interval": null, + "links": [], + "mappingType": 1, + "mappingTypes": [ + { + "name": "value to text", + "value": 1 + }, + { + "name": "range to text", + "value": 2 + } + ], + "maxDataPoints": 100, + "nullPointMode": "connected", + "nullText": null, + "postfix": "", + "postfixFontSize": "50%", + "prefix": "", + "prefixFontSize": "50%", + 
"rangeMaps": [ + { + "from": "null", + "text": "N/A", + "to": "null" + } + ], + "sparkline": { + "fillColor": "rgba(31, 118, 189, 0.18)", + "full": false, + "lineColor": "rgb(31, 120, 193)", + "show": false + }, + "tableColumn": "", + "targets": [ + { + "expr": "count(count (wls_jvm_uptime{weblogic_domainUID=\"$domainName\", weblogic_clusterName=\"$clusterName\"}) by (name))", + "format": "time_series", + "intervalFactor": 1, + "legendFormat": "{{weblogic_serverName}}", + "refId": "A" + } + ], + "thresholds": "", + "timeFrom": null, + "timeShift": null, + "title": "Running Servers", + "type": "singlestat", + "valueFontSize": "80%", + "valueMaps": [ + { + "op": "=", + "text": "N/A", + "value": "null" + } + ], + "valueName": "avg" + }, + { + "cacheTimeout": null, + "colorBackground": false, + "colorValue": false, + "colors": [ + "#299c46", + "rgba(237, 129, 40, 0.89)", + "#d44a3a" + ], + "datasource": "$datasource", + "format": "none", + "gauge": { + "maxValue": 100, + "minValue": 0, + "show": false, + "thresholdLabels": false, + "thresholdMarkers": true + }, + "gridPos": { + "h": 3, + "w": 11, + "x": 13, + "y": 1 + }, + "id": 23, + "interval": null, + "links": [], + "mappingType": 1, + "mappingTypes": [ + { + "name": "value to text", + "value": 1 + }, + { + "name": "range to text", + "value": 2 + } + ], + "maxDataPoints": 100, + "nullPointMode": "connected", + "nullText": null, + "postfix": "", + "postfixFontSize": "50%", + "prefix": "", + "prefixFontSize": "50%", + "rangeMaps": [ + { + "from": "null", + "text": "N/A", + "to": "null" + } + ], + "sparkline": { + "fillColor": "rgba(31, 118, 189, 0.18)", + "full": false, + "lineColor": "rgb(31, 120, 193)", + "show": false + }, + "tableColumn": "", + "targets": [ + { + "expr": "count(count(wls_webapp_config_deployment_state{weblogic_domainUID=\"$domainName\", weblogic_clusterName=\"$clusterName\"}) by (app))", + "format": "time_series", + "intervalFactor": 1, + "legendFormat": "", + "refId": "A" + } + ], + "thresholds": 
"", + "timeFrom": null, + "timeShift": null, + "title": "Deployed Applications", + "type": "singlestat", + "valueFontSize": "80%", + "valueMaps": [ + { + "op": "=", + "text": "N/A", + "value": "null" + } + ], + "valueName": "avg" + }, + { + "cacheTimeout": null, + "colorBackground": false, + "colorValue": true, + "colors": [ + "#299c46", + "rgba(237, 129, 40, 0.89)", + "#d44a3a" + ], + "datasource": "$datasource", + "decimals": 1, + "description": "", + "format": "percent", + "gauge": { + "maxValue": 100, + "minValue": 0, + "show": false, + "thresholdLabels": false, + "thresholdMarkers": true + }, + "gridPos": { + "h": 3, + "w": 6, + "x": 0, + "y": 4 + }, + "hideTimeOverride": true, + "id": 104, + "interval": null, + "links": [], + "mappingType": 1, + "mappingTypes": [ + { + "name": "value to text", + "value": 1 + }, + { + "name": "range to text", + "value": 2 + } + ], + "maxDataPoints": 100, + "nullPointMode": "connected", + "nullText": null, + "postfix": "", + "postfixFontSize": "50%", + "prefix": "", + "prefixFontSize": "50%", + "rangeMaps": [ + { + "from": "null", + "text": "N/A", + "to": "null" + } + ], + "repeat": "serverName", + "repeatDirection": "v", + "sparkline": { + "fillColor": "rgba(31, 118, 189, 0.18)", + "full": false, + "lineColor": "rgb(31, 120, 193)", + "show": false + }, + "tableColumn": "weblogic_serverName", + "targets": [ + { + "expr": "wls_server_activation_time{weblogic_domainUID=\"$domainName\", weblogic_clusterName=\"$clusterName\",weblogic_serverName=\"$serverName\"}", + "format": "table", + "hide": false, + "instant": true, + "interval": "10s", + "intervalFactor": 2, + "legendFormat": "", + "refId": "A" + } + ], + "thresholds": "50,80", + "timeFrom": null, + "timeShift": null, + "title": "Server Name", + "type": "singlestat", + "valueFontSize": "50%", + "valueMaps": [ + { + "op": "=", + "text": "", + "value": "" + } + ], + "valueName": "current" + }, + { + "cacheTimeout": null, + "colorBackground": false, + "colorValue": true, + 
"colors": [ + "#56A64B", + "rgba(237, 129, 40, 0.89)", + "#d44a3a" + ], + "datasource": "$datasource", + "format": "none", + "gauge": { + "maxValue": 100, + "minValue": 0, + "show": false, + "thresholdLabels": false, + "thresholdMarkers": true + }, + "gridPos": { + "h": 3, + "w": 4, + "x": 6, + "y": 4 + }, + "id": 84, + "interval": "", + "links": [], + "mappingType": 1, + "mappingTypes": [ + { + "name": "value to text", + "value": 1 + }, + { + "name": "range to text", + "value": 2 + } + ], + "maxDataPoints": 100, + "nullPointMode": "connected", + "nullText": null, + "postfix": "", + "postfixFontSize": "50%", + "prefix": "", + "prefixFontSize": "50%", + "rangeMaps": [ + { + "from": "null", + "text": "N/A", + "to": "null" + } + ], + "repeat": "serverName", + "repeatDirection": "v", + "sparkline": { + "fillColor": "rgba(31, 118, 189, 0.18)", + "full": false, + "lineColor": "rgb(31, 120, 193)", + "show": false + }, + "tableColumn": "", + "targets": [ + { + "expr": "wls_server_state_val{weblogic_domainUID=\"$domainName\", weblogic_clusterName=\"$clusterName\", weblogic_serverName=\"$serverName\"}", + "format": "time_series", + "intervalFactor": 1, + "refId": "A" + } + ], + "thresholds": "", + "timeFrom": null, + "timeShift": null, + "title": "Server Status", + "type": "singlestat", + "valueFontSize": "50%", + "valueMaps": [ + { + "op": "=", + "text": "SHUTDOWN", + "value": "0" + }, + { + "op": "=", + "text": "STARTING", + "value": "1" + }, + { + "op": "=", + "text": "RUNNING", + "value": "2" + }, + { + "op": "=", + "text": "STANDBY", + "value": "3" + }, + { + "op": "=", + "text": "FAILED", + "value": "8" + }, + { + "op": "=", + "text": "FAILED", + "value": "17" + } + ], + "valueName": "current" + }, + { + "cacheTimeout": null, + "colorBackground": false, + "colorValue": true, + "colors": [ + "#299c46", + "rgba(237, 129, 40, 0.89)", + "#d44a3a" + ], + "datasource": "$datasource", + "decimals": 1, + "description": "", + "format": "percent", + "gauge": { + "maxValue": 100, 
+ "minValue": 0, + "show": true, + "thresholdLabels": false, + "thresholdMarkers": true + }, + "gridPos": { + "h": 3, + "w": 4, + "x": 10, + "y": 4 + }, + "hideTimeOverride": true, + "id": 27, + "interval": null, + "links": [], + "mappingType": 1, + "mappingTypes": [ + { + "name": "value to text", + "value": 1 + }, + { + "name": "range to text", + "value": 2 + } + ], + "maxDataPoints": 100, + "nullPointMode": "connected", + "nullText": null, + "postfix": "", + "postfixFontSize": "50%", + "prefix": "", + "prefixFontSize": "50%", + "rangeMaps": [ + { + "from": "null", + "text": "N/A", + "to": "null" + } + ], + "repeat": "serverName", + "repeatDirection": "v", + "sparkline": { + "fillColor": "rgba(31, 118, 189, 0.18)", + "full": false, + "lineColor": "rgb(31, 120, 193)", + "show": true + }, + "tableColumn": "instance", + "targets": [ + { + "expr": "100 - wls_jvm_heap_free_percent{weblogic_domainUID=\"$domainName\", weblogic_clusterName=\"$clusterName\", weblogic_serverName=\"$serverName\"}", + "format": "time_series", + "hide": false, + "instant": true, + "interval": "10s", + "intervalFactor": 2, + "legendFormat": "", + "refId": "A" + } + ], + "thresholds": "50,80", + "timeFrom": null, + "timeShift": null, + "title": "Heap Usage", + "type": "singlestat", + "valueFontSize": "80%", + "valueMaps": [ + { + "op": "=", + "text": "", + "value": "" + } + ], + "valueName": "current" + }, + { + "cacheTimeout": null, + "colorBackground": false, + "colorPostfix": false, + "colorValue": true, + "colors": [ + "#299c46", + "rgba(237, 129, 40, 0.89)", + "#d44a3a" + ], + "datasource": "$datasource", + "decimals": 1, + "description": "", + "format": "ms", + "gauge": { + "maxValue": 100, + "minValue": 0, + "show": false, + "thresholdLabels": false, + "thresholdMarkers": true + }, + "gridPos": { + "h": 3, + "w": 5, + "x": 14, + "y": 4 + }, + "hideTimeOverride": true, + "id": 91, + "interval": null, + "links": [], + "mappingType": 1, + "mappingTypes": [ + { + "name": "value to text", + 
"value": 1 + }, + { + "name": "range to text", + "value": 2 + } + ], + "maxDataPoints": 100, + "nullPointMode": "connected", + "nullText": null, + "postfix": "", + "postfixFontSize": "50%", + "prefix": "", + "prefixFontSize": "50%", + "rangeMaps": [ + { + "from": "null", + "text": "N/A", + "to": "null" + } + ], + "repeat": "serverName", + "repeatDirection": "v", + "sparkline": { + "fillColor": "rgba(31, 118, 189, 0.18)", + "full": false, + "lineColor": "rgb(31, 120, 193)", + "show": false + }, + "tableColumn": "instance", + "targets": [ + { + "expr": "wls_jvm_uptime{weblogic_domainUID=\"$domainName\", weblogic_clusterName=\"$clusterName\", weblogic_serverName=\"$serverName\"}", + "format": "time_series", + "hide": false, + "instant": true, + "interval": "10s", + "intervalFactor": 2, + "legendFormat": "", + "refId": "A" + } + ], + "thresholds": "50,80", + "timeFrom": null, + "timeShift": null, + "title": "Running Time", + "type": "singlestat", + "valueFontSize": "50%", + "valueMaps": [ + { + "op": "=", + "text": "", + "value": "" + } + ], + "valueName": "current" + }, + { + "cacheTimeout": null, + "colorBackground": false, + "colorPostfix": false, + "colorValue": true, + "colors": [ + "#299c46", + "rgba(237, 129, 40, 0.89)", + "#d44a3a" + ], + "datasource": "$datasource", + "decimals": 0, + "description": "", + "format": "short", + "gauge": { + "maxValue": 100, + "minValue": 0, + "show": false, + "thresholdLabels": false, + "thresholdMarkers": true + }, + "gridPos": { + "h": 3, + "w": 5, + "x": 19, + "y": 4 + }, + "hideTimeOverride": true, + "id": 96, + "interval": null, + "links": [], + "mappingType": 1, + "mappingTypes": [ + { + "name": "value to text", + "value": 1 + }, + { + "name": "range to text", + "value": 2 + } + ], + "maxDataPoints": 100, + "nullPointMode": "connected", + "nullText": null, + "postfix": "", + "postfixFontSize": "50%", + "prefix": "", + "prefixFontSize": "50%", + "rangeMaps": [ + { + "from": "null", + "text": "N/A", + "to": "null" + } + ], + 
"repeat": "serverName", + "repeatDirection": "v", + "sparkline": { + "fillColor": "rgba(31, 118, 189, 0.18)", + "full": false, + "lineColor": "rgb(31, 120, 193)", + "show": false + }, + "tableColumn": "instance", + "targets": [ + { + "expr": "wls_server_open_sockets_current_count{weblogic_domainUID=\"$domainName\", weblogic_clusterName=\"$clusterName\", weblogic_serverName=\"$serverName\"}", + "format": "time_series", + "hide": false, + "instant": true, + "interval": "10s", + "intervalFactor": 2, + "legendFormat": "", + "refId": "A" + } + ], + "thresholds": "50,80", + "timeFrom": null, + "timeShift": null, + "title": "Open Sockets", + "type": "singlestat", + "valueFontSize": "80%", + "valueMaps": [ + { + "op": "=", + "text": "", + "value": "" + } + ], + "valueName": "current" + }, + { + "aliasColors": { + " heap free managed-server-1": "super-light-green", + " heap free managed-server-2": "dark-green", + "heap size managed-server-1 ": "super-light-red", + "heap size managed-server-2 ": "dark-red" + }, + "bars": false, + "dashLength": 10, + "dashes": false, + "datasource": "$datasource", + "fill": 1, + "gridPos": { + "h": 10, + "w": 24, + "x": 0, + "y": 7 + }, + "id": 12, + "legend": { + "alignAsTable": true, + "avg": true, + "current": true, + "max": true, + "min": false, + "rightSide": false, + "show": true, + "total": false, + "values": true + }, + "lines": true, + "linewidth": 1, + "links": [], + "nullPointMode": "null", + "paceLength": 10, + "percentage": false, + "pointradius": 2, + "points": false, + "renderer": "flot", + "seriesOverrides": [], + "spaceLength": 10, + "stack": false, + "steppedLine": false, + "targets": [ + { + "expr": "wls_jvm_heap_free_current{weblogic_domainUID=\"$domainName\", weblogic_clusterName=\"$clusterName\", weblogic_serverName=~\"${serverName:regex}\"}", + "format": "time_series", + "intervalFactor": 1, + "legendFormat": " Heap Free ({{weblogic_serverName}})", + "refId": "B" + }, + { + "expr": 
"wls_jvm_heap_size_current{weblogic_domainUID=\"$domainName\", weblogic_clusterName=\"$clusterName\", weblogic_serverName=~\"${serverName:regex}\"}", + "format": "time_series", + "instant": false, + "intervalFactor": 1, + "legendFormat": "Heap Size ({{weblogic_serverName}})", + "refId": "A" + }, + { + "expr": "wls_jvm_heap_size_max{weblogic_domainUID=\"$domainName\", weblogic_clusterName=\"$clusterName\", weblogic_serverName=~\"${serverName:regex}\"}", + "format": "time_series", + "hide": true, + "intervalFactor": 1, + "legendFormat": "Heap Max ({{weblogic_serverName}})", + "refId": "C" + } + ], + "thresholds": [], + "timeFrom": null, + "timeRegions": [], + "timeShift": null, + "title": "JVM Heap", + "tooltip": { + "shared": true, + "sort": 0, + "value_type": "individual" + }, + "type": "graph", + "xaxis": { + "buckets": null, + "mode": "time", + "name": null, + "show": true, + "values": [] + }, + "yaxes": [ + { + "format": "decbytes", + "label": null, + "logBase": 1, + "max": null, + "min": null, + "show": true + }, + { + "format": "short", + "label": null, + "logBase": 1, + "max": null, + "min": null, + "show": true + } + ], + "yaxis": { + "align": false, + "alignLevel": null + } + }, + { + "aliasColors": { + " heap free managed-server-1": "super-light-green", + " heap free managed-server-2": "dark-green", + "heap size managed-server-1 ": "super-light-red", + "heap size managed-server-2 ": "dark-red" + }, + "bars": false, + "dashLength": 10, + "dashes": false, + "datasource": "$datasource", + "fill": 1, + "gridPos": { + "h": 7, + "w": 12, + "x": 0, + "y": 17 + }, + "id": 21, + "legend": { + "alignAsTable": false, + "avg": false, + "current": true, + "max": false, + "min": false, + "rightSide": false, + "show": true, + "total": false, + "values": true + }, + "lines": true, + "linewidth": 1, + "links": [], + "nullPointMode": "null", + "paceLength": 10, + "percentage": false, + "pointradius": 2, + "points": false, + "renderer": "flot", + "seriesOverrides": [], + 
"spaceLength": 10, + "stack": false, + "steppedLine": false, + "targets": [ + { + "expr": "wls_jvm_process_cpu_load{weblogic_domainUID=~\"$domainName\", weblogic_clusterName=~\"$clusterName\", weblogic_serverName=~\"${serverName:regex}\"} * 100", + "format": "time_series", + "intervalFactor": 1, + "legendFormat": " {{weblogic_serverName}}", + "refId": "B" + } + ], + "thresholds": [], + "timeFrom": null, + "timeRegions": [], + "timeShift": null, + "title": "CPU Load", + "tooltip": { + "shared": true, + "sort": 0, + "value_type": "individual" + }, + "type": "graph", + "xaxis": { + "buckets": null, + "mode": "time", + "name": null, + "show": true, + "values": [] + }, + "yaxes": [ + { + "format": "percent", + "label": null, + "logBase": 1, + "max": null, + "min": null, + "show": true + }, + { + "format": "short", + "label": null, + "logBase": 1, + "max": null, + "min": null, + "show": true + } + ], + "yaxis": { + "align": false, + "alignLevel": null + } + }, + { + "aliasColors": {}, + "bars": false, + "dashLength": 10, + "dashes": false, + "datasource": "$datasource", + "fill": 1, + "gridPos": { + "h": 7, + "w": 12, + "x": 12, + "y": 17 + }, + "id": 10, + "legend": { + "alignAsTable": false, + "avg": false, + "current": true, + "max": false, + "min": false, + "rightSide": false, + "show": true, + "total": false, + "values": true + }, + "lines": true, + "linewidth": 1, + "links": [], + "nullPointMode": "null", + "paceLength": 10, + "percentage": false, + "pointradius": 2, + "points": false, + "renderer": "flot", + "seriesOverrides": [], + "spaceLength": 10, + "stack": false, + "steppedLine": false, + "targets": [ + { + "expr": "wls_threadpool_execute_thread_total_count{weblogic_domainUID=\"$domainName\", weblogic_clusterName=\"$clusterName\", weblogic_serverName=~\"${serverName:regex}\"}", + "format": "time_series", + "intervalFactor": 1, + "legendFormat": "Total Threads ({{weblogic_serverName}})", + "refId": "A" + }, + { + "expr": 
"wls_threadpool_stuck_thread_count{weblogic_domainUID=\"$domainName\", weblogic_clusterName=\"$clusterName\", weblogic_serverName=~\"${serverName:regex}\"}", + "format": "time_series", + "intervalFactor": 1, + "legendFormat": "Stuck Threads ({{weblogic_serverName}})", + "refId": "D" + }, + { + "expr": "wls_threadpool_queue_length{weblogic_domainUID=\"$domainName\", weblogic_clusterName=\"$clusterName\", weblogic_serverName=~\"${serverName:regex}\"}", + "format": "time_series", + "hide": true, + "intervalFactor": 1, + "legendFormat": "queue", + "refId": "C" + }, + { + "expr": "wls_threadpool_hogging_thread_count{weblogic_domainUID=\"$domainName\", weblogic_clusterName=\"$clusterName\", weblogic_serverName=~\"${serverName:regex}\"}", + "format": "time_series", + "hide": true, + "intervalFactor": 1, + "legendFormat": "hogging", + "refId": "B" + } + ], + "thresholds": [], + "timeFrom": null, + "timeRegions": [], + "timeShift": null, + "title": "Thread Pool", + "tooltip": { + "shared": true, + "sort": 0, + "value_type": "individual" + }, + "type": "graph", + "xaxis": { + "buckets": null, + "mode": "time", + "name": null, + "show": true, + "values": [] + }, + "yaxes": [ + { + "format": "short", + "label": null, + "logBase": 1, + "max": null, + "min": null, + "show": true + }, + { + "format": "short", + "label": null, + "logBase": 1, + "max": null, + "min": null, + "show": true + } + ], + "yaxis": { + "align": false, + "alignLevel": null + } + }, + { + "collapsed": true, + "gridPos": { + "h": 1, + "w": 24, + "x": 0, + "y": 24 + }, + "id": 35, + "panels": [ + { + "columns": [], + "datasource": "$datasource", + "fontSize": "100%", + "gridPos": { + "h": 7, + "w": 8, + "x": 0, + "y": 28 + }, + "hideTimeOverride": true, + "id": 126, + "links": [], + "pageSize": null, + "scroll": true, + "showHeader": true, + "sort": { + "col": 13, + "desc": true + }, + "styles": [ + { + "alias": "Time", + "dateFormat": "YYYY-MM-DD HH:mm:ss", + "pattern": "Time", + "type": "hidden" + }, + { + 
"alias": "Webapp", + "colorMode": null, + "colors": [ + "rgba(245, 54, 54, 0.9)", + "rgba(237, 129, 40, 0.89)", + "rgba(50, 172, 45, 0.97)" + ], + "dateFormat": "YYYY-MM-DD HH:mm:ss", + "decimals": 2, + "mappingType": 1, + "pattern": "app", + "thresholds": [], + "type": "string", + "unit": "short" + }, + { + "alias": "Total Sessions", + "colorMode": null, + "colors": [ + "rgba(245, 54, 54, 0.9)", + "rgba(237, 129, 40, 0.89)", + "rgba(50, 172, 45, 0.97)" + ], + "dateFormat": "YYYY-MM-DD HH:mm:ss", + "decimals": 0, + "mappingType": 1, + "pattern": "Value", + "thresholds": [], + "type": "number", + "unit": "short" + }, + { + "alias": "", + "colorMode": null, + "colors": [ + "rgba(245, 54, 54, 0.9)", + "rgba(237, 129, 40, 0.89)", + "rgba(50, 172, 45, 0.97)" + ], + "decimals": 2, + "pattern": "/.*/", + "thresholds": [], + "type": "hidden", + "unit": "short" + } + ], + "targets": [ + { + "expr": "topk($topN,sum(wls_webapp_config_sessions_opened_total_count{weblogic_domainUID=\"$domainName\", weblogic_clusterName=\"$clusterName\", weblogic_serverName=~\"${serverName:regex}\"}) by (app))", + "format": "table", + "instant": true, + "intervalFactor": 1, + "refId": "B" + } + ], + "timeFrom": null, + "timeShift": null, + "title": "Total Sessions (top $topN)", + "transform": "table", + "type": "table" + }, + { + "columns": [], + "datasource": "$datasource", + "fontSize": "100%", + "gridPos": { + "h": 7, + "w": 8, + "x": 8, + "y": 28 + }, + "hideTimeOverride": true, + "id": 136, + "links": [], + "pageSize": null, + "scroll": true, + "showHeader": true, + "sort": { + "col": 13, + "desc": true + }, + "styles": [ + { + "alias": "Time", + "dateFormat": "YYYY-MM-DD HH:mm:ss", + "pattern": "Time", + "type": "hidden" + }, + { + "alias": "Webapp", + "colorMode": null, + "colors": [ + "rgba(245, 54, 54, 0.9)", + "rgba(237, 129, 40, 0.89)", + "rgba(50, 172, 45, 0.97)" + ], + "dateFormat": "YYYY-MM-DD HH:mm:ss", + "decimals": 2, + "mappingType": 1, + "pattern": "app", + "thresholds": [], + 
"type": "string", + "unit": "short" + }, + { + "alias": "Total Requests", + "colorMode": null, + "colors": [ + "rgba(245, 54, 54, 0.9)", + "rgba(237, 129, 40, 0.89)", + "rgba(50, 172, 45, 0.97)" + ], + "dateFormat": "YYYY-MM-DD HH:mm:ss", + "decimals": 0, + "mappingType": 1, + "pattern": "Value", + "thresholds": [], + "type": "number", + "unit": "short" + }, + { + "alias": "", + "colorMode": null, + "colors": [ + "rgba(245, 54, 54, 0.9)", + "rgba(237, 129, 40, 0.89)", + "rgba(50, 172, 45, 0.97)" + ], + "decimals": 2, + "pattern": "/.*/", + "thresholds": [], + "type": "hidden", + "unit": "short" + } + ], + "targets": [ + { + "expr": "topk($topN,sum(wls_servlet_invocation_total_count{weblogic_domainUID=\"$domainName\", weblogic_clusterName=\"$clusterName\", weblogic_serverName=~\"${serverName:regex}\"}) by (app))", + "format": "table", + "instant": true, + "intervalFactor": 1, + "refId": "B" + } + ], + "timeFrom": null, + "timeShift": null, + "title": "Total Requests (top $topN)", + "transform": "table", + "type": "table" + }, + { + "columns": [], + "datasource": "$datasource", + "fontSize": "100%", + "gridPos": { + "h": 7, + "w": 8, + "x": 16, + "y": 28 + }, + "hideTimeOverride": true, + "id": 134, + "links": [], + "pageSize": null, + "scroll": true, + "showHeader": true, + "sort": { + "col": 13, + "desc": true + }, + "styles": [ + { + "alias": "Time", + "dateFormat": "YYYY-MM-DD HH:mm:ss", + "pattern": "Time", + "type": "hidden" + }, + { + "alias": "Webapp", + "colorMode": null, + "colors": [ + "rgba(245, 54, 54, 0.9)", + "rgba(237, 129, 40, 0.89)", + "rgba(50, 172, 45, 0.97)" + ], + "dateFormat": "YYYY-MM-DD HH:mm:ss", + "decimals": 2, + "mappingType": 1, + "pattern": "app", + "thresholds": [], + "type": "string", + "unit": "short" + }, + { + "alias": "Total Time", + "colorMode": null, + "colors": [ + "rgba(245, 54, 54, 0.9)", + "rgba(237, 129, 40, 0.89)", + "rgba(50, 172, 45, 0.97)" + ], + "dateFormat": "YYYY-MM-DD HH:mm:ss", + "decimals": 0, + "mappingType": 1, 
+ "pattern": "Value", + "thresholds": [], + "type": "number", + "unit": "ms" + }, + { + "alias": "", + "colorMode": null, + "colors": [ + "rgba(245, 54, 54, 0.9)", + "rgba(237, 129, 40, 0.89)", + "rgba(50, 172, 45, 0.97)" + ], + "decimals": 2, + "pattern": "/.*/", + "thresholds": [], + "type": "hidden", + "unit": "short" + } + ], + "targets": [ + { + "expr": "topk($topN,sum(wls_servlet_execution_time_total{weblogic_domainUID=\"$domainName\", weblogic_clusterName=\"$clusterName\", weblogic_serverName=~\"${serverName:regex}\"}) by (app))", + "format": "table", + "instant": true, + "intervalFactor": 1, + "refId": "B" + } + ], + "timeFrom": null, + "timeShift": null, + "title": "Total Execution Time (top $topN)", + "transform": "table", + "type": "table" + }, + { + "aliasColors": {}, + "bars": false, + "dashLength": 10, + "dashes": false, + "datasource": "$datasource", + "fill": 1, + "gridPos": { + "h": 8, + "w": 12, + "x": 0, + "y": 35 + }, + "id": 14, + "legend": { + "alignAsTable": false, + "avg": false, + "current": false, + "max": false, + "min": false, + "rightSide": false, + "show": true, + "sort": "current", + "sortDesc": true, + "total": false, + "values": false + }, + "lines": true, + "linewidth": 1, + "links": [], + "nullPointMode": "null", + "paceLength": 10, + "percentage": false, + "pointradius": 2, + "points": false, + "renderer": "flot", + "seriesOverrides": [], + "spaceLength": 10, + "stack": false, + "steppedLine": false, + "targets": [ + { + "expr": "sum(wls_webapp_config_open_sessions_current_count{weblogic_domainUID=\"$domainName\", weblogic_clusterName=\"$clusterName\", weblogic_serverName=~\"${serverName:regex}\"}) by (app)", + "format": "time_series", + "interval": "", + "intervalFactor": 1, + "legendFormat": "{{app}}", + "refId": "A" + }, + { + "expr": "", + "format": "time_series", + "intervalFactor": 1, + "refId": "B" + } + ], + "thresholds": [], + "timeFrom": null, + "timeRegions": [], + "timeShift": null, + "title": "Current Sessions ", + 
"tooltip": { + "shared": true, + "sort": 0, + "value_type": "individual" + }, + "type": "graph", + "xaxis": { + "buckets": null, + "mode": "time", + "name": null, + "show": true, + "values": [] + }, + "yaxes": [ + { + "format": "short", + "label": "", + "logBase": 1, + "max": null, + "min": null, + "show": true + }, + { + "format": "short", + "label": null, + "logBase": 1, + "max": null, + "min": null, + "show": true + } + ], + "yaxis": { + "align": false, + "alignLevel": null + } + }, + { + "aliasColors": {}, + "bars": true, + "dashLength": 10, + "dashes": false, + "datasource": "$datasource", + "fill": 1, + "gridPos": { + "h": 8, + "w": 12, + "x": 12, + "y": 35 + }, + "id": 128, + "legend": { + "alignAsTable": false, + "avg": false, + "current": false, + "max": false, + "min": false, + "rightSide": false, + "show": true, + "sort": "current", + "sortDesc": true, + "total": false, + "values": false + }, + "lines": false, + "linewidth": 1, + "links": [], + "nullPointMode": "null", + "paceLength": 10, + "percentage": false, + "pointradius": 2, + "points": false, + "renderer": "flot", + "seriesOverrides": [], + "spaceLength": 10, + "stack": false, + "steppedLine": false, + "targets": [ + { + "expr": " sum(irate(wls_webapp_config_sessions_opened_total_count{weblogic_domainUID=\"$domainName\", weblogic_clusterName=\"$clusterName\", weblogic_serverName=~\"${serverName:regex}\"}[5m])) by (app)", + "format": "time_series", + "intervalFactor": 1, + "legendFormat": "{{app}}", + "refId": "A" + } + ], + "thresholds": [], + "timeFrom": null, + "timeRegions": [], + "timeShift": null, + "title": "Session Rate ", + "tooltip": { + "shared": true, + "sort": 0, + "value_type": "individual" + }, + "type": "graph", + "xaxis": { + "buckets": null, + "mode": "time", + "name": null, + "show": true, + "values": [] + }, + "yaxes": [ + { + "decimals": 0, + "format": "short", + "label": "per second", + "logBase": 1, + "max": null, + "min": "0", + "show": true + }, + { + "format": "short", + 
"label": null, + "logBase": 1, + "max": null, + "min": null, + "show": true + } + ], + "yaxis": { + "align": false, + "alignLevel": null + } + }, + { + "aliasColors": {}, + "bars": false, + "dashLength": 10, + "dashes": false, + "datasource": "$datasource", + "fill": 1, + "gridPos": { + "h": 7, + "w": 12, + "x": 0, + "y": 43 + }, + "id": 132, + "legend": { + "alignAsTable": false, + "avg": false, + "current": false, + "max": false, + "min": false, + "rightSide": false, + "show": true, + "sort": "current", + "sortDesc": true, + "total": false, + "values": false + }, + "lines": true, + "linewidth": 1, + "links": [], + "nullPointMode": "null", + "paceLength": 10, + "percentage": false, + "pointradius": 2, + "points": false, + "renderer": "flot", + "seriesOverrides": [], + "spaceLength": 10, + "stack": false, + "steppedLine": false, + "targets": [ + { + "expr": "(sum(wls_servlet_execution_time_average{weblogic_domainUID=\"$domainName\", weblogic_clusterName=\"$clusterName\", weblogic_serverName=~\"${serverName:regex}\"}) by (app)) / (count(wls_servlet_execution_time_average{weblogic_domainUID=\"domain1\", weblogic_clusterName=\"cluster-1\"}) by (app))", + "format": "time_series", + "interval": "", + "intervalFactor": 1, + "legendFormat": "{{app}}", + "refId": "A" + } + ], + "thresholds": [], + "timeFrom": null, + "timeRegions": [], + "timeShift": null, + "title": "Execution Time per Request ", + "tooltip": { + "shared": true, + "sort": 0, + "value_type": "individual" + }, + "type": "graph", + "xaxis": { + "buckets": null, + "mode": "time", + "name": null, + "show": true, + "values": [] + }, + "yaxes": [ + { + "format": "ms", + "label": "", + "logBase": 1, + "max": null, + "min": null, + "show": true + }, + { + "format": "short", + "label": null, + "logBase": 1, + "max": null, + "min": null, + "show": true + } + ], + "yaxis": { + "align": false, + "alignLevel": null + } + }, + { + "aliasColors": {}, + "bars": true, + "dashLength": 10, + "dashes": false, + "datasource": 
"$datasource", + "fill": 1, + "gridPos": { + "h": 7, + "w": 12, + "x": 12, + "y": 43 + }, + "id": 138, + "legend": { + "alignAsTable": false, + "avg": false, + "current": false, + "max": false, + "min": false, + "rightSide": false, + "show": true, + "sort": "current", + "sortDesc": true, + "total": false, + "values": false + }, + "lines": false, + "linewidth": 1, + "links": [], + "nullPointMode": "null", + "paceLength": 10, + "percentage": false, + "pointradius": 2, + "points": false, + "renderer": "flot", + "seriesOverrides": [], + "spaceLength": 10, + "stack": false, + "steppedLine": false, + "targets": [ + { + "expr": "sum(irate(wls_servlet_invocation_total_count{weblogic_domainUID=\"$domainName\", weblogic_clusterName=\"$clusterName\", weblogic_serverName=~\"${serverName:regex}\"}[5m])) by (app)", + "format": "time_series", + "intervalFactor": 1, + "legendFormat": "{{app}}", + "refId": "A" + } + ], + "thresholds": [], + "timeFrom": null, + "timeRegions": [], + "timeShift": null, + "title": "Request Rate ", + "tooltip": { + "shared": true, + "sort": 0, + "value_type": "individual" + }, + "type": "graph", + "xaxis": { + "buckets": null, + "mode": "time", + "name": null, + "show": true, + "values": [] + }, + "yaxes": [ + { + "decimals": 0, + "format": "short", + "label": "per second", + "logBase": 1, + "max": null, + "min": "0", + "show": true + }, + { + "format": "short", + "label": null, + "logBase": 1, + "max": null, + "min": null, + "show": true + } + ], + "yaxis": { + "align": false, + "alignLevel": null + } + } + ], + "title": "Web Applications", + "type": "row" + }, + { + "collapsed": true, + "gridPos": { + "h": 1, + "w": 24, + "x": 0, + "y": 25 + }, + "id": 43, + "panels": [ + { + "columns": [], + "datasource": "$datasource", + "fontSize": "100%", + "gridPos": { + "h": 4, + "w": 24, + "x": 0, + "y": 29 + }, + "hideTimeOverride": true, + "id": 111, + "links": [], + "pageSize": null, + "scroll": true, + "showHeader": true, + "sort": { + "col": 0, + "desc": 
true + }, + "styles": [ + { + "alias": "Time", + "dateFormat": "YYYY-MM-DD HH:mm:ss", + "pattern": "Time", + "type": "hidden" + }, + { + "alias": "Server", + "colorMode": null, + "colors": [ + "rgba(245, 54, 54, 0.9)", + "rgba(237, 129, 40, 0.89)", + "rgba(50, 172, 45, 0.97)" + ], + "dateFormat": "YYYY-MM-DD HH:mm:ss", + "decimals": 2, + "mappingType": 1, + "pattern": "weblogic_serverName", + "thresholds": [], + "type": "string", + "unit": "short" + }, + { + "alias": "Name", + "colorMode": null, + "colors": [ + "rgba(245, 54, 54, 0.9)", + "rgba(237, 129, 40, 0.89)", + "rgba(50, 172, 45, 0.97)" + ], + "dateFormat": "YYYY-MM-DD HH:mm:ss", + "decimals": 2, + "mappingType": 1, + "pattern": "name", + "thresholds": [], + "type": "string", + "unit": "short" + }, + { + "alias": "Active Connections", + "colorMode": null, + "colors": [ + "rgba(245, 54, 54, 0.9)", + "rgba(237, 129, 40, 0.89)", + "rgba(50, 172, 45, 0.97)" + ], + "dateFormat": "YYYY-MM-DD HH:mm:ss", + "decimals": 0, + "mappingType": 1, + "pattern": "Value #A", + "thresholds": [], + "type": "number", + "unit": "short" + }, + { + "alias": "Current Capacity", + "colorMode": null, + "colors": [ + "rgba(245, 54, 54, 0.9)", + "rgba(237, 129, 40, 0.89)", + "rgba(50, 172, 45, 0.97)" + ], + "dateFormat": "YYYY-MM-DD HH:mm:ss", + "decimals": 0, + "mappingType": 1, + "pattern": "Value #C", + "thresholds": [], + "type": "number", + "unit": "short" + }, + { + "alias": "Total Connections", + "colorMode": null, + "colors": [ + "rgba(245, 54, 54, 0.9)", + "rgba(237, 129, 40, 0.89)", + "rgba(50, 172, 45, 0.97)" + ], + "dateFormat": "YYYY-MM-DD HH:mm:ss", + "decimals": 0, + "mappingType": 1, + "pattern": "Value #D", + "thresholds": [], + "type": "number", + "unit": "short" + }, + { + "alias": "Total Connections", + "colorMode": null, + "colors": [ + "rgba(245, 54, 54, 0.9)", + "rgba(237, 129, 40, 0.89)", + "rgba(50, 172, 45, 0.97)" + ], + "dateFormat": "YYYY-MM-DD HH:mm:ss", + "decimals": 0, + "mappingType": 1, + "pattern": 
"Value #D", + "thresholds": [], + "type": "number", + "unit": "short" + }, + { + "alias": "", + "colorMode": null, + "colors": [ + "rgba(245, 54, 54, 0.9)", + "rgba(237, 129, 40, 0.89)", + "rgba(50, 172, 45, 0.97)" + ], + "decimals": 2, + "pattern": "/.*/", + "thresholds": [], + "type": "hidden", + "unit": "short" + } + ], + "targets": [ + { + "expr": "sum(wls_datasource_curr_capacity{weblogic_domainUID=\"$domainName\", weblogic_clusterName=\"$clusterName\", weblogic_serverName=~\"${serverName:regex}\"}) by (weblogic_serverName,name)", + "format": "table", + "instant": true, + "intervalFactor": 1, + "refId": "C" + }, + { + "expr": "sum(wls_datasource_active_connections_current_count{weblogic_domainUID=\"$domainName\", weblogic_clusterName=\"$clusterName\", weblogic_serverName=~\"${serverName:regex}\"}) by (weblogic_serverName,name)", + "format": "table", + "instant": true, + "intervalFactor": 1, + "refId": "A" + }, + { + "expr": "sum(wls_datasource_connections_total_count{weblogic_domainUID=\"$domainName\", weblogic_clusterName=\"$clusterName\", weblogic_serverName=~\"${serverName:regex}\"}) by (weblogic_serverName,name)", + "format": "table", + "instant": true, + "intervalFactor": 1, + "refId": "D" + } + ], + "timeFrom": null, + "timeShift": null, + "title": "Overview", + "transform": "table", + "type": "table" + }, + { + "aliasColors": {}, + "bars": false, + "dashLength": 10, + "dashes": false, + "datasource": "$datasource", + "fill": 1, + "gridPos": { + "h": 8, + "w": 13, + "x": 0, + "y": 33 + }, + "id": 50, + "legend": { + "alignAsTable": false, + "avg": false, + "current": false, + "max": false, + "min": false, + "rightSide": false, + "show": true, + "sort": "current", + "sortDesc": true, + "total": false, + "values": false + }, + "lines": true, + "linewidth": 1, + "links": [], + "nullPointMode": "null", + "paceLength": 10, + "percentage": false, + "pointradius": 2, + "points": false, + "renderer": "flot", + "seriesOverrides": [], + "spaceLength": 10, + 
"stack": false, + "steppedLine": false, + "targets": [ + { + "expr": "wls_datasource_active_connections_current_count{weblogic_domainUID=\"$domainName\", weblogic_clusterName=\"$clusterName\", weblogic_serverName=~\"${serverName:regex}\"}", + "format": "time_series", + "intervalFactor": 1, + "legendFormat": " {{name}} @ {{weblogic_serverName}}", + "refId": "A" + } + ], + "thresholds": [], + "timeFrom": null, + "timeRegions": [], + "timeShift": null, + "title": "Active Connections", + "tooltip": { + "shared": true, + "sort": 0, + "value_type": "individual" + }, + "type": "graph", + "xaxis": { + "buckets": null, + "mode": "time", + "name": null, + "show": true, + "values": [] + }, + "yaxes": [ + { + "decimals": 0, + "format": "short", + "label": null, + "logBase": 1, + "max": null, + "min": "0", + "show": true + }, + { + "format": "short", + "label": null, + "logBase": 1, + "max": null, + "min": null, + "show": true + } + ], + "yaxis": { + "align": false, + "alignLevel": null + } + }, + { + "aliasColors": {}, + "bars": true, + "dashLength": 10, + "dashes": false, + "datasource": "$datasource", + "fill": 1, + "gridPos": { + "h": 8, + "w": 11, + "x": 13, + "y": 33 + }, + "id": 71, + "legend": { + "alignAsTable": false, + "avg": false, + "current": false, + "max": false, + "min": false, + "rightSide": false, + "show": true, + "total": false, + "values": false + }, + "lines": true, + "linewidth": 1, + "links": [], + "nullPointMode": "null", + "paceLength": 10, + "percentage": false, + "pointradius": 2, + "points": false, + "renderer": "flot", + "seriesOverrides": [], + "spaceLength": 10, + "stack": false, + "steppedLine": false, + "targets": [ + { + "expr": "irate(wls_datasource_connections_total_count{weblogic_domainUID=\"$domainName\", weblogic_clusterName=\"$clusterName\", weblogic_serverName=~\"${serverName:regex}\"}[5m])", + "format": "time_series", + "intervalFactor": 1, + "legendFormat": " {{name}} @ {{weblogic_serverName}}", + "refId": "A" + } + ], + 
"thresholds": [], + "timeFrom": null, + "timeRegions": [], + "timeShift": null, + "title": "Connection Rate", + "tooltip": { + "shared": true, + "sort": 0, + "value_type": "individual" + }, + "type": "graph", + "xaxis": { + "buckets": null, + "mode": "time", + "name": null, + "show": true, + "values": [] + }, + "yaxes": [ + { + "decimals": 0, + "format": "short", + "label": "per second", + "logBase": 1, + "max": null, + "min": "0", + "show": true + }, + { + "format": "short", + "label": null, + "logBase": 1, + "max": null, + "min": null, + "show": true + } + ], + "yaxis": { + "align": false, + "alignLevel": null + } + }, + { + "aliasColors": {}, + "bars": false, + "dashLength": 10, + "dashes": false, + "datasource": "$datasource", + "fill": 1, + "gridPos": { + "h": 9, + "w": 11, + "x": 0, + "y": 41 + }, + "id": 46, + "legend": { + "alignAsTable": false, + "avg": false, + "current": false, + "max": false, + "min": false, + "show": true, + "total": false, + "values": false + }, + "lines": true, + "linewidth": 1, + "links": [], + "nullPointMode": "null", + "paceLength": 10, + "percentage": false, + "pointradius": 2, + "points": false, + "renderer": "flot", + "seriesOverrides": [], + "spaceLength": 10, + "stack": false, + "steppedLine": false, + "targets": [ + { + "expr": "wls_datasource_waiting_for_connection_current_count{weblogic_domainUID=\"$domainName\", weblogic_clusterName=\"$clusterName\", weblogic_serverName=~\"${serverName:regex}\"}", + "format": "time_series", + "intervalFactor": 1, + "legendFormat": " {{name}} @ {{weblogic_serverName}}", + "refId": "A" + } + ], + "thresholds": [], + "timeFrom": null, + "timeRegions": [], + "timeShift": null, + "title": "Pending Connection Requests", + "tooltip": { + "shared": true, + "sort": 0, + "value_type": "individual" + }, + "type": "graph", + "xaxis": { + "buckets": null, + "mode": "time", + "name": null, + "show": true, + "values": [] + }, + "yaxes": [ + { + "decimals": 0, + "format": "short", + "label": null, + 
"logBase": 1, + "max": null, + "min": "0", + "show": true + }, + { + "format": "short", + "label": null, + "logBase": 1, + "max": null, + "min": null, + "show": true + } + ], + "yaxis": { + "align": false, + "alignLevel": null + } + }, + { + "aliasColors": {}, + "bars": false, + "dashLength": 10, + "dashes": false, + "datasource": "$datasource", + "fill": 1, + "gridPos": { + "h": 9, + "w": 13, + "x": 11, + "y": 41 + }, + "id": 73, + "legend": { + "alignAsTable": false, + "avg": false, + "current": false, + "max": false, + "min": false, + "show": true, + "total": false, + "values": false + }, + "lines": true, + "linewidth": 1, + "links": [], + "nullPointMode": "null", + "paceLength": 10, + "percentage": false, + "pointradius": 2, + "points": false, + "renderer": "flot", + "seriesOverrides": [], + "spaceLength": 10, + "stack": false, + "steppedLine": false, + "targets": [ + { + "expr": "wls_datasource_connection_delay_time{weblogic_domainUID=\"$domainName\", weblogic_clusterName=\"$clusterName\", weblogic_serverName=~\"${serverName:regex}\"}", + "format": "time_series", + "intervalFactor": 1, + "legendFormat": " {{name}} @ {{weblogic_serverName}}", + "refId": "A" + } + ], + "thresholds": [], + "timeFrom": null, + "timeRegions": [], + "timeShift": null, + "title": "Average Connection Delay Time", + "tooltip": { + "shared": true, + "sort": 0, + "value_type": "individual" + }, + "type": "graph", + "xaxis": { + "buckets": null, + "mode": "time", + "name": null, + "show": true, + "values": [] + }, + "yaxes": [ + { + "decimals": 0, + "format": "ms", + "label": null, + "logBase": 1, + "max": null, + "min": "0", + "show": true + }, + { + "format": "short", + "label": null, + "logBase": 1, + "max": null, + "min": null, + "show": true + } + ], + "yaxis": { + "align": false, + "alignLevel": null + } + } + ], + "title": "Data Sources", + "type": "row" + }, + { + "collapsed": true, + "gridPos": { + "h": 1, + "w": 24, + "x": 0, + "y": 26 + }, + "id": 40, + "panels": [ + { + 
"aliasColors": {}, + "bars": false, + "dashLength": 10, + "dashes": false, + "datasource": "$datasource", + "fill": 1, + "gridPos": { + "h": 6, + "w": 12, + "x": 0, + "y": 30 + }, + "id": 145, + "legend": { + "alignAsTable": false, + "avg": false, + "current": false, + "max": false, + "min": false, + "rightSide": false, + "show": true, + "total": false, + "values": false + }, + "lines": true, + "linewidth": 1, + "links": [], + "nullPointMode": "null", + "paceLength": 10, + "percentage": false, + "pointradius": 2, + "points": false, + "renderer": "flot", + "seriesOverrides": [], + "spaceLength": 10, + "stack": false, + "steppedLine": false, + "targets": [ + { + "expr": "sum(wls_jmsruntime_connections_current_count{weblogic_domainUID=\"$domainName\", weblogic_clusterName=\"$clusterName\", weblogic_serverName=~\"${serverName:regex}\"}) by (weblogic_serverName)", + "format": "time_series", + "intervalFactor": 1, + "legendFormat": "{{weblogic_serverName}}", + "refId": "A" + } + ], + "thresholds": [], + "timeFrom": null, + "timeRegions": [], + "timeShift": null, + "title": "JMS Connections", + "tooltip": { + "shared": true, + "sort": 0, + "value_type": "individual" + }, + "type": "graph", + "xaxis": { + "buckets": null, + "mode": "time", + "name": null, + "show": true, + "values": [] + }, + "yaxes": [ + { + "decimals": 0, + "format": "short", + "label": "", + "logBase": 1, + "max": null, + "min": "0", + "show": true + }, + { + "format": "short", + "label": null, + "logBase": 1, + "max": null, + "min": null, + "show": true + } + ], + "yaxis": { + "align": false, + "alignLevel": null + } + }, + { + "aliasColors": {}, + "bars": false, + "dashLength": 10, + "dashes": false, + "datasource": "$datasource", + "fill": 1, + "gridPos": { + "h": 6, + "w": 12, + "x": 12, + "y": 30 + }, + "id": 147, + "legend": { + "alignAsTable": false, + "avg": false, + "current": false, + "max": false, + "min": false, + "rightSide": false, + "show": true, + "total": false, + "values": false + }, + 
"lines": true, + "linewidth": 1, + "links": [], + "nullPointMode": "null", + "paceLength": 10, + "percentage": false, + "pointradius": 2, + "points": false, + "renderer": "flot", + "seriesOverrides": [], + "spaceLength": 10, + "stack": false, + "steppedLine": false, + "targets": [ + { + "expr": "sum(irate(wls_jmsruntime_connections_total_count{weblogic_domainUID=\"$domainName\", weblogic_clusterName=\"$clusterName\", weblogic_serverName=~\"${serverName:regex}\"}[5m])) by (weblogic_serverName)", + "format": "time_series", + "intervalFactor": 1, + "legendFormat": "{{weblogic_serverName}}", + "refId": "A" + } + ], + "thresholds": [], + "timeFrom": null, + "timeRegions": [], + "timeShift": null, + "title": "JMS Connection Rate", + "tooltip": { + "shared": true, + "sort": 0, + "value_type": "individual" + }, + "type": "graph", + "xaxis": { + "buckets": null, + "mode": "time", + "name": null, + "show": true, + "values": [] + }, + "yaxes": [ + { + "decimals": null, + "format": "short", + "label": "", + "logBase": 1, + "max": null, + "min": "0", + "show": true + }, + { + "format": "short", + "label": null, + "logBase": 1, + "max": null, + "min": null, + "show": true + } + ], + "yaxis": { + "align": false, + "alignLevel": null + } + }, + { + "columns": [], + "datasource": "$datasource", + "fontSize": "100%", + "gridPos": { + "h": 4, + "w": 24, + "x": 0, + "y": 36 + }, + "hideTimeOverride": true, + "id": 113, + "links": [], + "pageSize": null, + "scroll": true, + "showHeader": true, + "sort": { + "col": 0, + "desc": true + }, + "styles": [ + { + "alias": "Time", + "dateFormat": "YYYY-MM-DD HH:mm:ss", + "pattern": "Time", + "type": "hidden" + }, + { + "alias": "Name", + "colorMode": null, + "colors": [ + "rgba(245, 54, 54, 0.9)", + "rgba(237, 129, 40, 0.89)", + "rgba(50, 172, 45, 0.97)" + ], + "dateFormat": "YYYY-MM-DD HH:mm:ss", + "decimals": 2, + "mappingType": 1, + "pattern": "jmsserver", + "thresholds": [], + "type": "string", + "unit": "short" + }, + { + "alias": 
"Current Dests", + "colorMode": null, + "colors": [ + "rgba(245, 54, 54, 0.9)", + "rgba(237, 129, 40, 0.89)", + "rgba(50, 172, 45, 0.97)" + ], + "dateFormat": "YYYY-MM-DD HH:mm:ss", + "decimals": 0, + "mappingType": 1, + "pattern": "Value #B", + "thresholds": [], + "type": "number", + "unit": "short" + }, + { + "alias": "Total Msgs", + "colorMode": null, + "colors": [ + "rgba(245, 54, 54, 0.9)", + "rgba(237, 129, 40, 0.89)", + "rgba(50, 172, 45, 0.97)" + ], + "dateFormat": "YYYY-MM-DD HH:mm:ss", + "decimals": 0, + "mappingType": 1, + "pattern": "Value #A", + "thresholds": [], + "type": "number", + "unit": "short" + }, + { + "alias": "Total Bytes", + "colorMode": null, + "colors": [ + "rgba(245, 54, 54, 0.9)", + "rgba(237, 129, 40, 0.89)", + "rgba(50, 172, 45, 0.97)" + ], + "dateFormat": "YYYY-MM-DD HH:mm:ss", + "decimals": 0, + "mappingType": 1, + "pattern": "Value #D", + "thresholds": [], + "type": "number", + "unit": "bytes" + }, + { + "alias": "Total Dests", + "colorMode": null, + "colors": [ + "rgba(245, 54, 54, 0.9)", + "rgba(237, 129, 40, 0.89)", + "rgba(50, 172, 45, 0.97)" + ], + "dateFormat": "YYYY-MM-DD HH:mm:ss", + "decimals": 0, + "mappingType": 1, + "pattern": "Value #E", + "thresholds": [], + "type": "number", + "unit": "short" + }, + { + "alias": "", + "colorMode": null, + "colors": [ + "rgba(245, 54, 54, 0.9)", + "rgba(237, 129, 40, 0.89)", + "rgba(50, 172, 45, 0.97)" + ], + "decimals": 2, + "pattern": "/.*/", + "thresholds": [], + "type": "hidden", + "unit": "short" + } + ], + "targets": [ + { + "expr": "sum(wls_jms_destinations_current_count{weblogic_domainUID=\"$domainName\", weblogic_clusterName=\"$clusterName\", weblogic_serverName=~\"${serverName:regex}\"}) by (jmsserver)", + "format": "table", + "instant": true, + "intervalFactor": 1, + "refId": "B" + }, + { + "expr": "sum(wls_jms_messages_received_count{weblogic_domainUID=\"$domainName\", weblogic_clusterName=\"$clusterName\", weblogic_serverName=~\"${serverName:regex}\"}) by (jmsserver)", + 
"format": "table", + "instant": true, + "intervalFactor": 1, + "refId": "A" + }, + { + "expr": "sum(wls_jms_bytes_received_count{weblogic_domainUID=\"$domainName\", weblogic_clusterName=\"$clusterName\", weblogic_serverName=~\"${serverName:regex}\"}) by (jmsserver)", + "format": "table", + "instant": true, + "intervalFactor": 1, + "refId": "D" + }, + { + "expr": "sum(wls_jms_destinations_total_count{weblogic_domainUID=\"$domainName\", weblogic_clusterName=\"$clusterName\", weblogic_serverName=~\"${serverName:regex}\"}) by (jmsserver)", + "format": "table", + "instant": true, + "intervalFactor": 1, + "refId": "E" + }, + { + "expr": "sum(wls_jms_destinations_current_count{weblogic_domainUID=\"$domainName\", weblogic_clusterName=\"$clusterName\", weblogic_serverName=~\"${serverName:regex}\"}) by (jmsserver)", + "format": "table", + "instant": true, + "intervalFactor": 1, + "refId": "F" + } + ], + "timeFrom": null, + "timeShift": null, + "title": "JMSServer Overview", + "transform": "table", + "type": "table" + }, + { + "aliasColors": {}, + "bars": false, + "dashLength": 10, + "dashes": false, + "datasource": "$datasource", + "fill": 1, + "gridPos": { + "h": 7, + "w": 12, + "x": 0, + "y": 40 + }, + "id": 54, + "legend": { + "alignAsTable": false, + "avg": false, + "current": true, + "max": false, + "min": false, + "rightSide": false, + "show": true, + "total": false, + "values": true + }, + "lines": true, + "linewidth": 1, + "links": [], + "nullPointMode": "null", + "paceLength": 10, + "percentage": false, + "pointradius": 2, + "points": false, + "renderer": "flot", + "seriesOverrides": [], + "spaceLength": 10, + "stack": false, + "steppedLine": false, + "targets": [ + { + "expr": "sum(wls_jms_messages_current_count{weblogic_domainUID=\"$domainName\", weblogic_clusterName=\"$clusterName\", weblogic_serverName=~\"${serverName:regex}\"}) by (jmsserver)", + "format": "time_series", + "intervalFactor": 1, + "legendFormat": "Current ({{jmsserver}})", + "refId": "A" + }, + { 
+ "expr": "sum(wls_jms_messages_pending_count{weblogic_domainUID=\"$domainName\", weblogic_clusterName=\"$clusterName\", weblogic_serverName=~\"${serverName:regex}\"}) by (jmsserver)", + "format": "time_series", + "intervalFactor": 1, + "legendFormat": "Pending ({{jmsserver}})", + "refId": "B" + } + ], + "thresholds": [], + "timeFrom": null, + "timeRegions": [], + "timeShift": null, + "title": "Messages", + "tooltip": { + "shared": true, + "sort": 0, + "value_type": "individual" + }, + "type": "graph", + "xaxis": { + "buckets": null, + "mode": "time", + "name": null, + "show": true, + "values": [] + }, + "yaxes": [ + { + "decimals": 0, + "format": "short", + "label": "", + "logBase": 1, + "max": null, + "min": "0", + "show": true + }, + { + "format": "short", + "label": null, + "logBase": 1, + "max": null, + "min": null, + "show": true + } + ], + "yaxis": { + "align": false, + "alignLevel": null + } + }, + { + "aliasColors": {}, + "bars": false, + "dashLength": 10, + "dashes": false, + "datasource": "$datasource", + "fill": 1, + "gridPos": { + "h": 7, + "w": 12, + "x": 12, + "y": 40 + }, + "id": 56, + "legend": { + "alignAsTable": false, + "avg": false, + "current": true, + "max": false, + "min": false, + "show": true, + "total": false, + "values": true + }, + "lines": true, + "linewidth": 1, + "links": [], + "nullPointMode": "null", + "paceLength": 10, + "percentage": false, + "pointradius": 2, + "points": false, + "renderer": "flot", + "seriesOverrides": [], + "spaceLength": 10, + "stack": false, + "steppedLine": false, + "targets": [ + { + "expr": "sum(wls_jms_bytes_current_count{weblogic_domainUID=\"$domainName\", weblogic_clusterName=\"$clusterName\", weblogic_serverName=~\"${serverName:regex}\"}) by (jmsserver)", + "format": "time_series", + "intervalFactor": 1, + "legendFormat": "Current ({{jmsserver}})", + "refId": "A" + }, + { + "expr": "sum(wls_jms_bytes_pending_count{weblogic_domainUID=\"$domainName\", weblogic_clusterName=\"$clusterName\", 
weblogic_serverName=~\"${serverName:regex}\"}) by (jmsserver)", + "format": "time_series", + "intervalFactor": 1, + "legendFormat": "Pending ({{jmsserver}})", + "refId": "B" + } + ], + "thresholds": [], + "timeFrom": null, + "timeRegions": [], + "timeShift": null, + "title": "Bytes", + "tooltip": { + "shared": true, + "sort": 0, + "value_type": "individual" + }, + "type": "graph", + "xaxis": { + "buckets": null, + "mode": "time", + "name": null, + "show": true, + "values": [] + }, + "yaxes": [ + { + "decimals": 0, + "format": "short", + "label": "", + "logBase": 1, + "max": null, + "min": "0", + "show": true + }, + { + "format": "short", + "label": null, + "logBase": 1, + "max": null, + "min": null, + "show": true + } + ], + "yaxis": { + "align": false, + "alignLevel": null + } + }, + { + "aliasColors": {}, + "bars": true, + "dashLength": 10, + "dashes": false, + "datasource": "$datasource", + "fill": 1, + "gridPos": { + "h": 7, + "w": 12, + "x": 0, + "y": 47 + }, + "id": 58, + "legend": { + "avg": false, + "current": false, + "max": false, + "min": false, + "show": true, + "total": false, + "values": false + }, + "lines": true, + "linewidth": 1, + "links": [], + "nullPointMode": "null", + "paceLength": 10, + "percentage": false, + "pointradius": 2, + "points": false, + "renderer": "flot", + "seriesOverrides": [], + "spaceLength": 10, + "stack": false, + "steppedLine": false, + "targets": [ + { + "expr": "sum(irate(wls_jms_messages_received_count{weblogic_domainUID=\"$domainName\", weblogic_clusterName=\"$clusterName\", weblogic_serverName=~\"${serverName:regex}\"}[5m])) by (jmsserver)", + "format": "time_series", + "intervalFactor": 1, + "legendFormat": "{{jmsserver}}", + "refId": "A" + } + ], + "thresholds": [], + "timeFrom": null, + "timeRegions": [], + "timeShift": null, + "title": "Received Message Rate ", + "tooltip": { + "shared": true, + "sort": 0, + "value_type": "individual" + }, + "type": "graph", + "xaxis": { + "buckets": null, + "mode": "time", + 
"name": null, + "show": true, + "values": [] + }, + "yaxes": [ + { + "decimals": null, + "format": "short", + "label": "", + "logBase": 1, + "max": null, + "min": "0", + "show": true + }, + { + "format": "short", + "label": null, + "logBase": 1, + "max": null, + "min": null, + "show": true + } + ], + "yaxis": { + "align": false, + "alignLevel": null + } + }, + { + "aliasColors": {}, + "bars": true, + "dashLength": 10, + "dashes": false, + "datasource": "$datasource", + "fill": 1, + "gridPos": { + "h": 7, + "w": 12, + "x": 12, + "y": 47 + }, + "id": 117, + "legend": { + "avg": false, + "current": false, + "max": false, + "min": false, + "show": true, + "total": false, + "values": false + }, + "lines": true, + "linewidth": 1, + "links": [], + "nullPointMode": "null", + "paceLength": 10, + "percentage": false, + "pointradius": 2, + "points": false, + "renderer": "flot", + "seriesOverrides": [], + "spaceLength": 10, + "stack": false, + "steppedLine": false, + "targets": [ + { + "expr": "sum(irate(wls_jms_bytes_received_count{weblogic_domainUID=\"$domainName\", weblogic_clusterName=\"$clusterName\", weblogic_serverName=~\"${serverName:regex}\"}[5m])) by (jmsserver)", + "format": "time_series", + "intervalFactor": 1, + "legendFormat": "{{jmsserver}}", + "refId": "A" + } + ], + "thresholds": [], + "timeFrom": null, + "timeRegions": [], + "timeShift": null, + "title": "Received Byte Rate", + "tooltip": { + "shared": true, + "sort": 0, + "value_type": "individual" + }, + "type": "graph", + "xaxis": { + "buckets": null, + "mode": "time", + "name": null, + "show": true, + "values": [] + }, + "yaxes": [ + { + "decimals": null, + "format": "short", + "label": "", + "logBase": 1, + "max": null, + "min": "0", + "show": true + }, + { + "format": "short", + "label": null, + "logBase": 1, + "max": null, + "min": null, + "show": true + } + ], + "yaxis": { + "align": false, + "alignLevel": null + } + }, + { + "columns": [], + "datasource": "$datasource", + "fontSize": "100%", + 
"gridPos": { + "h": 5, + "w": 24, + "x": 0, + "y": 54 + }, + "hideTimeOverride": true, + "id": 119, + "links": [], + "pageSize": null, + "scroll": true, + "showHeader": true, + "sort": { + "col": 3, + "desc": false + }, + "styles": [ + { + "alias": "Time", + "dateFormat": "YYYY-MM-DD HH:mm:ss", + "pattern": "Time", + "type": "hidden" + }, + { + "alias": "Destination", + "colorMode": null, + "colors": [ + "rgba(245, 54, 54, 0.9)", + "rgba(237, 129, 40, 0.89)", + "rgba(50, 172, 45, 0.97)" + ], + "dateFormat": "YYYY-MM-DD HH:mm:ss", + "decimals": 2, + "mappingType": 1, + "pattern": "destination", + "thresholds": [], + "type": "string", + "unit": "short" + }, + { + "alias": "Current Consumers", + "colorMode": null, + "colors": [ + "rgba(245, 54, 54, 0.9)", + "rgba(237, 129, 40, 0.89)", + "rgba(50, 172, 45, 0.97)" + ], + "dateFormat": "YYYY-MM-DD HH:mm:ss", + "decimals": 0, + "mappingType": 1, + "pattern": "Value #A", + "thresholds": [], + "type": "number", + "unit": "short" + }, + { + "alias": "Current Msgs", + "colorMode": null, + "colors": [ + "rgba(245, 54, 54, 0.9)", + "rgba(237, 129, 40, 0.89)", + "rgba(50, 172, 45, 0.97)" + ], + "dateFormat": "YYYY-MM-DD HH:mm:ss", + "decimals": 0, + "mappingType": 1, + "pattern": "Value #B", + "thresholds": [], + "type": "number", + "unit": "short" + }, + { + "alias": "Pending Msgs", + "colorMode": null, + "colors": [ + "rgba(245, 54, 54, 0.9)", + "rgba(237, 129, 40, 0.89)", + "rgba(50, 172, 45, 0.97)" + ], + "dateFormat": "YYYY-MM-DD HH:mm:ss", + "decimals": 0, + "mappingType": 1, + "pattern": "Value #C", + "thresholds": [], + "type": "number", + "unit": "short" + }, + { + "alias": "Currrent Bytes", + "colorMode": null, + "colors": [ + "rgba(245, 54, 54, 0.9)", + "rgba(237, 129, 40, 0.89)", + "rgba(50, 172, 45, 0.97)" + ], + "dateFormat": "YYYY-MM-DD HH:mm:ss", + "decimals": 0, + "mappingType": 1, + "pattern": "Value #D", + "thresholds": [], + "type": "number", + "unit": "bytes" + }, + { + "alias": "Pending Bytes", + 
"colorMode": null, + "colors": [ + "rgba(245, 54, 54, 0.9)", + "rgba(237, 129, 40, 0.89)", + "rgba(50, 172, 45, 0.97)" + ], + "dateFormat": "YYYY-MM-DD HH:mm:ss", + "decimals": 0, + "mappingType": 1, + "pattern": "Value #E", + "thresholds": [], + "type": "number", + "unit": "short" + }, + { + "alias": "Total Msgs", + "colorMode": null, + "colors": [ + "rgba(245, 54, 54, 0.9)", + "rgba(237, 129, 40, 0.89)", + "rgba(50, 172, 45, 0.97)" + ], + "dateFormat": "YYYY-MM-DD HH:mm:ss", + "decimals": 0, + "mappingType": 1, + "pattern": "Value #F", + "thresholds": [], + "type": "number", + "unit": "short" + }, + { + "alias": "Total Bytes", + "colorMode": null, + "colors": [ + "rgba(245, 54, 54, 0.9)", + "rgba(237, 129, 40, 0.89)", + "rgba(50, 172, 45, 0.97)" + ], + "dateFormat": "YYYY-MM-DD HH:mm:ss", + "decimals": 0, + "mappingType": 1, + "pattern": "Value #G", + "thresholds": [], + "type": "number", + "unit": "short" + }, + { + "alias": "", + "colorMode": null, + "colors": [ + "rgba(245, 54, 54, 0.9)", + "rgba(237, 129, 40, 0.89)", + "rgba(50, 172, 45, 0.97)" + ], + "decimals": 2, + "pattern": "/.*/", + "thresholds": [], + "type": "hidden", + "unit": "short" + } + ], + "targets": [ + { + "expr": "sum(wls_jms_dest_consumers_current_count{weblogic_domainUID=\"$domainName\", weblogic_clusterName=\"$clusterName\", weblogic_serverName=~\"${serverName:regex}\"}) by (destination)", + "format": "table", + "instant": true, + "intervalFactor": 1, + "refId": "A" + }, + { + "expr": "sum(wls_jms_dest_messages_current_count{weblogic_domainUID=\"$domainName\", weblogic_clusterName=\"$clusterName\", weblogic_serverName=~\"${serverName:regex}\"}) by (destination)", + "format": "table", + "instant": true, + "intervalFactor": 1, + "refId": "B" + }, + { + "expr": "sum(wls_jms_dest_messages_pending_count{weblogic_domainUID=\"$domainName\", weblogic_clusterName=\"$clusterName\", weblogic_serverName=~\"${serverName:regex}\"}) by (destination)", + "format": "table", + "instant": true, + 
"intervalFactor": 1, + "refId": "C" + }, + { + "expr": "sum(wls_jms_dest_bytes_current_count{weblogic_domainUID=\"$domainName\", weblogic_clusterName=\"$clusterName\", weblogic_serverName=~\"${serverName:regex}\"}) by (destination)", + "format": "table", + "instant": true, + "intervalFactor": 1, + "refId": "D" + }, + { + "expr": "sum(wls_jms_dest_bytes_pending_count{weblogic_domainUID=\"$domainName\", weblogic_clusterName=\"$clusterName\", weblogic_serverName=~\"${serverName:regex}\"}) by (destination)", + "format": "table", + "instant": true, + "intervalFactor": 1, + "refId": "E" + }, + { + "expr": "sum(wls_jms_dest_messages_received_count{weblogic_domainUID=\"$domainName\", weblogic_clusterName=\"$clusterName\", weblogic_serverName=~\"${serverName:regex}\"}) by (destination)", + "format": "table", + "instant": true, + "interval": "", + "intervalFactor": 1, + "refId": "F" + }, + { + "expr": "sum(wls_jms_dest_bytes_received_count{weblogic_domainUID=\"$domainName\", weblogic_clusterName=\"$clusterName\", weblogic_serverName=~\"${serverName:regex}\"}) by (destination)", + "format": "table", + "instant": true, + "intervalFactor": 1, + "refId": "G" + } + ], + "timeFrom": null, + "timeShift": null, + "title": "Destinations Overview", + "transform": "table", + "type": "table" + } + ], + "title": "JMS Services", + "type": "row" + } + ], + "refresh": false, + "schemaVersion": 16, + "style": "dark", + "tags": [], + "templating": { + "list": [ + { + "current": { + "text": "prometheus", + "value": "prometheus" + }, + "hide": 0, + "label": null, + "name": "datasource", + "options": [], + "query": "prometheus", + "refresh": 1, + "regex": "", + "type": "datasource" + }, + { + "allValue": null, + "current": {}, + "datasource": "$datasource", + "hide": 0, + "includeAll": false, + "label": "Domain", + "multi": false, + "name": "domainName", + "options": [], + "query": "label_values(weblogic_domainUID)", + "refresh": 1, + "regex": "", + "sort": 2, + "tagValuesQuery": "", + "tags": 
[], + "tagsQuery": "", + "type": "query", + "useTags": false + }, + { + "allValue": null, + "current": {}, + "datasource": "$datasource", + "hide": 0, + "includeAll": false, + "label": "Cluster", + "multi": false, + "name": "clusterName", + "options": [], + "query": "label_values(wls_jvm_uptime{weblogic_domainUID=\"$domainName\"},weblogic_clusterName)", + "refresh": 1, + "regex": "", + "sort": 2, + "tagValuesQuery": "", + "tags": [], + "tagsQuery": "", + "type": "query", + "useTags": false + }, + { + "allValue": null, + "current": {}, + "datasource": "$datasource", + "hide": 0, + "includeAll": true, + "label": "Server", + "multi": true, + "name": "serverName", + "options": [], + "query": "label_values(wls_jvm_uptime{weblogic_domainUID=\"$domainName\", weblogic_clusterName=\"$clusterName\"},weblogic_serverName)", + "refresh": 1, + "regex": "", + "sort": 2, + "tagValuesQuery": "", + "tags": [], + "tagsQuery": "", + "type": "query", + "useTags": false + }, + { + "allValue": null, + "current": { + "selected": false, + "tags": [], + "text": "5", + "value": "5" + }, + "hide": 0, + "includeAll": false, + "label": "Top N", + "multi": false, + "name": "topN", + "options": [ + { + "selected": false, + "text": "3", + "value": "3" + }, + { + "selected": true, + "text": "5", + "value": "5" + }, + { + "selected": false, + "text": "7", + "value": "7" + }, + { + "selected": false, + "text": "10", + "value": "10" + } + ], + "query": "3, 5, 7, 10", + "skipUrlSync": false, + "type": "custom" + } + ] + }, + "time": { + "from": "now-30m", + "to": "now" + }, + "timepicker": { + "hidden": false, + "refresh_intervals": [ + "5s", + "10s", + "30s", + "1m", + "5m", + "15m", + "30m", + "1h", + "2h", + "1d" + ], + "time_options": [ + "5m", + "15m", + "1h", + "6h", + "12h", + "24h", + "2d", + "7d", + "30d" + ] + }, + "timezone": "", + "title": "WebLogic Server Dashboard", + "uid": "5yUwzbZWz", + "version": 6 + } +} + diff --git 
a/OracleAccessManagement/kubernetes/monitoring-service/config/weblogic.xml b/OracleAccessManagement/kubernetes/monitoring-service/config/weblogic.xml new file mode 100755 index 000000000..c4e2df0c5 --- /dev/null +++ b/OracleAccessManagement/kubernetes/monitoring-service/config/weblogic.xml @@ -0,0 +1,18 @@ + + + + wls-exporter + + + com.google.gson.* + javax.inject.* + org.apache.commons.* + org.apache.http.* + org.glassfish.hk2.* + org.jvnet.hk2.* + org.jvnet.tiger_types.* + org.yaml.snakeyaml.* + + + + diff --git a/OracleAccessManagement/kubernetes/monitoring-service/delete-monitoring.sh b/OracleAccessManagement/kubernetes/monitoring-service/delete-monitoring.sh new file mode 100755 index 000000000..b676e9b40 --- /dev/null +++ b/OracleAccessManagement/kubernetes/monitoring-service/delete-monitoring.sh @@ -0,0 +1,122 @@ +#!/bin/bash +# Copyright (c) 2021, Oracle and/or its affiliates. +# Licensed under the Universal Permissive License v 1.0 as shown at https://oss.oracle.com/licenses/upl. +# +# delete-monitoring.sh + +# Initialize +script="${BASH_SOURCE[0]}" +scriptDir="$( cd "$( dirname "${script}" )" && pwd )" +OLD_PWD=`pwd` + + +# +## Function to exit and print an error message +## $1 - text of message +function fail { + printError $* + exit 1 +} + +# Function to print an error message +function removeFileIfExists { + echo "input is $1" + if [ -f $1 ]; then + rm -f $1 + fi +} + +function exitIfError { + if [ "$1" != "0" ]; then + echo "$2" + exit $1 + fi +} +# +# Function to parse a yaml file and generate the bash exports +# $1 - Input filename +# $2 - Output filename +function parseYaml { + local s='[[:space:]]*' w='[a-zA-Z0-9_]*' fs=$(echo @|tr @ '\034') + sed -ne "s|^\($s\):|\1|" \ + -e "s|^\($s\)\($w\)$s:$s[\"']\(.*\)[\"']$s\$|\1$fs\2$fs\3|p" \ + -e "s|^\($s\)\($w\)$s:$s\(.*\)$s\$|\1$fs\2$fs\3|p" $1 | + awk -F$fs '{ + if (length($3) > 0) { + # javaOptions may contain tokens that are not allowed in export command + # we need to handle it differently. 
+ if ($2=="javaOptions") { + printf("%s=%s\n", $2, $3); + } else { + printf("export %s=\"%s\"\n", $2, $3); + } + } + }' > $2 +} + +function usage { + echo usage: ${script} -i file [-v] [-h] + echo " -i Parameter inputs file, must be specified." + echo " -h Help" + exit $1 +} + + +function deleteKubePrometheusStack { + helm delete ${monitoringNamespace} --namespace ${monitoringNamespace} +} + +#Parse the inputs +while getopts "hi:" opt; do + case $opt in + i) valuesInputFile="${OPTARG}" + ;; + h) usage 0 + ;; + *) usage 1 + ;; + esac +done + +if [ -z ${valuesInputFile} ]; then + echo "${script}: -i must be specified." + missingRequiredOption="true" +fi + +if [ "${missingRequiredOption}" == "true" ]; then + usage 1 +fi + +if [ ! -f ${valuesInputFile} ]; then + echo "Unable to locate the input parameters file ${valuesInputFile}" + fail 'The error listed above must be resolved before the script can continue' +fi + +exportValuesFile=$(mktemp /tmp/export-values-XXXXXXXXX.sh) +parseYaml ${valuesInputFile} ${exportValuesFile} + + +source ${exportValuesFile} +rm ${exportValuesFile} + +# Setting up the WebLogic Monitoring Exporter + +echo "Undeploy WebLogic Monitoring Exporter started" +serviceMonitor=${scriptDir}/manifests/wls-exporter-ServiceMonitor.yaml +kubectl delete --ignore-not-found=true -f ${serviceMonitor} +script=${scriptDir}/scripts/undeploy-weblogic-monitoring-exporter.sh +sh ${script} +if [ "$?" != "0" ]; then + echo "ERROR: $script failed." + echo "Undeploy WebLogic Monitoring Exporter completed with errors. Review the logs and rerun" +else + echo "Undeploy WebLogic Monitoring Exporter completed." 
+fi + +if [ "${setupKubePrometheusStack}" = "true" ]; then + echo "Deleting Prometheus and grafana started" + deleteKubePrometheusStack + echo "Deleting Prometheus and grafana completed" +fi +cd $OLD_PWD + diff --git a/OracleAccessManagement/kubernetes/monitoring-service/manifests/prometheus-roleBinding-domain-namespace.yaml b/OracleAccessManagement/kubernetes/monitoring-service/manifests/prometheus-roleBinding-domain-namespace.yaml new file mode 100755 index 000000000..e37b9830f --- /dev/null +++ b/OracleAccessManagement/kubernetes/monitoring-service/manifests/prometheus-roleBinding-domain-namespace.yaml @@ -0,0 +1,20 @@ +# Copyright (c) 2021, Oracle and/or its affiliates. +# Licensed under the Universal Permissive License v 1.0 as shown at https://oss.oracle.com/licenses/upl. +# +apiVersion: rbac.authorization.k8s.io/v1 +items: +- apiVersion: rbac.authorization.k8s.io/v1 + kind: RoleBinding + metadata: + name: prometheus-k8s + namespace: oamns + roleRef: + apiGroup: rbac.authorization.k8s.io + kind: Role + name: prometheus-k8s + subjects: + - kind: ServiceAccount + name: prometheus-k8s + namespace: monitoring +kind: RoleBindingList + diff --git a/OracleAccessManagement/kubernetes/monitoring-service/manifests/prometheus-roleSpecific-domain-namespace.yaml b/OracleAccessManagement/kubernetes/monitoring-service/manifests/prometheus-roleSpecific-domain-namespace.yaml new file mode 100755 index 000000000..a881c8647 --- /dev/null +++ b/OracleAccessManagement/kubernetes/monitoring-service/manifests/prometheus-roleSpecific-domain-namespace.yaml @@ -0,0 +1,23 @@ +# Copyright (c) 2021, Oracle and/or its affiliates. +# Licensed under the Universal Permissive License v 1.0 as shown at https://oss.oracle.com/licenses/upl. 
+# +apiVersion: rbac.authorization.k8s.io/v1 +items: +- apiVersion: rbac.authorization.k8s.io/v1 + kind: Role + metadata: + name: prometheus-k8s + namespace: oamns + rules: + - apiGroups: + - "" + resources: + - services + - endpoints + - pods + verbs: + - get + - list + - watch +kind: RoleList + diff --git a/OracleAccessManagement/kubernetes/monitoring-service/manifests/wls-exporter-ServiceMonitor.yaml b/OracleAccessManagement/kubernetes/monitoring-service/manifests/wls-exporter-ServiceMonitor.yaml new file mode 100755 index 000000000..be289f234 --- /dev/null +++ b/OracleAccessManagement/kubernetes/monitoring-service/manifests/wls-exporter-ServiceMonitor.yaml @@ -0,0 +1,44 @@ +# Copyright (c) 2021, Oracle and/or its affiliates. +# Licensed under the Universal Permissive License v 1.0 as shown at https://oss.oracle.com/licenses/upl. +# +apiVersion: v1 +kind: Secret +metadata: + name: basic-auth + namespace: oamns +data: + password: d2VsY29tZTE= + user: d2VibG9naWM= +type: Opaque +--- +apiVersion: monitoring.coreos.com/v1 +kind: ServiceMonitor +metadata: + name: wls-exporter + namespace: oamns + labels: + k8s-app: wls-exporter + release: monitoring +spec: + namespaceSelector: + matchNames: + - oamns + selector: + matchLabels: + weblogic.domainName: accessdomain + endpoints: + - basicAuth: + password: + name: basic-auth + key: password + username: + name: basic-auth + key: user + port: default + relabelings: + - action: labelmap + regex: __meta_kubernetes_service_label_(.+) + interval: 10s + honorLabels: true + path: /wls-exporter/metrics + diff --git a/OracleAccessManagement/kubernetes/monitoring-service/manifests/wls-exporter-ServiceMonitor.yaml.template b/OracleAccessManagement/kubernetes/monitoring-service/manifests/wls-exporter-ServiceMonitor.yaml.template new file mode 100755 index 000000000..87d9e4088 --- /dev/null +++ b/OracleAccessManagement/kubernetes/monitoring-service/manifests/wls-exporter-ServiceMonitor.yaml.template @@ -0,0 +1,44 @@ +# Copyright (c) 
2021, Oracle and/or its affiliates. +# Licensed under the Universal Permissive License v 1.0 as shown at https://oss.oracle.com/licenses/upl. +# +apiVersion: v1 +kind: Secret +metadata: + name: basic-auth + namespace: accessns +data: + password: %PASSWORD% + user: %USERNAME% +type: Opaque +--- +apiVersion: monitoring.coreos.com/v1 +kind: ServiceMonitor +metadata: + name: wls-exporter + namespace: accessns + labels: + k8s-app: wls-exporter + release: monitoring +spec: + namespaceSelector: + matchNames: + - accessns + selector: + matchLabels: + weblogic.domainName: accessinfra + endpoints: + - basicAuth: + password: + name: basic-auth + key: password + username: + name: basic-auth + key: user + port: default + relabelings: + - action: labelmap + regex: __meta_kubernetes_service_label_(.+) + interval: 10s + honorLabels: true + path: /wls-exporter/metrics + diff --git a/OracleAccessManagement/kubernetes/monitoring-service/monitoring-inputs.yaml b/OracleAccessManagement/kubernetes/monitoring-service/monitoring-inputs.yaml new file mode 100755 index 000000000..dd2386588 --- /dev/null +++ b/OracleAccessManagement/kubernetes/monitoring-service/monitoring-inputs.yaml @@ -0,0 +1,65 @@ +# Copyright (c) 2021, Oracle and/or its affiliates. +# Licensed under the Universal Permissive License v 1.0 as shown at https://oss.oracle.com/licenses/upl. +# +# The version of this inputs file. Do not modify. +version: create-accessdomain-monitoring-inputs-v1 + +# Unique ID identifying your domain. +# This ID must not contain an underscope ("_"), and must be lowercase and unique across all domains in a Kubernetes cluster. 
+domainUID: accessdomain + +# Name of the domain namespace +domainNamespace: oamns + +# Boolean value indicating whether to install kube-prometheus-stack +setupKubePrometheusStack: true + +# Additional parameters for helm install kube-prometheus-stack +# Refer https://github.com/prometheus-community/helm-charts/blob/main/charts/kube-prometheus-stack/values.yaml for additional parameters +# Sample : +# additionalParamForKubePrometheusStack: --set nodeExporter.enabled=false --set prometheusOperator.tls.enabled=false --set prometheusOperator.admissionWebhooks.enabled=false +additionalParamForKubePrometheusStack: + +# Name of the monitoring namespace +monitoringNamespace: monitoring + +# Name of the Admin Server +adminServerName: AdminServer +# +# Port number for admin server +adminServerPort: 7001 + +# Cluster name +oamClusterName: oam_cluster + +# Port number for managed server +oamManagedServerPort: 14100 + +# WebLogic Monitoring Exporter to Cluster +wlsMonitoringExporterTooamCluster: true + +# Cluster name +policyClusterName: policy_cluster + +# Port number for managed server +policyManagedServerPort: 15100 + +# WebLogic Monitoring Exporter to Cluster +wlsMonitoringExporterTopolicyCluster: true + + +# Boolean to indicate if the adminNodePort will be exposed +exposeMonitoringNodePort: true + +# NodePort to expose Prometheus +prometheusNodePort: 32101 + +# NodePort to expose Grafana +grafanaNodePort: 32100 + +# NodePort to expose Alertmanager +alertmanagerNodePort: 32102 + +# Name of the Kubernetes secret for the Admin Server's username and password +weblogicCredentialsSecretName: accessdomain-domain-credentials + diff --git a/OracleAccessManagement/kubernetes/monitoring-service/scripts/deploy-weblogic-monitoring-exporter.py b/OracleAccessManagement/kubernetes/monitoring-service/scripts/deploy-weblogic-monitoring-exporter.py new file mode 100755 index 000000000..24f9f8334 --- /dev/null +++ 
b/OracleAccessManagement/kubernetes/monitoring-service/scripts/deploy-weblogic-monitoring-exporter.py @@ -0,0 +1,105 @@ +# Copyright (c) 2021, Oracle and/or its affiliates. +# Licensed under the Universal Permissive License v 1.0 as shown at https://oss.oracle.com/licenses/upl. +# +import sys +#======================================================= +# Function for fresh plain deployment +#======================================================= +def newDeploy(appName,target): + try: + print 'Deploying .........' + deploy(appName,'/u01/oracle/wls-exporter-deploy/'+appName+'.war', target, upload="true",remote="true") + startApplication(appName) + except Exception, ex: + print ex.toString() + +#======================================================== +# Main program here... +# Target you can change as per your need +#======================================================== + +def usage(): + argsList = ' -domainName -adminServerName -adminURL -username -password ' + argsList=argsList + ' -oamClusterName ' + ' -wlsMonitoringExporterTooamCluster ' + argsList=argsList + ' -policyClusterName ' + ' -wlsMonitoringExporterTopolicyCluster ' + print sys.argv[0] + argsList + sys.exit(0) + +if len(sys.argv) < 1: + usage() + +# domainName will be passed by command line parameter -domainName. 
+domainName = "accessdomain" + +# adminServerName will be passed by command line parameter -adminServerName +adminServerName = "AdminServer" + +# adminURL will be passed by command line parameter -adminURL +adminURL = "accessdomain-adminserver:7001" + +# oamClusterName will be passed by command line parameter -oamClusterName +oamClusterName = "oam_cluster" + +# wlsMonitoringExporterTooamCluster will be passed by command line parameter -wlsMonitoringExporterTooamCluster +wlsMonitoringExporterTooamCluster = "true" + + +# policyClusterName will be passed by command line parameter -policyClusterName +policyClusterName = "policy_cluster" + +# wlsMonitoringExporterTopolicyCluster will be passed by command line parameter -wlsMonitoringExporterTopolicyCluster +wlsMonitoringExporterTopolicyCluster = "true" + +# username will be passed by command line parameter -username +username = "weblogic" + +# password will be passed by command line parameter -password +password = "welcome1" + +i=1 +while i < len(sys.argv): + if sys.argv[i] == '-domainName': + domainName = sys.argv[i+1] + i += 2 + elif sys.argv[i] == '-adminServerName': + adminServerName = sys.argv[i+1] + i += 2 + elif sys.argv[i] == '-adminURL': + adminURL = sys.argv[i+1] + i += 2 + elif sys.argv[i] == '-username': + username = sys.argv[i+1] + i += 2 + elif sys.argv[i] == '-password': + password = sys.argv[i+1] + i += 2 + elif sys.argv[i] == '-oamClusterName': + oamClusterName = sys.argv[i+1] + i += 2 + elif sys.argv[i] == '-wlsMonitoringExporterTooamCluster': + wlsMonitoringExporterTooamCluster = sys.argv[i+1] + i += 2 + elif sys.argv[i] == '-policyClusterName': + policyClusterName = sys.argv[i+1] + i += 2 + elif sys.argv[i] == '-wlsMonitoringExporterTopolicyCluster': + wlsMonitoringExporterTopolicyCluster = sys.argv[i+1] + i += 2 + else: + print 'Unexpected argument switch at position ' + str(i) + ': ' + str(sys.argv[i]) + usage() + sys.exit(1) + +# Deployment +connect(username, password, 't3://' + adminURL) 
+cd('AppDeployments') +newDeploy('wls-exporter-adminserver',adminServerName) +if 'true' == wlsMonitoringExporterTooamCluster: + newDeploy('wls-exporter-oam',oamClusterName) + +if 'true' == wlsMonitoringExporterTopolicyCluster: + newDeploy('wls-exporter-policy',policyClusterName) + +disconnect() +exit() + diff --git a/OracleAccessManagement/kubernetes/monitoring-service/scripts/deploy-weblogic-monitoring-exporter.sh b/OracleAccessManagement/kubernetes/monitoring-service/scripts/deploy-weblogic-monitoring-exporter.sh new file mode 100755 index 000000000..21bfa7a80 --- /dev/null +++ b/OracleAccessManagement/kubernetes/monitoring-service/scripts/deploy-weblogic-monitoring-exporter.sh @@ -0,0 +1,37 @@ +#!/bin/bash +# Copyright (c) 2021, Oracle and/or its affiliates. +# Licensed under the Universal Permissive License v 1.0 as shown at https://oss.oracle.com/licenses/upl. +# +# Initialize +script="${BASH_SOURCE[0]}" +scriptDir="$( cd "$( dirname "${script}" )" && pwd )" +warDir=$PWD +source ${scriptDir}/utils.sh + +# Setting default values +initialize +# Function to lowercase a value and make it a legal DNS1123 name +# $1 - value to convert to lowercase +function toDNS1123Legal { + local val=`echo $1 | tr "[:upper:]" "[:lower:]"` + val=${val//"_"/"-"} + echo "$val" +} + +# username and password from Kubernetes secret +username=`kubectl get secrets ${weblogicCredentialsSecretName} -n ${domainNamespace} -o=jsonpath='{.data.username}'|base64 --decode` +password=`kubectl get secrets ${weblogicCredentialsSecretName} -n ${domainNamespace} -o=jsonpath='{.data.password}'|base64 --decode` + +adminServerPodName="${domainUID}-$(toDNS1123Legal ${adminServerName})" + +InputParameterList=" -domainName ${domainUID} -adminServerName ${adminServerName} -adminURL ${adminServerPodName}:${adminServerPort} -username ${username} -password ${password}" +InputParameterList="${InputParameterList} -oamClusterName ${oamClusterName} -wlsMonitoringExporterTooamCluster 
${wlsMonitoringExporterTooamCluster}" +InputParameterList="${InputParameterList} -policyClusterName ${policyClusterName} -wlsMonitoringExporterTopolicyCluster ${wlsMonitoringExporterTopolicyCluster}" + +echo "Deploying WebLogic Monitoring Exporter with domainNamespace[$domainNamespace], domainUID[$domainUID], adminServerPodName[$adminServerPodName]" +. $scriptDir/get-wls-exporter.sh +kubectl cp $scriptDir/wls-exporter-deploy ${domainNamespace}/${adminServerPodName}:/u01/oracle +kubectl cp $scriptDir/deploy-weblogic-monitoring-exporter.py ${domainNamespace}/${adminServerPodName}:/u01/oracle/wls-exporter-deploy +EXEC_DEPLOY="kubectl exec -it -n ${domainNamespace} ${adminServerPodName} -- /u01/oracle/oracle_common/common/bin/wlst.sh /u01/oracle/wls-exporter-deploy/deploy-weblogic-monitoring-exporter.py ${InputParameterList}" +eval ${EXEC_DEPLOY} + diff --git a/OracleAccessManagement/kubernetes/monitoring-service/scripts/get-wls-exporter.sh b/OracleAccessManagement/kubernetes/monitoring-service/scripts/get-wls-exporter.sh new file mode 100755 index 000000000..3f880f8e7 --- /dev/null +++ b/OracleAccessManagement/kubernetes/monitoring-service/scripts/get-wls-exporter.sh @@ -0,0 +1,46 @@ +#!/bin/bash +# Copyright (c) 2021, Oracle and/or its affiliates. +# Licensed under the Universal Permissive License v 1.0 as shown at https://oss.oracle.com/licenses/upl. 
+# +# Initialize +script="${BASH_SOURCE[0]}" +scriptDir="$( cd "$( dirname "${script}" )" && pwd )" +source ${scriptDir}/utils.sh +warDir=$scriptDir/../bin +mkdir -p $warDir +curl -L -o $warDir/wls-exporter.war https://github.com/oracle/weblogic-monitoring-exporter/releases/download/v2.0.0/wls-exporter.war +mkdir -p $scriptDir/wls-exporter-deploy +echo "created $scriptDir/wls-exporter-deploy dir" + +function update_wls_exporter_war { + servername=$1 + port=$2 + tmp_dir=$(mktemp -d -t ci-XXXXXXXXXX) + echo "created $tmp_dir" + mkdir -p $tmp_dir/WEB-INF + cp $scriptDir/../config/config.yml.template $tmp_dir/config.yml + cp $scriptDir/../config/weblogic.xml $tmp_dir/WEB-INF/weblogic.xml + cp $warDir/wls-exporter.war $tmp_dir/wls-exporter.war + + sed -i -e "s:%PORT%:${port}:g" $tmp_dir/config.yml + pushd $tmp_dir + echo "in temp dir" + zip wls-exporter.war WEB-INF/weblogic.xml + zip wls-exporter.war config.yml + + cp wls-exporter.war ${scriptDir}/wls-exporter-deploy/wls-exporter-${servername}.war + popd +} + +initialize + +update_wls_exporter_war adminserver ${adminServerPort} +if [[ ${wlsMonitoringExporterTooamCluster} == "true" ]]; +then + update_wls_exporter_war oam ${oamManagedServerPort} +fi +if [[ ${wlsMonitoringExporterTopolicyCluster} == "true" ]]; +then + update_wls_exporter_war policy ${policyManagedServerPort} +fi + diff --git a/OracleAccessManagement/kubernetes/monitoring-service/scripts/undeploy-weblogic-monitoring-exporter.py b/OracleAccessManagement/kubernetes/monitoring-service/scripts/undeploy-weblogic-monitoring-exporter.py new file mode 100755 index 000000000..b06988469 --- /dev/null +++ b/OracleAccessManagement/kubernetes/monitoring-service/scripts/undeploy-weblogic-monitoring-exporter.py @@ -0,0 +1,103 @@ +# Copyright (c) 2021, Oracle and/or its affiliates. +# Licensed under the Universal Permissive License v 1.0 as shown at https://oss.oracle.com/licenses/upl. 
+# +import sys +#======================================================= +# Function for undeployment +#======================================================= +def unDeploy(appName,target): + print 'Undeploying .........' + try: + stopApplication(appName) + undeploy(appName, target) + except Exception, ex: + print ex.toString() + +#======================================================== +# Main program here... +# Target you can change as per your need +#======================================================== +def usage(): + argsList = ' -domainName -adminServerName -adminURL -username -password ' + argsList=argsList + ' -oamClusterName ' + ' -wlsMonitoringExporterTooamCluster ' + argsList=argsList + ' -policyClusterName ' + ' -wlsMonitoringExporterTopolicyCluster ' + print sys.argv[0] + argsList + sys.exit(0) + +if len(sys.argv) < 1: + usage() + +# domainName will be passed by command line parameter -domainName. +domainName = "accessdomain" + +# adminServerName will be passed by command line parameter -adminServerName +adminServerName = "AdminServer" + +# adminURL will be passed by command line parameter -adminURL +adminURL = "accessdomain-adminserver:7001" + +# oamClusterName will be passed by command line parameter -oamClusterName +oamClusterName = "oam_cluster" + +# wlsMonitoringExporterTooamCluster will be passed by command line parameter -wlsMonitoringExporterTooamCluster +wlsMonitoringExporterTooamCluster = "true" +# policyClusterName will be passed by command line parameter -policyClusterName +policyClusterName = "policy_cluster" + +# wlsMonitoringExporterTopolicyCluster will be passed by command line parameter -wlsMonitoringExporterTopolicyCluster +wlsMonitoringExporterTopolicyCluster = "true" + +# username will be passed by command line parameter -username +username = "weblogic" + +# password will be passed by command line parameter -password +password = "welcome1" + + +i=1 +while i < len(sys.argv): + if sys.argv[i] == '-domainName': + domainName = 
sys.argv[i+1] + i += 2 + elif sys.argv[i] == '-adminServerName': + adminServerName = sys.argv[i+1] + i += 2 + elif sys.argv[i] == '-adminURL': + adminURL = sys.argv[i+1] + i += 2 + elif sys.argv[i] == '-username': + username = sys.argv[i+1] + i += 2 + elif sys.argv[i] == '-password': + password = sys.argv[i+1] + i += 2 + elif sys.argv[i] == '-oamClusterName': + oamClusterName = sys.argv[i+1] + i += 2 + elif sys.argv[i] == '-wlsMonitoringExporterTooamCluster': + wlsMonitoringExporterTooamCluster = sys.argv[i+1] + i += 2 + elif sys.argv[i] == '-policyClusterName': + policyClusterName = sys.argv[i+1] + i += 2 + elif sys.argv[i] == '-wlsMonitoringExporterTopolicyCluster': + wlsMonitoringExporterTopolicyCluster = sys.argv[i+1] + i += 2 + + else: + print 'Unexpected argument switch at position ' + str(i) + ': ' + str(sys.argv[i]) + usage() + sys.exit(1) + +# Undeploy +connect(username, password, 't3://' + adminURL) +unDeploy('wls-exporter-adminserver',adminServerName) +if 'true' == wlsMonitoringExporterTooamCluster: + unDeploy('wls-exporter-oam',oamClusterName) + +if 'true' == wlsMonitoringExporterTopolicyCluster: + unDeploy('wls-exporter-policy',policyClusterName) + +disconnect() +exit() + diff --git a/OracleAccessManagement/kubernetes/monitoring-service/scripts/undeploy-weblogic-monitoring-exporter.sh b/OracleAccessManagement/kubernetes/monitoring-service/scripts/undeploy-weblogic-monitoring-exporter.sh new file mode 100755 index 000000000..33cdf72dc --- /dev/null +++ b/OracleAccessManagement/kubernetes/monitoring-service/scripts/undeploy-weblogic-monitoring-exporter.sh @@ -0,0 +1,39 @@ +#!/bin/bash +# Copyright (c) 2021, Oracle and/or its affiliates. +# Licensed under the Universal Permissive License v 1.0 as shown at https://oss.oracle.com/licenses/upl. 
+# +# Initialize +script="${BASH_SOURCE[0]}" +scriptDir="$( cd "$( dirname "${script}" )" && pwd )" +source ${scriptDir}/utils.sh + +# Function to lowercase a value and make it a legal DNS1123 name +# $1 - value to convert to lowercase +function toDNS1123Legal { + local val=`echo $1 | tr "[:upper:]" "[:lower:]"` + val=${val//"_"/"-"} + echo "$val" +} + +initialize + +# username and password from Kubernetes secret +username=`kubectl get secrets ${weblogicCredentialsSecretName} -n ${domainNamespace} -o=jsonpath='{.data.username}'|base64 --decode` +password=`kubectl get secrets ${weblogicCredentialsSecretName} -n ${domainNamespace} -o=jsonpath='{.data.password}'|base64 --decode` + +adminServerPodName="${domainUID}-$(toDNS1123Legal ${adminServerName})" + +InputParameterList="-domainName ${domainUID} -adminServerName ${adminServerName} -adminURL ${adminServerPodName}:${adminServerPort} -username ${username} -password ${password}" +InputParameterList="${InputParameterList} -oamClusterName ${oamClusterName} -wlsMonitoringExporterTooamCluster ${wlsMonitoringExporterTooamCluster}" +InputParameterList="${InputParameterList} -policyClusterName ${policyClusterName} -wlsMonitoringExporterTopolicyCluster ${wlsMonitoringExporterTopolicyCluster}" + +# Copy weblogic monitoring exporter jars for deployment +echo "Undeploying WebLogic Monitoring Exporter: domainNamespace[$domainNamespace], domainUID[$domainUID], adminServerPodName[$adminServerPodName]" + +kubectl cp $scriptDir/undeploy-weblogic-monitoring-exporter.py ${domainNamespace}/${adminServerPodName}:/u01/oracle/undeploy-weblogic-monitoring-exporter.py +EXEC_UNDEPLOY="kubectl exec -it -n ${domainNamespace} ${adminServerPodName} -- /u01/oracle/oracle_common/common/bin/wlst.sh /u01/oracle/undeploy-weblogic-monitoring-exporter.py ${InputParameterList}" +eval ${EXEC_UNDEPLOY} + +# Cleanup the local wars +rm -rf ${scriptDir}/wls-exporter-deploy + diff --git a/OracleAccessManagement/kubernetes/monitoring-service/scripts/utils.sh 
b/OracleAccessManagement/kubernetes/monitoring-service/scripts/utils.sh new file mode 100755 index 000000000..b3799563b --- /dev/null +++ b/OracleAccessManagement/kubernetes/monitoring-service/scripts/utils.sh @@ -0,0 +1,61 @@ +#!/bin/bash +# Copyright (c) 2021, Oracle and/or its affiliates. +# Licensed under the Universal Permissive License v 1.0 as shown at https://oss.oracle.com/licenses/upl. +# + +function initialize { + if [ -z ${domainNamespace} ]; then + echo "domainNamespace is empty, setting to default oamns" + domainNamespace="oamns" + fi + + if [ -z ${domainUID} ]; then + echo "domainUID is empty, setting to default accessdomain" + domainUID="accessdomain" + fi + + if [ -z ${weblogicCredentialsSecretName} ]; then + echo "weblogicCredentialsSecretName is empty, setting to default \"accessdomain-domain-credentials\"" + weblogicCredentialsSecretName="accessdomain-domain-credentials" + fi + + if [ -z ${adminServerName} ]; then + echo "adminServerName is empty, setting to default \"AdminServer\"" + adminServerName="AdminServer" + fi + + if [ -z ${adminServerPort} ]; then + echo "adminServerPort is empty, setting to default \"7001\"" + adminServerPort="7001" + fi + + if [ -z ${oamClusterName} ]; then + echo "oamClusterName is empty, setting to default \"oam_cluster\"" + oamClusterName="oam_cluster" + fi + + if [ -z ${oamManagedServerPort} ]; then + echo "oamManagedServerPort is empty, setting to default \"14100\"" + oamManagedServerPort="14100" + fi + + if [ -z ${wlsMonitoringExporterTooamCluster} ]; then + echo "wlsMonitoringExporterTooamCluster is empty, setting to default \"false\"" + wlsMonitoringExporterTooamCluster="true" + fi + if [ -z ${policyClusterName} ]; then + echo "policyClusterName is empty, setting to default \"policy_cluster\"" + policyClusterName="policy_cluster" + fi + + if [ -z ${policyManagedServerPort} ]; then + echo "policyManagedServerPort is empty, setting to default \"15100\"" + policyManagedServerPort="15100" + fi + + if [ -z 
${wlsMonitoringExporterTopolicyCluster} ]; then + echo "wlsMonitoringExporterTopolicyCluster is empty, setting to default \"false\"" + wlsMonitoringExporterTopolicyCluster="true" + fi +} + diff --git a/OracleAccessManagement/kubernetes/monitoring-service/setup-monitoring.sh b/OracleAccessManagement/kubernetes/monitoring-service/setup-monitoring.sh new file mode 100755 index 000000000..c36b4bb82 --- /dev/null +++ b/OracleAccessManagement/kubernetes/monitoring-service/setup-monitoring.sh @@ -0,0 +1,192 @@ +#!/bin/bash +# Copyright (c) 2021, Oracle and/or its affiliates. +# Licensed under the Universal Permissive License v 1.0 as shown at https://oss.oracle.com/licenses/upl. +# +# setup-monitoring.sh + +# Initialize +script="${BASH_SOURCE[0]}" +scriptDir="$( cd "$( dirname "${script}" )" && pwd )" +OLD_PWD=`pwd` + + + +# +# Function to exit and print an error message +# $1 - text of message +function fail { + printError $* + exit 1 +} + +# Function to print an error message +function printError { + echo [ERROR] $* +} + + +# +# Function to remove a file if it exists +# +function removeFileIfExists { + echo "input is $1" + if [ -f $1 ]; then + rm -f $1 + fi +} + +function exitIfError { + if [ "$1" != "0" ]; then + echo "$2" + exit $1 + fi +} + +# +# Function to parse a yaml file and generate the bash exports +# $1 - Input filename +# $2 - Output filename +function parseYaml { + local s='[[:space:]]*' w='[a-zA-Z0-9_]*' fs=$(echo @|tr @ '\034') + sed -ne "s|^\($s\):|\1|" \ + -e "s|^\($s\)\($w\)$s:$s[\"']\(.*\)[\"']$s\$|\1$fs\2$fs\3|p" \ + -e "s|^\($s\)\($w\)$s:$s\(.*\)$s\$|\1$fs\2$fs\3|p" $1 | + awk -F$fs '{ + if (length($3) > 0) { + # javaOptions may contain tokens that are not allowed in export command + # we need to handle it differently. 
+ if ($2=="javaOptions") { + printf("%s=%s\n", $2, $3); + } else { + printf("export %s=\"%s\"\n", $2, $3); + } + } + }' > $2 +} + +function usage { + echo usage: ${script} -i file [-v] [-h] + echo " -i Parameter inputs file, must be specified." + echo " -h Help" + exit $1 +} + +function installKubePrometheusStack { + helm repo add prometheus-community https://prometheus-community.github.io/helm-charts + helm repo update + echo "Setup prometheus-community/kube-prometheus-stack in progress" + if [ ${exposeMonitoringNodePort} == "true" ]; then + + helm install ${monitoringNamespace} prometheus-community/kube-prometheus-stack \ + --namespace ${monitoringNamespace} \ + --set prometheus.service.type=NodePort --set prometheus.service.nodePort=${prometheusNodePort} \ + --set alertmanager.service.type=NodePort --set alertmanager.service.nodePort=${alertmanagerNodePort} \ + --set grafana.adminPassword=admin --set grafana.service.type=NodePort --set grafana.service.nodePort=${grafanaNodePort} \ + --version "16.5.0" ${additionalParamForKubePrometheusStack} \ + --atomic --wait + else + helm install ${monitoringNamespace} prometheus-community/kube-prometheus-stack \ + --namespace ${monitoringNamespace} \ + --set grafana.adminPassword=admin \ + --version "16.5.0" ${additionalParamForKubePrometheusStack} \ + --atomic --wait + fi + exitIfError $? "ERROR: prometheus-community/kube-prometheus-stack install failed." +} +#Parse the inputs +while getopts "hi:" opt; do + case $opt in + i) valuesInputFile="${OPTARG}" + ;; + h) usage 0 + ;; + *) usage 1 + ;; + esac +done + +if [ -z ${valuesInputFile} ]; then + echo "${script}: -i must be specified." + missingRequiredOption="true" +fi + +if [ "${missingRequiredOption}" == "true" ]; then + usage 1 +fi + +if [ ! 
-f ${valuesInputFile} ]; then + echo "Unable to locate the input parameters file ${valuesInputFile}" + fail 'The error listed above must be resolved before the script can continue' +fi + + +exportValuesFile=$(mktemp /tmp/export-values-XXXXXXXXX.sh) +parseYaml ${valuesInputFile} ${exportValuesFile} + + +source ${exportValuesFile} +rm ${exportValuesFile} + + +if [ "${setupKubePrometheusStack}" = "true" ]; then + if test "$(kubectl get namespace ${monitoringNamespace} --ignore-not-found | wc -l)" = 0; then + echo "The namespace ${monitoringNamespace} for install prometheus-community/kube-prometheus-stack does not exist. Creating the namespace ${monitoringNamespace}" + kubectl create namespace ${monitoringNamespace} + fi + echo -e "Monitoring setup in ${monitoringNamespace} in progress\n" + + # Create the namespace and CRDs, and then wait for them to be available before creating the remaining resources + kubectl label nodes --all kubernetes.io/os=linux --overwrite=true + + echo "Setup prometheus-community/kube-prometheus-stack started" + installKubePrometheusStack + cd $OLD_PWD + + echo "Setup prometheus-community/kube-prometheus-stack completed" +fi + +username=`kubectl get secrets ${weblogicCredentialsSecretName} -n ${domainNamespace} -o=jsonpath='{.data.username}'|base64 --decode` +password=`kubectl get secrets ${weblogicCredentialsSecretName} -n ${domainNamespace} -o=jsonpath='{.data.password}'|base64 --decode` + +# Setting up the WebLogic Monitoring Exporter +echo "Deploy WebLogic Monitoring Exporter started" +script=${scriptDir}/scripts/deploy-weblogic-monitoring-exporter.sh +sh ${script} +exitIfError $? "ERROR: $script failed." 
+echo "Deploy WebLogic Monitoring Exporter completed" + + +# Deploy servicemonitors +serviceMonitor=${scriptDir}/manifests/wls-exporter-ServiceMonitor.yaml +cp "${serviceMonitor}.template" "${serviceMonitor}" +sed -i -e "s/release: monitoring/release: ${monitoringNamespace}/g" ${serviceMonitor} +sed -i -e "s/user: %USERNAME%/user: `echo -n $username|base64 -w0`/g" ${serviceMonitor} +sed -i -e "s/password: %PASSWORD%/password: `echo -n $password|base64 -w0`/g" ${serviceMonitor} +sed -i -e "s/namespace:.*/namespace: ${domainNamespace}/g" ${serviceMonitor} +sed -i -e "s/weblogic.domainName:.*/weblogic.domainName: ${domainUID}/g" ${serviceMonitor} +sed -i -e "$!N;s/matchNames:\n -.*/matchNames:\n - ${domainNamespace}/g;P;D" ${serviceMonitor} + +kubectl apply -f ${serviceMonitor} + +if [ "${setupKubePrometheusStack}" = "true" ]; then + # Deploying WebLogic Server Grafana Dashboard + echo "Deploying WebLogic Server Grafana Dashboard...." + grafanaEndpointIP=$(kubectl get endpoints ${monitoringNamespace}-grafana -n ${monitoringNamespace} -o=jsonpath="{.subsets[].addresses[].ip}") + grafanaEndpointPort=$(kubectl get endpoints ${monitoringNamespace}-grafana -n ${monitoringNamespace} -o=jsonpath="{.subsets[].ports[].port}") + grafanaEndpoint="${grafanaEndpointIP}:${grafanaEndpointPort}" + curl --noproxy "*" -X POST -H "Content-Type: application/json" -d @config/weblogic-server-dashboard.json http://admin:admin@${grafanaEndpoint}/api/dashboards/db + echo "" + echo "Deployed WebLogic Server Grafana Dashboard successfully" + echo "" + if [ ${exposeMonitoringNodePort} == "true" ]; then + echo "Grafana is available at NodePort: ${grafanaNodePort}" + echo "Prometheus is available at NodePort: ${prometheusNodePort}" + echo "Altermanager is available at NodePort: ${alertmanagerNodePort}" + echo "==============================================================" + fi +else + echo "Please import config/weblogic-server-dashboard.json manually into Grafana" +fi + +echo "" + diff --git 
a/OracleAccessManagement/kubernetes/rest/README.md b/OracleAccessManagement/kubernetes/rest/README.md new file mode 100755 index 000000000..f0e09b088 --- /dev/null +++ b/OracleAccessManagement/kubernetes/rest/README.md @@ -0,0 +1,38 @@ +# Sample to create certificates and keys for the operator + +When a user enables the operator's external REST API (by setting +`externalRestEnabled` to `true` when installing the operator Helm chart), the user needs +to provide the certificate and private key for api's SSL identity too (by creating a +`tls secret` before the installation of the operator helm chart). + +This sample script generates a self-signed certificate and private key that can be used +for the operator's external REST api when experimenting with the operator. They should +not be used in a production environment. + +The syntax of the script is: +```shell +$ kubernetes/samples/scripts/rest/generate-external-rest-identity.sh -n [-s ] +``` + +Where `` lists the subject alternative names to put into the generated self-signed +certificate for the external WebLogic Operator REST HTTPS interface, should match +the namespace where the operator will be installed, and optionally the secret name, which defaults +to `weblogic-operator-external-rest-identity`. Each must be prefaced +by `DNS:` (for a name) or `IP:` (for an address), for example: +``` +DNS:myhost,DNS:localhost,IP:127.0.0.1 +``` + +You should include the addresses of all masters and load balancers in this list. The certificate +cannot be conveniently changed after installation of the operator. + +The script creates the secret in the weblogic-operator namespace with the self-signed +certificate and private key + +Example usage: +```shell +$ generate-external-rest-identity.sh IP:127.0.0.1 -n weblogic-operator > my_values.yaml +$ echo "externalRestEnabled: true" >> my_values.yaml + ... 
+$ helm install my_operator kubernetes/charts/weblogic-operator --namespace my_operator-ns --values my_values.yaml --wait +``` diff --git a/OracleAccessManagement/kubernetes/rest/generate-external-rest-identity.sh b/OracleAccessManagement/kubernetes/rest/generate-external-rest-identity.sh new file mode 100755 index 000000000..e645d3925 --- /dev/null +++ b/OracleAccessManagement/kubernetes/rest/generate-external-rest-identity.sh @@ -0,0 +1,200 @@ +#!/usr/bin/env bash +# Copyright (c) 2017, 2021, Oracle and/or its affiliates. +# Licensed under the Universal Permissive License v 1.0 as shown at https://oss.oracle.com/licenses/upl. +# +# When the customer enables the operator's external REST api (by setting +# externalRestEnabled to true when installing the operator helm chart), the customer needs +# to provide the certificate and private key for api's SSL identity too (by creating a +# tls secret before the installation of the operator helm chart). +# +# This sample script generates a self-signed certificate and private key that can be used +# for the operator's external REST api when experimenting with the operator. They should +# not be used in a production environment. +# +# The syntax of the script is: +# +# kubernetes/samples/scripts/rest/generate-external-rest-identity.sh -a -n +# +# Where lists the subject alternative names to put into the generated self-signed +# certificate for the external WebLogic Operator REST https interface, for example: +# +# DNS:myhost,DNS:localhost,IP:127.0.0.1 +# +# You should include the addresses of all masters and load balancers in this list. The certificate +# cannot be conveniently changed after installation of the operator. +# +# The script creates the secret in the weblogic-operator namespace with the self-signed +# certificate and private key +# +# Example usage: +# generate-external-rest-identity.sh -a IP:127.0.0.1 -n weblogic-operator > my_values.yaml +# echo "externalRestEnabled: true" >> my_values.yaml +# ... 
+# helm install my_operator kubernetes/charts/weblogic-operator --namespace my_operator-ns --values my_values.yaml --wait +usage(){ +cat < -n +Options: +-a SANS Required, the SANs for the certificate +-n NAMESPACE Required, the namespace where the secret will be created. +-s SECRET_NAME Optional, the name of the kubernetes secret. Default is: weblogic-operator-external-rest-identity. +-h, --help Display this help text. +EOF +exit 1 +} + +if [ ! -x "$(command -v keytool)" ]; then + echo "Can't find keytool. Please add it to the path." + exit 1 +fi + +if [ ! -x "$(command -v openssl)" ]; then + echo "Can't find openssl. Please add it to the path." + exit 1 +fi + +if [ ! -x "$(command -v base64)" ]; then + echo "Can't find base64. Please add it to the path." + exit 1 +fi + +TEMP_DIR=`mktemp -d` +if [ $? -ne 0 ]; then + echo "$0: Can't create temp directory." + exit 1 +fi + +if [ -z $TEMP_DIR ]; then + echo "Can't create temp directory." + exit 1 +fi + +function cleanup { + rm -r $TEMP_DIR + if [[ $SUCCEEDED != "true" ]]; then + exit 1 + fi +} + +set -e +#set -x + +trap "cleanup" EXIT + +SECRET_NAME="weblogic-operator-external-rest-identity" + +while [ $# -gt 0 ] + do + key="$1" + case $key in + -a) + shift # past argument + if [ $# -eq 0 ] || [ ${1:0:1} == "-" ]; then echo "SANs is required and is missing"; usage; fi + SANS=$1 + shift # past value + ;; + -n) + shift # past argument + if [ $# -eq 0 ] || [ ${1:0:1} == "-" ]; then echo "Namespace is required and is missing"; usage; fi + NAMESPACE=$1 + shift # past value + ;; + -s) + shift # past argument + if [ $# -eq 0 ] || [ ${1:0:1} == "-" ]; then echo "Invalid secret name $1"; usage; fi + SECRET_NAME=$1 + shift # past value + ;; + -h) + shift # past argument + ;; + *) + SANS=$1 + shift # past argument + ;; + esac +done + +if [ -z "$SANS" ] +then + 1>&2 + echo "SANs is required and is missing" + usage +fi + +if [ -z "$NAMESPACE" ] +then + 1>&2 + echo "Namespace is required and is missing" + usage +fi + 
+DAYS_VALID="3650" +TEMP_PW="temp_password" +OP_PREFIX="weblogic-operator" +OP_ALIAS="${OP_PREFIX}-alias" +OP_JKS="${TEMP_DIR}/${OP_PREFIX}.jks" +OP_PKCS12="${TEMP_DIR}/${OP_PREFIX}.p12" +OP_CSR="${TEMP_DIR}/${OP_PREFIX}.csr" +OP_CERT_PEM="${TEMP_DIR}/${OP_PREFIX}.cert.pem" +OP_KEY_PEM="${TEMP_DIR}/${OP_PREFIX}.key.pem" + +# generate a keypair for the operator's REST service, putting it in a keystore +keytool \ + -genkey \ + -keystore ${OP_JKS} \ + -alias ${OP_ALIAS} \ + -storepass ${TEMP_PW} \ + -keypass ${TEMP_PW} \ + -keysize 2048 \ + -keyalg RSA \ + -validity ${DAYS_VALID} \ + -dname "CN=weblogic-operator" \ + -ext KU=digitalSignature,nonRepudiation,keyEncipherment,dataEncipherment,keyAgreement \ + -ext SAN="${SANS}" \ +2> /dev/null + +# extract the cert to a pem file +keytool \ + -exportcert \ + -keystore ${OP_JKS} \ + -storepass ${TEMP_PW} \ + -alias ${OP_ALIAS} \ + -rfc \ +> ${OP_CERT_PEM} 2> /dev/null + +# convert the keystore to a pkcs12 file +keytool \ + -importkeystore \ + -srckeystore ${OP_JKS} \ + -srcstorepass ${TEMP_PW} \ + -destkeystore ${OP_PKCS12} \ + -srcstorepass ${TEMP_PW} \ + -deststorepass ${TEMP_PW} \ + -deststoretype PKCS12 \ +2> /dev/null + +# extract the private key from the pkcs12 file to a pem file +openssl \ + pkcs12 \ + -in ${OP_PKCS12} \ + -passin pass:${TEMP_PW} \ + -nodes \ + -nocerts \ + -out ${OP_KEY_PEM} \ +2> /dev/null + +set +e +# Check if namespace exist +kubectl get namespace $NAMESPACE >/dev/null 2>/dev/null +if [ $? -eq 1 ]; then + echo "Namespace $NAMESPACE does not exist" + exit 1 +fi +kubectl get secret $SECRET_NAME -n $NAMESPACE >/dev/null 2>/dev/null +if [ $? 
-eq 1 ]; then + kubectl create secret tls "$SECRET_NAME" --cert=${OP_CERT_PEM} --key=${OP_KEY_PEM} -n $NAMESPACE >/dev/null +fi +echo "externalRestIdentitySecret: $SECRET_NAME" + +SUCCEEDED=true diff --git a/OracleAccessManagement/kubernetes/scaling/scalingAction.sh b/OracleAccessManagement/kubernetes/scaling/scalingAction.sh new file mode 100755 index 000000000..0da098e68 --- /dev/null +++ b/OracleAccessManagement/kubernetes/scaling/scalingAction.sh @@ -0,0 +1,504 @@ +#!/bin/bash +# Copyright (c) 2017, 2021, Oracle and/or its affiliates. +# Licensed under the Universal Permissive License v 1.0 as shown at https://oss.oracle.com/licenses/upl. + +# script parameters +scaling_action="" +wls_domain_uid="" +wls_cluster_name="" +wls_domain_namespace="default" +operator_service_name="internal-weblogic-operator-svc" +operator_namespace="weblogic-operator" +operator_service_account="weblogic-operator" +scaling_size=1 +access_token="" +no_op="" +kubernetes_master="https://${KUBERNETES_SERVICE_HOST}:${KUBERNETES_SERVICE_PORT}" +log_file_name="scalingAction.log" + +# timestamp +# purpose: echo timestamp in the form yyyy-mm-ddThh:mm:ss.nnnnnnZ +# example: 2018-10-01T14:00:00.000001Z +function timestamp() { + local timestamp="`date --utc '+%Y-%m-%dT%H:%M:%S.%NZ' 2>&1`" + if [ ! 
"${timestamp/illegal/xyz}" = "${timestamp}" ]; then + # old shell versions don't support %N or --utc + timestamp="`date -u '+%Y-%m-%dT%H:%M:%S.000000Z' 2>&1`" + fi + echo "${timestamp}" +} + +function trace() { + echo "@[$(timestamp)][$wls_domain_namespace][$wls_domain_uid][$wls_cluster_name][INFO]" "$@" >> ${log_file_name} +} + +function print_usage() { + echo "Usage: scalingAction.sh --action=[scaleUp | scaleDown] --domain_uid= --cluster_name= [--kubernetes_master=https://${KUBERNETES_SERVICE_HOST}:${KUBERNETES_SERVICE_PORT}] [--access_token=] [--wls_domain_namespace=default] [--operator_namespace=weblogic-operator] [--operator_service_name=weblogic-operator] [--scaling_size=1] [--no_op]" + echo " where" + echo " action - scaleUp or scaleDown" + echo " domain_uid - WebLogic Domain Unique Identifier" + echo " cluster_name - WebLogic Cluster Name" + echo " kubernetes_master - Kubernetes master URL, default=https://${KUBERNETES_SERVICE_HOST}:${KUBERNETES_SERVICE_PORT}" + echo " access_token - Service Account Bearer token for authentication and authorization for access to REST Resources" + echo " wls_domain_namespace - Kubernetes name space WebLogic Domain is defined in, default=default" + echo " operator_service_name - WebLogic Operator Service name, default=internal-weblogic-operator-svc" + echo " operator_service_account - Kubernetes Service Account for WebLogic Operator, default=weblogic-operator" + echo " operator_namespace - WebLogic Operator Namespace, default=weblogic-operator" + echo " scaling_size - number of WebLogic server instances by which to scale up or down, default=1" + echo " no_op - if specified, returns without doing anything. 
For use by unit test to include methods in the script" + exit 1 +} + +# Retrieve WebLogic Operator Service Account Token for Authorization +function initialize_access_token() { + if [ -z "$access_token" ] + then + access_token=`cat /var/run/secrets/kubernetes.io/serviceaccount/token` + fi +} + +function logScalingParameters() { + trace "scaling_action: $scaling_action" + trace "wls_domain_uid: $wls_domain_uid" + trace "wls_cluster_name: $wls_cluster_name" + trace "wls_domain_namespace: $wls_domain_namespace" + trace "operator_service_name: $operator_service_name" + trace "operator_service_account: $operator_service_account" + trace "operator_namespace: $operator_namespace" + trace "scaling_size: $scaling_size" +} + +function jq_available() { + if [ -x "$(command -v jq)" ] && [ -z "$DONT_USE_JQ" ]; then + return; + fi + false +} + +# Query WebLogic Operator Service Port +function get_operator_internal_rest_port() { + local STATUS=$(curl \ + -v \ + --cacert /var/run/secrets/kubernetes.io/serviceaccount/ca.crt \ + -H "Authorization: Bearer $(cat /var/run/secrets/kubernetes.io/serviceaccount/token)" \ + -X GET $kubernetes_master/api/v1/namespaces/$operator_namespace/services/$operator_service_name/status) + if [ $? 
-ne 0 ] + then + trace "Failed to retrieve status of $operator_service_name in name space: $operator_namespace" + trace "STATUS: $STATUS" + exit 1 + fi + + local port + if jq_available; then + local extractPortCmd="(.spec.ports[] | select (.name == \"rest\") | .port)" + port=$(echo "${STATUS}" | jq "${extractPortCmd}" 2>> ${log_file_name}) + else +cat > cmds-$$.py << INPUT +import sys, json +for i in json.load(sys.stdin)["spec"]["ports"]: + if i["name"] == "rest": + print(i["port"]) +INPUT +port=$(echo "${STATUS}" | python cmds-$$.py 2>> ${log_file_name}) + fi + echo "$port" +} + +# Retrieve the api version of the deployed Custom Resource Domain +function get_domain_api_version() { + # Retrieve Custom Resource Definition for WebLogic domain + local APIS=$(curl \ + -v \ + --cacert /var/run/secrets/kubernetes.io/serviceaccount/ca.crt \ + -H "Authorization: Bearer $(cat /var/run/secrets/kubernetes.io/serviceaccount/token)" \ + -X GET \ + $kubernetes_master/apis) + if [ $? -ne 0 ] + then + trace "Failed to retrieve list of APIs from Kubernetes cluster" + trace "APIS: $APIS" + exit 1 + fi + +# Find domain version + local domain_api_version + if jq_available; then + local extractVersionCmd="(.groups[] | select (.name == \"weblogic.oracle\") | .preferredVersion.version)" + domain_api_version=$(echo "${APIS}" | jq -r "${extractVersionCmd}" 2>> ${log_file_name}) + else +cat > cmds-$$.py << INPUT +import sys, json +for i in json.load(sys.stdin)["groups"]: + if i["name"] == "weblogic.oracle": + print(i["preferredVersion"]["version"]) +INPUT +domain_api_version=`echo ${APIS} | python cmds-$$.py 2>> ${log_file_name}` + fi + echo "$domain_api_version" +} + +# Retrieve Custom Resource Domain +function get_custom_resource_domain() { + local DOMAIN=$(curl \ + -v \ + --cacert /var/run/secrets/kubernetes.io/serviceaccount/ca.crt \ + -H "Authorization: Bearer $(cat /var/run/secrets/kubernetes.io/serviceaccount/token)" \ + 
$kubernetes_master/apis/weblogic.oracle/$domain_api_version/namespaces/$wls_domain_namespace/domains/$wls_domain_uid) + if [ $? -ne 0 ]; then + trace "Failed to retrieve WebLogic Domain Custom Resource Definition" + exit 1 + fi + echo "$DOMAIN" +} + +# Verify if cluster is defined in clusters of the Custom Resource Domain +# args: +# $1 Custom Resource Domain +function is_defined_in_clusters() { + local DOMAIN="$1" + local in_cluster_startup="False" + + if jq_available; then + local inClusterStartupCmd="(.spec.clusters[] | select (.clusterName == \"${wls_cluster_name}\"))" + local clusterDefinedInCRD=$(echo "${DOMAIN}" | jq "${inClusterStartupCmd}" 2>> ${log_file_name}) + if [ "${clusterDefinedInCRD}" != "" ]; then + in_cluster_startup="True" + fi + else +cat > cmds-$$.py << INPUT +import sys, json +outer_loop_must_break = False +for j in json.load(sys.stdin)["spec"]["clusters"]: + if j["clusterName"] == "$wls_cluster_name": + outer_loop_must_break = True + print (True) + break +if outer_loop_must_break == False: + print (False) +INPUT +in_cluster_startup=`echo ${DOMAIN} | python cmds-$$.py 2>> ${log_file_name}` + fi + echo "$in_cluster_startup" +} + +# Gets the current replica count of the cluster +# args: +# $1 Custom Resource Domain +function get_num_ms_in_cluster() { + local DOMAIN="$1" + local num_ms + if jq_available; then + local numManagedServersCmd="(.spec.clusters[] | select (.clusterName == \"${wls_cluster_name}\") | .replicas)" + num_ms=$(echo "${DOMAIN}" | jq "${numManagedServersCmd}" 2>> ${log_file_name}) + else +cat > cmds-$$.py << INPUT +import sys, json +for j in json.load(sys.stdin)["spec"]["clusters"]: + if j["clusterName"] == "$wls_cluster_name": + print (j["replicas"]) +INPUT + num_ms=`echo ${DOMAIN} | python cmds-$$.py 2>> ${log_file_name}` + fi + + if [ "${num_ms}" == "null" ] || [ "${num_ms}" == '' ] ; then + num_ms=0 + fi + + echo "$num_ms" +} + +# Gets the replica count at the Domain level +# args: +# $1 Custom Resource Domain +function 
get_num_ms_domain_scope() { + local DOMAIN="$1" + local num_ms + if jq_available; then + num_ms=$(echo "${DOMAIN}" | jq -r '.spec.replicas' 2>> ${log_file_name}) + else +cat > cmds-$$.py << INPUT +import sys, json +print (json.load(sys.stdin)["spec"]["replicas"]) +INPUT + num_ms=`echo ${DOMAIN} | python cmds-$$.py 2>> ${log_file_name}` + fi + + if [ "${num_ms}" == "null" ] || [ "${num_ms}" == '' ] ; then + # if not defined then default to 0 + num_ms=0 + fi + + echo "$num_ms" +} + +# +# Function to get minimum replica count for cluster +# $1 - Domain resource in json format +# $2 - Name of the cluster +# $3 - Return value containing minimum replica count +# +function get_min_replicas { + local domainJson=$1 + local clusterName=$2 + local __result=$3 + + eval $__result=0 + if jq_available; then + minReplicaCmd="(.status.clusters[] | select (.clusterName == \"${clusterName}\")) \ + | .minimumReplicas" + minReplicas=$(echo ${domainJson} | jq "${minReplicaCmd}" 2>> ${log_file_name}) + else +cat > cmds-$$.py << INPUT +import sys, json +for j in json.load(sys.stdin)["status"]["clusters"]: + if j["clusterName"] == "$clusterName": + print (j["minimumReplicas"]) +INPUT + minReplicas=`echo ${DOMAIN} | python cmds-$$.py 2>> ${log_file_name}` + fi + eval $__result=${minReplicas} +} + +# Get the current replica count for the WLS cluster if defined in the CRD's Cluster +# configuration. If WLS cluster is not defined in the CRD then return the Domain +# scoped replica value, if present. Returns replica count = 0 if no replica count found. 
+# args: +# $1 "True" if WLS cluster configuration defined in CRD, "False" otherwise +# $2 Custom Resource Domain +function get_replica_count() { + local in_cluster_startup="$1" + local DOMAIN="$2" + local num_ms + if [ "$in_cluster_startup" == "True" ] + then + trace "$wls_cluster_name defined in clusters" + num_ms=$(get_num_ms_in_cluster "$DOMAIN") + else + trace "$wls_cluster_name NOT defined in clusters" + num_ms=$(get_num_ms_domain_scope "$DOMAIN") + fi + + get_min_replicas "${DOMAIN}" "${wls_cluster_name}" minReplicas + if [[ "${num_ms}" -lt "${minReplicas}" ]]; then + # Reset managed server count to minimum replicas + num_ms=${minReplicas} + fi + + echo "$num_ms" +} + +# Determine the number of managed servers to scale +# args: +# $1 scaling action (scaleUp or scaleDown) +# $2 current replica count +# $3 scaling increment value +function calculate_new_ms_count() { + local scaling_action="$1" + local current_replica_count="$2" + local scaling_size="$3" + local new_ms + if [ "$scaling_action" == "scaleUp" ]; + then + # Scale up by specified scaling size + # shellcheck disable=SC2004 + new_ms=$(($current_replica_count + $scaling_size)) + else + # Scale down by specified scaling size + new_ms=$(($current_replica_count - $scaling_size)) + fi + echo "$new_ms" +} + +# Verify if requested managed server scaling count is less than the configured +# minimum replica count for the cluster. 
+# args: +# $1 Managed server count +# $2 Custom Resource Domain +# $3 Cluster name +function verify_minimum_ms_count_for_cluster() { + local new_ms="$1" + local domainJson="$2" + local clusterName="$3" + # check if replica count is less than minimum replicas + get_min_replicas "${domainJson}" "${clusterName}" minReplicas + if [ "${new_ms}" -lt "${minReplicas}" ]; then + trace "Scaling request to new managed server count $new_ms is less than configured minimum \ + replica count $minReplicas" + exit 1 + fi +} + +# Create the REST endpoint CA certificate in PEM format +# args: +# $1 certificate file name to create +function create_ssl_certificate_file() { + local pem_filename="$1" + if [ ${INTERNAL_OPERATOR_CERT} ]; + then + echo ${INTERNAL_OPERATOR_CERT} | base64 --decode > $pem_filename + else + trace "Operator Cert File not found" + exit 1 + fi +} + +# Create request body for scaling request +# args: +# $1 replica count +function get_request_body() { +local new_ms="$1" +local request_body=$(cat < +WebLogicHost ${WEBLOGIC_HOST} +WebLogicPort ${WEBLOGIC_PORT} + + +# Directive for weblogic admin Console deployed on Weblogic Admin Server + +SetHandler weblogic-handler +WebLogicHost domain1-admin-server +WebLogicPort ${WEBLOGIC_PORT} + + +# Directive for all application deployed on weblogic cluster with a prepath defined by LOCATION variable +# For example, if the LOCAITON is set to '/weblogic', all applications deployed on the cluster can be accessed via +# http://myhost:myport/weblogic/application_end_url +# where 'myhost' is the IP of the machine that runs the Apache web tier, and +# 'myport' is the port that the Apache web tier is publicly exposed to. +# Note that LOCATION cannot be set to '/' unless this is the only Location module configured. 
+ +WLSRequest On +WebLogicCluster domain1-cluster-cluster-1:8001 +PathTrim /weblogic1 + + +# Directive for all application deployed on weblogic cluster with a prepath defined by LOCATION2 variable +# For example, if the LOCATION2 is set to '/weblogic2', all applications deployed on the cluster can be accessed via +# http://myhost:myport/weblogic2/application_end_url +# where 'myhost' is the IP of the machine that runs the Apache web tier, and +# 'myport' is the port that the Apache web tier is publicly exposed to. + +WLSRequest On +WebLogicCluster domain2-cluster-cluster-1:8021 +PathTrim /weblogic2 + +``` + +* Create a PV / PVC (pv-claim-name) that can be used to store the `custom_mod_wl_apache.conf`. Refer to the [Sample for creating a PV or PVC](/kubernetes/samples/scripts/create-weblogic-domain-pv-pvc/README.md). + +## 5. Prepare your own certificate and private key +In production, Oracle strongly recommends that you provide your own certificates. Run the following commands to generate your own certificate and private key using `openssl`. + +```shell +$ cd kubernetes/samples/charts/apache-samples/custom-sample +$ export VIRTUAL_HOST_NAME=apache-sample-host +$ export SSL_CERT_FILE=apache-sample.crt +$ export SSL_CERT_KEY_FILE=apache-sample.key +$ sh certgen.sh +``` + +## 6. Prepare the input values for the Apache webtier Helm chart +Run the following commands to prepare the input value file for the Apache webtier Helm chart. + +```shell +$ base64 -i ${SSL_CERT_FILE} | tr -d '\n' +$ base64 -i ${SSL_CERT_KEY_FILE} | tr -d '\n' +$ touch input.yaml +``` +Edit the input parameters file, `input.yaml`. The file content is similar to below. + +```yaml +# Copyright (c) 2018, 2021, Oracle and/or its affiliates. +# Licensed under the Universal Permissive License v 1.0 as shown at https://oss.oracle.com/licenses/upl. 
+ +# Use this to provide your own Apache webtier configuration as needed; simply define this +# Persistence Volume which contains your own custom_mod_wl_apache.conf file. +persistentVolumeClaimName: + +# The VirtualHostName of the Apache HTTP server. It is used to enable custom SSL configuration. +virtualHostName: apache-sample-host + +# The customer supplied certificate to use for Apache webtier SSL configuration. +# The value must be a string containing a base64 encoded certificate. Run following command to get it. +# base64 -i ${SSL_CERT_FILE} | tr -d '\n' +customCert: + +# The customer supplied private key to use for Apache webtier SSL configuration. +# The value must be a string containing a base64 encoded key. Run following command to get it. +# base64 -i ${SSL_CERT_KEY_FILE} | tr -d '\n' +customKey: +``` + +## 7. Install the Apache webtier Helm chart +The Apache webtier Helm chart is located in the `kubernetes/samples/charts/apache-webtier` directory. Install the Apache webtier Helm chart to the `apache-sample` namespace with the specified input parameters: + +```shell +$ cd kubernetes/samples/charts +$ helm install my-release --values apache-samples/custom-sample/input.yaml --namespace apache-sample apache-webtier +``` + +## 8. Run the sample application +Now you can send requests to different WebLogic domains with the unique entry point of Apache with different paths. Alternatively, you can access the URLs in a web browser. +```shell +$ curl --silent http://${HOSTNAME}:30305/weblogic1/testwebapp/ +$ curl --silent http://${HOSTNAME}:30305/weblogic2/testwebapp/ +``` +Also, you can use SSL URLs to send requests to different WebLogic domains. Access the SSL URL via the `curl` command or a web browser. +```shell +$ curl -k --silent https://${HOSTNAME}:30443/weblogic1/testwebapp/ +$ curl -k --silent https://${HOSTNAME}:30443/weblogic2/testwebapp/ +``` + +## 9. 
Uninstall the Apache webtier +```shell +$ helm uninstall my-release --namespace apache-sample +``` diff --git a/OracleIdentityGovernance/kubernetes/charts/apache-samples/custom-sample/certgen.sh b/OracleIdentityGovernance/kubernetes/charts/apache-samples/custom-sample/certgen.sh new file mode 100755 index 000000000..20dd9fa51 --- /dev/null +++ b/OracleIdentityGovernance/kubernetes/charts/apache-samples/custom-sample/certgen.sh @@ -0,0 +1,51 @@ +#!/bin/sh +# Copyright (c) 2018, 2021, Oracle and/or its affiliates. +# Licensed under the Universal Permissive License v 1.0 as shown at https://oss.oracle.com/licenses/upl. +# +# Since: June, 2018 +# Author: dongbo.xiao@oracle.com +# Description: script to start Apache HTTP Server +# +# DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS HEADER. + +# Generated configuration file +CONFIG_FILE="config.txt" + +cat > $CONFIG_FILE <<-EOF +[req] +default_bits = 2048 +prompt = no +default_md = sha256 +req_extensions=v3_req +extensions=v3_req +distinguished_name = dn + +[dn] +C = US +ST = CA +L = Redwood Shores +O = Oracle Corporation +OU = Apache HTTP Server With Plugin +CN = $VIRTUAL_HOST_NAME + +[v3_req] +subjectAltName = @alt_names +[alt_names] +DNS.1 = $VIRTUAL_HOST_NAME +DNS.2 = $VIRTUAL_HOST_NAME.cloud.oracle.com +DNS.3 = *.$VIRTUAL_HOST_NAME +DNS.4 = localhost +EOF + +echo "Generating certs for $VIRTUAL_HOST_NAME" + +# Generate our Private Key, CSR and Certificate +# Use SHA-2 as SHA-1 is unsupported from Jan 1, 2017 + +openssl req -x509 -newkey rsa:2048 -sha256 -nodes -keyout "$SSL_CERT_KEY_FILE" -days 3650 -out "$SSL_CERT_FILE" -config "$CONFIG_FILE" + +# OPTIONAL - write an info to see the details of the generated crt +openssl x509 -noout -fingerprint -text < "$SSL_CERT_FILE" > "$SSL_CERT_FILE.info" +# Protect the key +chmod 400 "$SSL_CERT_KEY_FILE" +chmod 400 "$SSL_CERT_FILE.info" diff --git a/OracleIdentityGovernance/kubernetes/charts/apache-samples/custom-sample/custom_mod_wl_apache.conf 
b/OracleIdentityGovernance/kubernetes/charts/apache-samples/custom-sample/custom_mod_wl_apache.conf new file mode 100755 index 000000000..8a2d05f0d --- /dev/null +++ b/OracleIdentityGovernance/kubernetes/charts/apache-samples/custom-sample/custom_mod_wl_apache.conf @@ -0,0 +1,37 @@ +# Copyright (c) 2018, 2021, Oracle and/or its affiliates. +# Licensed under the Universal Permissive License v 1.0 as shown at https://oss.oracle.com/licenses/upl. + + +WebLogicHost ${WEBLOGIC_HOST} +WebLogicPort ${WEBLOGIC_PORT} + + +# Directive for weblogic admin Console deployed on Weblogic Admin Server + +SetHandler weblogic-handler +WebLogicHost domain1-admin-server +WebLogicPort ${WEBLOGIC_PORT} + + +# Directive for all application deployed on weblogic cluster with a prepath defined by LOCATION variable
# For example, if the LOCATION is set to '/weblogic', all applications deployed on the cluster can be accessed via +# http://myhost:myport/weblogic/application_end_url +# where 'myhost' is the IP of the machine that runs the Apache web tier, and +# 'myport' is the port that the Apache web tier is publicly exposed to. +# Note that LOCATION cannot be set to '/' unless this is the only Location module configured. + +WLSRequest On +WebLogicCluster domain1-cluster-cluster-1:8001 +PathTrim /weblogic1 + + +# Directive for all application deployed on weblogic cluster with a prepath defined by LOCATION2 variable +# For example, if the LOCATION2 is set to '/weblogic2', all applications deployed on the cluster can be accessed via +# http://myhost:myport/weblogic2/application_end_url +# where 'myhost' is the IP of the machine that runs the Apache web tier, and +# 'myport' is the port that the Apache web tier is publicly exposed to. 
+ +WLSRequest On +WebLogicCluster domain2-cluster-cluster-1:8021 +PathTrim /weblogic2 + diff --git a/OracleIdentityGovernance/kubernetes/charts/apache-samples/custom-sample/input.yaml b/OracleIdentityGovernance/kubernetes/charts/apache-samples/custom-sample/input.yaml new file mode 100755 index 000000000..95eaec6e9 --- /dev/null +++ b/OracleIdentityGovernance/kubernetes/charts/apache-samples/custom-sample/input.yaml @@ -0,0 +1,28 @@ +# Copyright (c) 2018, 2021, Oracle and/or its affiliates. +# Licensed under the Universal Permissive License v 1.0 as shown at https://oss.oracle.com/licenses/upl. + +# Use this to provide your own Apache webtier configuration as needed; simply define the +# Persistence Volume which contains your own custom_mod_wl_apache.conf file and provide the Persistence Volume Claim Name +persistentVolumeClaimName: + +# imagePullSecrets contains an optional list of Kubernetes secrets, that are needed +# to access the registry containing the apache webtier image. +# If no secrets are required, then omit this property. +# +# Example : a secret is needed, and has been stored in 'my-apache-webtier-secret' +# +# imagePullSecrets: +# - name: my-apache-webtier-secret + +# The VirtualHostName of the Apache HTTP server. It is used to enable custom SSL configuration. +virtualHostName: apache-sample-host + +# The customer supplied certificate to use for Apache webtier SSL configuration. +# The value must be a string containing a base64 encoded certificate. Run following command to get it. 
+# base64 -i ${SSL_CERT_FILE} | tr -d '\n' +customCert: LS0tLS1CRUdJTiBDRVJUSUZJQ0FURS0tLS0tCk1JSURxakNDQXBJQ0NRQ0w2Q2JwRWZ6QnB6QU5CZ2txaGtpRzl3MEJBUXNGQURDQmxqRUxNQWtHQTFVRUJoTUMKVlZNeEN6QUpCZ05WQkFnTUFrTkJNUmN3RlFZRFZRUUhEQTVTWldSM2IyOWtJRk5vYjNKbGN6RWJNQmtHQTFVRQpDZ3dTVDNKaFkyeGxJRU52Y25CdmNtRjBhVzl1TVNjd0pRWURWUVFMREI1QmNHRmphR1VnU0ZSVVVDQlRaWEoyClpYSWdWMmwwYUNCUWJIVm5hVzR4R3pBWkJnTlZCQU1NRW1Gd1lXTm9aUzF6WVcxd2JHVXRhRzl6ZERBZUZ3MHgKT0RFeE1UUXhOVEF3TURGYUZ3MHlPREV4TVRFeE5UQXdNREZhTUlHV01Rc3dDUVlEVlFRR0V3SlZVekVMTUFrRwpBMVVFQ0F3Q1EwRXhGekFWQmdOVkJBY01EbEpsWkhkdmIyUWdVMmh2Y21Wek1Sc3dHUVlEVlFRS0RCSlBjbUZqCmJHVWdRMjl5Y0c5eVlYUnBiMjR4SnpBbEJnTlZCQXNNSGtGd1lXTm9aU0JJVkZSUUlGTmxjblpsY2lCWGFYUm8KSUZCc2RXZHBiakViTUJrR0ExVUVBd3dTWVhCaFkyaGxMWE5oYlhCc1pTMW9iM04wTUlJQklqQU5CZ2txaGtpRwo5dzBCQVFFRkFBT0NBUThBTUlJQkNnS0NBUUVBeXBVWjEzV3ltcUVnSUZOVTZDa2E0SkFqMXFNemZ4T2FjTklNClVKRE9zZUtqdjNOYmpJb0szQTArcE9lRDNPOXBNcUVxM3F5ZWlCTUtnVEQwREhZNS9HQldjeEUvdUJyWk0rQzgKcnl3RVk5QTl5Y1drZ3h4NUFqSFM1ZnRLMFhpQU9OZWdnUnV0RTBTTnRmbmY3T0FwaStzU0k1RlBzT2V2ZWZGVgoybjJHUDg0bHNDTTZ3Y3FLcXRKeStwOC94VEJKdW1MY2RoL1daYktGTDd5YzFGSzdUNXdPVTB3eS9nZ1lVOUVvCk9tT3M3MENQWmloSkNrc1hrd1d0Q0JISEEwWGJPMXpYM1VZdnRpeGMwb2U3aFltd29zZnlQWU1raC9hL2pWYzEKWkhac25wQXZiWTZrVEoyY1dBa1hyS0srVmc5ZGJrWGVPY0FFTnNHazIvcXFxVGNOV1FJREFRQUJNQTBHQ1NxRwpTSWIzRFFFQkN3VUFBNElCQVFDQXZZNzBHVzBTM1V4d01mUHJGYTZvOFJxS3FNSDlCRE9lZ29zZGc5Nm9QakZnClgzRGJjblU5U0QxTzAyZUhNb0RTRldiNFlsK3dwZk9zUDFKekdQTERQcXV0RWRuVjRsbUJlbG15Q09xb0F4R0gKRW1vZGNUSWVxQXBnVDNEaHR1NW90UW4zZTdGaGNRRHhDelN6SldkUTRJTFh4SExsTVBkeHpRN1NwTzVySERGeAo0eEd6dkNHRkMwSlhBZ2w4dFhvR3dUYkpDR1hxYWV2cUIrNXVLY1NpSUo2M2dhQk1USytjUmF5MkR4L1dwcEdBClZWTnJsTWs4TEVQT1VSN2RZMm0xT3RaU1hCckdib3QwQjNEUG9yRkNpeVF5Q20vd0FYMFk0Z0hiMlNmcitOeFoKQkppb2VXajZ6ZGFvU3dPZkwxd2taWlJjVGtlZlZyZXdVRjZRQ3BCcAotLS0tLUVORCBDRVJUSUZJQ0FURS0tLS0tCg== + +# The customer supplied private key to use for Apache webtier SSL configuration. +# The value must be a string containing a base64 encoded key. 
Run following command to get it. +# base64 -i ${SSL_KEY_FILE} | tr -d '\n' +customKey: LS0tLS1CRUdJTiBQUklWQVRFIEtFWS0tLS0tCk1JSUV2Z0lCQURBTkJna3Foa2lHOXcwQkFRRUZBQVNDQktnd2dnU2tBZ0VBQW9JQkFRREtsUm5YZGJLYW9TQWcKVTFUb0tScmdrQ1BXb3pOL0U1cHcwZ3hRa002eDRxTy9jMXVNaWdyY0RUNms1NFBjNzJreW9TcmVySjZJRXdxQgpNUFFNZGpuOFlGWnpFVCs0R3RrejRMeXZMQVJqMEQzSnhhU0RISGtDTWRMbCswclJlSUE0MTZDQkc2MFRSSTIxCitkL3M0Q21MNnhJamtVK3c1Njk1OFZYYWZZWS96aVd3SXpyQnlvcXEwbkw2bnovRk1FbTZZdHgySDlabHNvVXYKdkp6VVVydFBuQTVUVERMK0NCaFQwU2c2WTZ6dlFJOW1LRWtLU3hlVEJhMElFY2NEUmRzN1hOZmRSaSsyTEZ6UwpoN3VGaWJDaXgvSTlneVNIOXIrTlZ6VmtkbXlla0M5dGpxUk1uWnhZQ1Jlc29yNVdEMTF1UmQ0NXdBUTJ3YVRiCitxcXBOdzFaQWdNQkFBRUNnZ0VCQUtPKzR4VnFHRVN1aWxZMnBVSEd2K2ZWK25IcWxweFh6eFQwWTJuWHNvck0KZzhralNGT1AzUGxEWjJoSmppZE9DUDBZa3B0TWNoUFJPRU4ydXowN2J1RlZTV3RXL09jbUpIeXZZalJCWXdiKwo4b0tlVTd4NmprRTgzcGh3aDJoTGUzRDJzZERKK3hyQTViNjZ5OG9lNHRZcTJ3Mk96aGhUSFY1MnVRdVRQS2xpCjJpSHNYQzIwT1dMSmRuMGU1a0IycTJhV3JJaUJBVzI1Y0JyRDQ5MWFyTDh0emJQOWM4eUUyWUdNM1FKaUFtbkYKNUxZUElzZFdVczJYNEhscWtUM0d6ZEVxNUtzV0pzdjN5QUkxOVJ4eXAwZXd1ditTN3hsRjdIZGlhbnR6ZUp4WAp3MnRWbHpjb1BVQVhoVHIxS0N1UDNCT3BQVXNvMG9oaDNzRFVXamVVWUNVQ2dZRUE3L25QYTE5ckpKUExJOFZiCllhQ2pEKzhTR0FvVWZwSDdRTVFyT2RzR0RkcWRKa2VlNEJ0RDBITUEzL1lLVGFUK0JvRVZmQ2czSWpZVWpmeGcKSkp0VWlJVlcya0RsMU5NY0xXaldINExPaFErQlRGbWcvbFlkc2puMW9FbUJ1Rk1NYWF0ejNGdmZscFRCekg4cwpwMHFyL0hJYTFTbllBckVTUXZUVk9MMVhtcThDZ1lFQTJCd1V6NmpQdVVGR3ZKS3RxWTZVbE9yYm05WXFyYVdDCjlhQ3ZBTDFHZ0Q1U1FEcGRVZnl3MVlWdm9hUU9DWHBOL0Z5UHZCdFF2TzYrbHp0MjVTcmMwZk0weHI3d3ZHRmEKSW5FcmlSOXAvMXdXU01yaWFXZitKaE81NENneFZ0alBXZm1pOVNhc0pqOE1jZVk0cUNCNUVJLzM1cjVaa3lFRQozeEhzcEUxVnVuY0NnWUJLYXBveXZzVTM4NGprRDloMW50M1NIQjN0VEhyc2dSSjhGQmtmZU5jWXhybEMzS1RjCjlEZUVWWlZvM2lCMTBYdGd3dmpKcHFMcVBnRUR3c2FCczVWMFBIMGhjMHlTUWVFVUI5V1dzZmFlOXA3dThVQm0KZm9mNDg5WkNuV2pYb3hGUFYzYTNWOW92RlBSQUdSUGMwT0FpaWJQZWRIcGk0MHc1YlRrTnZsR0RTd0tCZ1FESApubWk2eUR2WDZ5dmowN2tGL2VYUkNIK0NHdm1oSEZremVoRXNwYWtSbkg5dFJId2UxMEtnZUhqODNnVDVURGZzCis3THBGbklsa29JS1A2czdVN1JWV2tsTnErSENvRW9adGw5NGNjUC9WS
mhnOU1iZWhtaUQwNFRHUVZvUjFvTHgKb1YyZEJQUFBBRDRHbDVtTjh6RGcwNXN4VUhKOUxPckxBa3VNR01NdlVRS0JnQ2RUUGgwVHRwODNUUVZFZnR3bwpuSGVuSEQzMkhrZkR0MTV4Wk84NVZGcTlONVg2MjB2amZKNkNyVnloS1RISllUREs1N2owQ3Z2STBFTksxNytpCi9yaXgwVlFNMTBIMFFuTkZlb0pmS0VITHhXb2czSHVBSVZxTEg4NmJwcytmb25nOCtuMGgvbk5NZUZNYjdSNUMKdmFHNEVkc0VHV0hZS2FiL2lzRlowUVU0Ci0tLS0tRU5EIFBSSVZBVEUgS0VZLS0tLS0K diff --git a/OracleIdentityGovernance/kubernetes/charts/apache-samples/default-sample/README.md b/OracleIdentityGovernance/kubernetes/charts/apache-samples/default-sample/README.md new file mode 100755 index 000000000..806bab5c9 --- /dev/null +++ b/OracleIdentityGovernance/kubernetes/charts/apache-samples/default-sample/README.md @@ -0,0 +1,39 @@ +# Apache load balancer default sample +In this sample, we will configure the Apache webtier as a load balancer for a WebLogic domain using the default configuration. We will demonstrate how to use the Apache webtier to handle traffic to a backend WebLogic domain. + +## 1. Create a WebLogic domain +We need to prepare a backend domain for load balancing by the Apache webtier. Refer to the [sample](/kubernetes/samples/scripts/create-weblogic-domain/domain-home-on-pv/README.md), to create a WebLogic domain. Keep the default values for the following configuration parameters: +- namespace: `default` +- domainUID: `domain1` +- clusterName: `cluster-1` +- adminServerName: `admin-server` +- adminPort: `7001` +- managedServerPort: `8001` + +After the domain is successfully created, deploy the sample web application, `testwebapp.war`, on the domain cluster using the WLS Administration Console. The sample web application is located in the `kubernetes/samples/charts/application` directory. + +## 2. Build the Apache webtier Docker image +Refer to the [sample](https://github.com/oracle/docker-images/tree/master/OracleWebLogic/samples/12213-webtier-apache), to build the Apache webtier Docker image. + +## 3. 
Install the Apache webtier with a Helm chart +The Apache webtier Helm chart [is located here](../../apache-webtier/README.md). +Install the Apache webtier Helm chart into the default namespace with the default settings: +```shell +$ cd kubernetes/samples/charts +$ helm install my-release apache-webtier +``` + +## 4. Run the sample application +Now you can send request to the WebLogic domain with the unique entry point of Apache. Alternatively, you can access the URL in a web browser. +```shell +$ curl --silent http://${HOSTNAME}:30305/weblogic/testwebapp/ +``` +You can also use an SSL URL to send requests to the WebLogic domain. Access the SSL URL via the `curl` command or a web browser. +```shell +$ curl -k --silent https://${HOSTNAME}:30443/weblogic/testwebapp/ +``` + +## 5. Uninstall the Apache webtier +```shell +$ helm uninstall my-release +``` diff --git a/OracleIdentityGovernance/kubernetes/charts/apache-webtier/Chart.yaml b/OracleIdentityGovernance/kubernetes/charts/apache-webtier/Chart.yaml new file mode 100755 index 000000000..413b8ba2d --- /dev/null +++ b/OracleIdentityGovernance/kubernetes/charts/apache-webtier/Chart.yaml @@ -0,0 +1,20 @@ +# Copyright (c) 2018, 2021, Oracle and/or its affiliates. +# Licensed under the Universal Permissive License v 1.0 as shown at https://oss.oracle.com/licenses/upl. 
+ +apiVersion: v1 +name: apache-webtier +version: 1.0.0 +appVersion: 12.2.1.3 +description: Chart for Apache HTTP Server +keywords: +- apache +- http +- https +- load balance +- proxy +home: https://httpd.apache.org +sources: +- https://github.com/oracle/weblogic-kubernetes-operator/tree/master/kubernetes/samples/charts/apache-webtier +maintainers: +- name: Oracle +engine: gotpl diff --git a/OracleIdentityGovernance/kubernetes/charts/apache-webtier/README.md b/OracleIdentityGovernance/kubernetes/charts/apache-webtier/README.md new file mode 100755 index 000000000..2be875dd3 --- /dev/null +++ b/OracleIdentityGovernance/kubernetes/charts/apache-webtier/README.md @@ -0,0 +1,92 @@ +# Apache webtier Helm chart + +This Helm chart bootstraps an Apache HTTP Server deployment on a [Kubernetes](http://kubernetes.io) cluster using the [Helm](https://helm.sh) package manager. + +The chart depends on the Docker image for the Apache HTTP Server with Oracle WebLogic Server Proxy Plugin (supported versions 12.2.1.3.0 and 12.2.1.4.0). See the details in [Apache HTTP Server with Oracle WebLogic Server Proxy Plugin on Docker](https://github.com/oracle/docker-images/tree/master/OracleWebLogic/samples/12213-webtier-apache). + +## Prerequisites + +You will need to build a Docker image with the Apache webtier in it using the sample provided [here](https://github.com/oracle/docker-images/tree/master/OracleWebLogic/samples/12213-webtier-apache) +in order to use this load balancer. + +## Installing the Chart +To install the chart with the release name `my-release`: +```shell +$ helm install my-release apache-webtier +``` +The command deploys the Apache HTTP Server on the Kubernetes cluster with the default configuration. The [configuration](#configuration) section lists the parameters that can be configured during installation. 
+ +> **Tip**: List all releases using `helm list` + +## Uninstalling the Chart + +To uninstall/delete `my-release`: + +```shell +$ helm uninstall my-release +``` + +The command removes all the Kubernetes components associated with the chart and deletes the release. + +## Configuration + +The following table lists the configurable parameters of the Apache webtier chart and their default values. + + +| Parameter | Description | Default | +| -----------------------------------| ------------------------------------------------------------- | ----------------------| +| `image` | Apache webtier Docker image | `oracle/apache:12.2.1.3` | +| `imagePullPolicy` | Image pull policy for the Apache webtier Docker image | `IfNotPresent` | +| `imagePullSecrets` | Image pull Secrets required to access the registry containing the Apache webtier Docker image| ``| +| `persistentVolumeClaimName` | Persistence Volume Claim name Apache webtier | `` | +| `createRBAC` | Boolean indicating if RBAC resources should be created | `true` | +| `httpNodePort` | Node port to expose for HTTP access | `30305` | +| `httpsNodePort` | Node port to expose for HTTPS access | `30443` | +| `virtualHostName` | The `VirtualHostName` of the Apache HTTP Server | `` | +| `customCert` | The customer supplied certificate | `` | +| `customKey` | The customer supplied private key | `` | +| `domainUID` | Unique ID identifying a domain | `domain1` | +| `clusterName` | Cluster name | `cluster-1` | +| `adminServerName` | Name of the Administration Server | `admin-server` | +| `adminPort` | Port number for Administration Server | `7001` | +| `managedServerPort` | Port number for each Managed Server | `8001` | +| `location` | Prepath for all applications deployed on the WebLogic cluster | `/weblogic` | +| `useNonPriviledgedPorts` | Configuration of Apache webtier on NonPriviledgedPort | `false` | + + +Specify each parameter using the `--set key=value[,key=value]` argument to `helm install`. 
For example: + +```shell +$ helm install my-release --set persistentVolumeClaimName=webtier-apache-pvc apache-webtier +``` + +Alternatively, a YAML file that specifies the values for the parameters can be provided while +installing the chart. For example: + +```shell +$ helm install my-release --values values.yaml apache-webtier +``` +## useNonPriviledgedPorts +By default, the chart will install the Apache webtier on PriviledgedPort (port 80). Set the flag `useNonPriviledgedPorts=true` to enable the Apache webtier to listen on port `8080` + + +## RBAC +By default, the chart will install the recommended RBAC roles and role bindings. + +Set the flag `--authorization-mode=RBAC` on the API server. See the following document for how to enable [RBAC](https://kubernetes.io/docs/reference/access-authn-authz/rbac/). + +To determine if your cluster supports RBAC, run the following command: + +```shell +$ kubectl api-versions | grep rbac +``` + +If the output contains "beta", you may install the chart with RBAC enabled. + +### Disable RBAC role/rolebinding creation + +To disable the creation of RBAC resources (on clusters with RBAC). Do the following: + +```shell +$ helm install my-release apache-webtier --set createRBAC=false +``` diff --git a/OracleIdentityGovernance/kubernetes/charts/apache-webtier/templates/_helpers.tpl b/OracleIdentityGovernance/kubernetes/charts/apache-webtier/templates/_helpers.tpl new file mode 100755 index 000000000..c7999d287 --- /dev/null +++ b/OracleIdentityGovernance/kubernetes/charts/apache-webtier/templates/_helpers.tpl @@ -0,0 +1,25 @@ +# Copyright (c) 2018, 2021, Oracle and/or its affiliates. +# Licensed under the Universal Permissive License v 1.0 as shown at https://oss.oracle.com/licenses/upl. + +{{/* vim: set filetype=mustache: */}} +{{/* +Expand the name of the chart. 
+*/}} +{{- define "apache.name" -}} +{{- default .Chart.Name .Values.nameOverride | trunc 63 | trimSuffix "-" -}} +{{- end -}} + +{{/* +Create a default fully qualified app name. +*/}} +{{- define "apache.fullname" -}} +{{- $name := default .Chart.Name .Values.nameOverride -}} +{{- printf "%s-%s" .Release.Name $name | trunc 63 -}} +{{- end -}} + +{{/* +Create the name of the service account to use +*/}} +{{- define "apache.serviceAccountName" -}} +{{- printf "%s-%s" .Release.Name .Chart.Name | trunc 63 -}} +{{- end -}} diff --git a/OracleIdentityGovernance/kubernetes/charts/apache-webtier/templates/cluster-role-binding.yaml b/OracleIdentityGovernance/kubernetes/charts/apache-webtier/templates/cluster-role-binding.yaml new file mode 100755 index 000000000..188e54d1a --- /dev/null +++ b/OracleIdentityGovernance/kubernetes/charts/apache-webtier/templates/cluster-role-binding.yaml @@ -0,0 +1,17 @@ +# Copyright (c) 2018, 2021, Oracle and/or its affiliates. +# Licensed under the Universal Permissive License v 1.0 as shown at https://oss.oracle.com/licenses/upl. + +{{ if .Values.createRBAC }} +kind: ClusterRoleBinding +apiVersion: rbac.authorization.k8s.io/v1 +metadata: + name: {{ template "apache.fullname" . }} +roleRef: + apiGroup: rbac.authorization.k8s.io + kind: ClusterRole + name: {{ template "apache.fullname" . }} +subjects: +- kind: ServiceAccount + name: {{ template "apache.serviceAccountName" . }} + namespace: {{ .Release.Namespace | quote }} +{{ end }} diff --git a/OracleIdentityGovernance/kubernetes/charts/apache-webtier/templates/cluster-role.yaml b/OracleIdentityGovernance/kubernetes/charts/apache-webtier/templates/cluster-role.yaml new file mode 100755 index 000000000..449a87664 --- /dev/null +++ b/OracleIdentityGovernance/kubernetes/charts/apache-webtier/templates/cluster-role.yaml @@ -0,0 +1,29 @@ +# Copyright (c) 2018, 2021, Oracle and/or its affiliates. 
+# Licensed under the Universal Permissive License v 1.0 as shown at https://oss.oracle.com/licenses/upl. + +{{ if .Values.createRBAC }} +kind: ClusterRole +apiVersion: rbac.authorization.k8s.io/v1 +metadata: + name: {{ template "apache.fullname" . }} +rules: + - apiGroups: + - "" + resources: + - pods + - services + - endpoints + - secrets + verbs: + - get + - list + - watch + - apiGroups: + - extensions + resources: + - ingresses + verbs: + - get + - list + - watch +{{ end }} diff --git a/OracleIdentityGovernance/kubernetes/charts/apache-webtier/templates/deployment.yaml b/OracleIdentityGovernance/kubernetes/charts/apache-webtier/templates/deployment.yaml new file mode 100755 index 000000000..cd7b07ad3 --- /dev/null +++ b/OracleIdentityGovernance/kubernetes/charts/apache-webtier/templates/deployment.yaml @@ -0,0 +1,106 @@ +# Copyright (c) 2018, 2021, Oracle and/or its affiliates. +# Licensed under the Universal Permissive License v 1.0 as shown at https://oss.oracle.com/licenses/upl. + +kind: Deployment +apiVersion: apps/v1 +metadata: + name: {{ template "apache.fullname" . }} + namespace: {{ .Release.Namespace }} + labels: + app: {{ template "apache.fullname" . }} +spec: + replicas: 1 + selector: + matchLabels: + app: {{ template "apache.fullname" . }} + template: + metadata: + labels: + app: {{ template "apache.fullname" . }} + spec: + serviceAccountName: {{ template "apache.serviceAccountName" . }} + terminationGracePeriodSeconds: 60 +{{- if or (and (.Values.virtualHostName) (.Values.customCert)) (.Values.persistentVolumeClaimName) }} + volumes: +{{- end }} +{{- if and (.Values.virtualHostName) (.Values.customCert) }} + - name: serving-cert + secret: + defaultMode: 420 + secretName: {{ template "apache.fullname" . }}-cert +{{- end }} +{{- if .Values.persistentVolumeClaimName }} + - name: {{ template "apache.fullname" . 
}} + persistentVolumeClaim: + claimName: {{ .Values.persistentVolumeClaimName | quote }} +{{- end }} + {{- if .Values.imagePullSecrets }} + imagePullSecrets: + {{ .Values.imagePullSecrets | toYaml }} + {{- end }} + containers: + - name: {{ template "apache.fullname" . }} + image: {{ .Values.image | quote }} + imagePullPolicy: {{ .Values.imagePullPolicy | quote }} +{{- if or (and (.Values.virtualHostName) (.Values.customCert)) (.Values.persistentVolumeClaimName) }} + volumeMounts: +{{- end }} +{{- if and (.Values.virtualHostName) (.Values.customCert) }} + - name: serving-cert + mountPath: "/var/serving-cert" +{{- end }} +{{- if .Values.persistentVolumeClaimName }} + - name: {{ template "apache.fullname" . }} + mountPath: "/config" +{{- end }} +{{- if or (not (.Values.persistentVolumeClaimName)) (.Values.virtualHostName) }} + env: +{{- end }} +{{- if .Values.useNonPriviledgedPorts }} + - name: NonPriviledgedPorts + value: "true" +{{- end }} +{{- if not (.Values.persistentVolumeClaimName) }} + - name: WEBLOGIC_CLUSTER + value: "{{ .Values.domainUID | replace "_" "-" | lower }}-cluster-{{ .Values.clusterName | replace "_" "-" | lower }}:{{ .Values.managedServerPort }}" + - name: LOCATION + value: {{ .Values.location | quote }} + - name: WEBLOGIC_HOST + value: "{{ .Values.domainUID | replace "_" "-" | lower }}-{{ .Values.adminServerName | replace "_" "-" | lower }}" + - name: WEBLOGIC_PORT + value: {{ .Values.adminPort | quote }} +{{- end }} +{{- if .Values.virtualHostName }} + - name: VIRTUAL_HOST_NAME + value: {{ .Values.virtualHostName | quote }} +{{- if .Values.customCert }} + - name: SSL_CERT_FILE + value: "/var/serving-cert/tls.crt" + - name: SSL_CERT_KEY_FILE + value: "/var/serving-cert/tls.key" +{{- end }} +{{- end }} + readinessProbe: + tcpSocket: +{{- if .Values.useNonPriviledgedPorts }} + port: 8080 +{{- else }} + port: 80 +{{- end }} + failureThreshold: 1 + initialDelaySeconds: 10 + periodSeconds: 10 + successThreshold: 1 + timeoutSeconds: 2 + livenessProbe: 
+ tcpSocket: +{{- if .Values.useNonPriviledgedPorts }} + port: 8080 +{{- else }} + port: 80 +{{- end }} + failureThreshold: 3 + initialDelaySeconds: 10 + periodSeconds: 10 + successThreshold: 1 + timeoutSeconds: 2 diff --git a/OracleIdentityGovernance/kubernetes/charts/apache-webtier/templates/secret.yaml b/OracleIdentityGovernance/kubernetes/charts/apache-webtier/templates/secret.yaml new file mode 100755 index 000000000..bb716f50b --- /dev/null +++ b/OracleIdentityGovernance/kubernetes/charts/apache-webtier/templates/secret.yaml @@ -0,0 +1,14 @@ +# Copyright (c) 2018, 2021, Oracle and/or its affiliates. +# Licensed under the Universal Permissive License v 1.0 as shown at https://oss.oracle.com/licenses/upl. + +{{ if .Values.customCert }} +apiVersion: v1 +kind: Secret +metadata: + name: {{ template "apache.fullname" . }}-cert + namespace: {{ .Release.Namespace | quote }} +type: Opaque +data: + tls.crt: {{ .Values.customCert | quote }} + tls.key: {{ .Values.customKey | quote }} +{{ end }} diff --git a/OracleIdentityGovernance/kubernetes/charts/apache-webtier/templates/service-account.yaml b/OracleIdentityGovernance/kubernetes/charts/apache-webtier/templates/service-account.yaml new file mode 100755 index 000000000..f76d46aec --- /dev/null +++ b/OracleIdentityGovernance/kubernetes/charts/apache-webtier/templates/service-account.yaml @@ -0,0 +1,8 @@ +# Copyright (c) 2018, 2021, Oracle and/or its affiliates. +# Licensed under the Universal Permissive License v 1.0 as shown at https://oss.oracle.com/licenses/upl. + +apiVersion: v1 +kind: ServiceAccount +metadata: + name: {{ template "apache.serviceAccountName" . 
}} + namespace: {{ .Release.Namespace | quote }} diff --git a/OracleIdentityGovernance/kubernetes/charts/apache-webtier/templates/service.yaml b/OracleIdentityGovernance/kubernetes/charts/apache-webtier/templates/service.yaml new file mode 100755 index 000000000..c8b8089eb --- /dev/null +++ b/OracleIdentityGovernance/kubernetes/charts/apache-webtier/templates/service.yaml @@ -0,0 +1,28 @@ +# Copyright (c) 2018, 2021, Oracle and/or its affiliates. +# Licensed under the Universal Permissive License v 1.0 as shown at https://oss.oracle.com/licenses/upl. + +apiVersion: v1 +kind: Service +metadata: + name: {{ template "apache.fullname" . }} + namespace: {{ .Release.Namespace | quote }} +spec: + type: NodePort + selector: + app: {{ template "apache.fullname" . }} + ports: +{{- if .Values.useNonPriviledgedPorts }} + - port: 8080 +{{- else}} + - port: 80 +{{- end }} + nodePort: {{ .Values.httpNodePort }} + name: http +{{- if .Values.virtualHostName }} + - port: 4433 +{{- else }} + - port: 443 +{{- end }} + nodePort: {{ .Values.httpsNodePort }} + name: https + diff --git a/OracleIdentityGovernance/kubernetes/charts/apache-webtier/values.yaml b/OracleIdentityGovernance/kubernetes/charts/apache-webtier/values.yaml new file mode 100755 index 000000000..ee0a8a815 --- /dev/null +++ b/OracleIdentityGovernance/kubernetes/charts/apache-webtier/values.yaml @@ -0,0 +1,79 @@ +# Copyright (c) 2018, 2021, Oracle and/or its affiliates. +# Licensed under the Universal Permissive License v 1.0 as shown at https://oss.oracle.com/licenses/upl. + +# Apache webtier docker image +image: "oracle/apache:12.2.1.3" + +# imagePullPolicy specifies the image pull policy for the apache webiter docker image +imagePullPolicy: "IfNotPresent" + +# imagePullSecrets contains an optional list of Kubernetes secrets, that are needed +# to access the registry containing the apache webtier image. +# If no secrets are required, then omit this property. 
+# +# Example : a secret is needed, and has been stored in 'my-apache-webtier-secret' +# +# imagePullSecrets: +# - name: my-apache-webtier-secret +# +# imagePullSecrets: +# - name: + +# Volume path for Apache webtier. By default, it is empty, which causes the volume +# mount be disabled and, therefore, the built-in Apache plugin config be used. +# Use this to provide your own Apache webtier configuration as needed; simply define this +# path and put your own custom_mod_wl_apache.conf file under this path. +persistentVolumeClaimName: + +# Boolean indicating if RBAC resources should be created +createRBAC: true + +# NodePort to expose for http access +httpNodePort: 30305 + +# NodePort to expose for https access +httpsNodePort: 30443 + +# The VirtualHostName of the Apache HTTP server. It is used to enable custom SSL configuration. +# If it is set, the Apache HTTP Server is configured to listen to port 4433 for SSL traffic. +virtualHostName: + +# The customer supplied certificate to use for Apache webtier SSL configuration. +# The value must be a string containing a base64 encoded certificate. +# If 'virtualHostName' is set, the custom certificate and private key are not provided, +# the default built-in auto-generated sample certificate and private key in the apache image will be used. +# This parameter is ignored if 'virtualHostName' is not set. +customCert: + +# The customer supplied private key to use for Apache webtier SSL configuration. +# The value must be a string containing a base64 encoded key. +# If 'virtualHostName' is set, the custom certificate and private key are not provided, +# the default built-in auto-generated sample certificate and private key in the apache image will be used. +# This parameter is ignored if 'virtualHostName' is not set. +customKey: + +# Unique ID identifying a domain. +# This ID must not contain an underscore ("_"), and must be lowercase and unique across all domains in a Kubernetes cluster. 
+domainUID: "domain1" + +# Cluster name +clusterName: "cluster-1" + +# Name of the admin server +adminServerName: "admin-server" + +# Port number for admin server +adminPort: 7001 + +# Port number for each managed server +managedServerPort: 8001 + +# Prepath for all application deployed on WebLogic cluster. +# For example, if it is set to '/weblogic', all applications deployed on the cluster can be accessed via +# http://myhost:myport/weblogic/application_end_url +# where 'myhost' is the IP of the machine that runs the Apache web tier, and +# 'myport' is the port that the Apache web tier is publicly exposed to. +location: "/weblogic" + +# Use non privileged port 8080 to listen. If set to false, default privileged port 80 will be used. +useNonPriviledgedPorts: false diff --git a/OracleIdentityGovernance/kubernetes/3.0.1/ingress-per-domain/Chart.yaml b/OracleIdentityGovernance/kubernetes/charts/ingress-per-domain/Chart.yaml old mode 100644 new mode 100755 similarity index 54% rename from OracleIdentityGovernance/kubernetes/3.0.1/ingress-per-domain/Chart.yaml rename to OracleIdentityGovernance/kubernetes/charts/ingress-per-domain/Chart.yaml index 5c7f11b71..dc3981291 --- a/OracleIdentityGovernance/kubernetes/3.0.1/ingress-per-domain/Chart.yaml +++ b/OracleIdentityGovernance/kubernetes/charts/ingress-per-domain/Chart.yaml @@ -1,8 +1,8 @@ -# Copyright (c) 2020, Oracle Corporation and/or its affiliates. +# Copyright (c) 2020, 2021, Oracle and/or its affiliates. # Licensed under the Universal Permissive License v 1.0 as shown at https://oss.oracle.com/licenses/upl. - +# apiVersion: v1 appVersion: "1.0" -description: A Helm chart to create an Ingress for a Oracle WebLogic Server domain. +description: A Helm chart to create an Ingress for a WLS domain. 
name: ingress-per-domain version: 0.1.0 diff --git a/OracleIdentityGovernance/kubernetes/charts/ingress-per-domain/templates/nginx-ingress-k8s1.19.yaml b/OracleIdentityGovernance/kubernetes/charts/ingress-per-domain/templates/nginx-ingress-k8s1.19.yaml new file mode 100755 index 000000000..adbc02e0a --- /dev/null +++ b/OracleIdentityGovernance/kubernetes/charts/ingress-per-domain/templates/nginx-ingress-k8s1.19.yaml @@ -0,0 +1,193 @@ +# Copyright (c) 2020, 2021, Oracle and/or its affiliates. +# Licensed under the Universal Permissive License v 1.0 as shown at https://oss.oracle.com/licenses/upl. +# +{{- if semverCompare ">=1.19-0" .Capabilities.KubeVersion.GitVersion -}} +{{- if eq .Values.type "NGINX" }} +{{- if or (eq .Values.sslType "NONSSL") (eq .Values.sslType "SSL") }} +--- +apiVersion: networking.k8s.io/v1 +kind: Ingress +metadata: + name: {{ .Values.wlsDomain.domainUID }}-nginx + namespace: {{ .Release.Namespace }} + annotations: + nginx.ingress.kubernetes.io/affinity: 'cookie' + nginx.ingress.kubernetes.io/enable-access-log: 'false' + kubernetes.io/ingress.class: 'nginx' +{{- if eq .Values.sslType "SSL" }} + nginx.ingress.kubernetes.io/proxy-buffer-size: '2000k' + nginx.ingress.kubernetes.io/configuration-snippet: | + more_set_input_headers "X-Forwarded-Proto: https"; + more_set_input_headers "WL-Proxy-SSL: true"; + nginx.ingress.kubernetes.io/ingress.allow-http: 'false' +{{- end }} +spec: + rules: + - http: + paths: + - path: /console + pathType: ImplementationSpecific + backend: + service: + name: '{{ .Values.wlsDomain.domainUID }}-{{ .Values.wlsDomain.adminServerName | lower | replace "_" "-" }}' + port: + number: {{ .Values.wlsDomain.adminServerPort }} + - path: /em + pathType: ImplementationSpecific + backend: + service: + name: '{{ .Values.wlsDomain.domainUID }}-{{ .Values.wlsDomain.adminServerName | lower | replace "_" "-" }}' + port: + number: {{ .Values.wlsDomain.adminServerPort }} + - path: /soa + pathType: ImplementationSpecific + backend: + 
service: + name: '{{ .Values.wlsDomain.domainUID }}-cluster-{{ .Values.wlsDomain.soaClusterName | lower | replace "_" "-" }}' + port: + number: {{ .Values.wlsDomain.soaManagedServerPort }} + - path: /integration + pathType: ImplementationSpecific + backend: + service: + name: '{{ .Values.wlsDomain.domainUID }}-cluster-{{ .Values.wlsDomain.soaClusterName | lower | replace "_" "-" }}' + port: + number: {{ .Values.wlsDomain.soaManagedServerPort }} + - path: /soa-infra + pathType: ImplementationSpecific + backend: + service: + name: '{{ .Values.wlsDomain.domainUID }}-cluster-{{ .Values.wlsDomain.soaClusterName | lower | replace "_" "-" }}' + port: + number: {{ .Values.wlsDomain.soaManagedServerPort }} + - path: /identity + pathType: ImplementationSpecific + backend: + service: + name: '{{ .Values.wlsDomain.domainUID }}-cluster-{{ .Values.wlsDomain.oimClusterName | lower | replace "_" "-" }}' + port: + number: {{ .Values.wlsDomain.oimManagedServerPort }} + - path: /admin + pathType: ImplementationSpecific + backend: + service: + name: '{{ .Values.wlsDomain.domainUID }}-cluster-{{ .Values.wlsDomain.oimClusterName | lower | replace "_" "-" }}' + port: + number: {{ .Values.wlsDomain.oimManagedServerPort }} + - path: /oim + pathType: ImplementationSpecific + backend: + service: + name: '{{ .Values.wlsDomain.domainUID }}-cluster-{{ .Values.wlsDomain.oimClusterName | lower | replace "_" "-" }}' + port: + number: {{ .Values.wlsDomain.oimManagedServerPort }} + - path: /sysadmin + pathType: ImplementationSpecific + backend: + service: + name: '{{ .Values.wlsDomain.domainUID }}-cluster-{{ .Values.wlsDomain.oimClusterName | lower | replace "_" "-" }}' + port: + number: {{ .Values.wlsDomain.oimManagedServerPort }} + - path: /workflowservice + pathType: ImplementationSpecific + backend: + service: + name: '{{ .Values.wlsDomain.domainUID }}-cluster-{{ .Values.wlsDomain.oimClusterName | lower | replace "_" "-" }}' + port: + number: {{ .Values.wlsDomain.oimManagedServerPort }} + - 
path: /xlWebApp + pathType: ImplementationSpecific + backend: + service: + name: '{{ .Values.wlsDomain.domainUID }}-cluster-{{ .Values.wlsDomain.oimClusterName | lower | replace "_" "-" }}' + port: + number: {{ .Values.wlsDomain.oimManagedServerPort }} + - path: /Nexaweb + pathType: ImplementationSpecific + backend: + service: + name: '{{ .Values.wlsDomain.domainUID }}-cluster-{{ .Values.wlsDomain.oimClusterName | lower | replace "_" "-" }}' + port: + number: {{ .Values.wlsDomain.oimManagedServerPort }} + - path: /callbackResponseService + pathType: ImplementationSpecific + backend: + service: + name: '{{ .Values.wlsDomain.domainUID }}-cluster-{{ .Values.wlsDomain.oimClusterName | lower | replace "_" "-" }}' + port: + number: {{ .Values.wlsDomain.oimManagedServerPort }} + - path: /spml-xsd + pathType: ImplementationSpecific + backend: + service: + name: '{{ .Values.wlsDomain.domainUID }}-cluster-{{ .Values.wlsDomain.oimClusterName | lower | replace "_" "-" }}' + port: + number: {{ .Values.wlsDomain.oimManagedServerPort }} + - path: /HTTPClnt + pathType: ImplementationSpecific + backend: + service: + name: '{{ .Values.wlsDomain.domainUID }}-cluster-{{ .Values.wlsDomain.oimClusterName | lower | replace "_" "-" }}' + port: + number: {{ .Values.wlsDomain.oimManagedServerPort }} + - path: /reqsvc + pathType: ImplementationSpecific + backend: + service: + name: '{{ .Values.wlsDomain.domainUID }}-cluster-{{ .Values.wlsDomain.oimClusterName | lower | replace "_" "-" }}' + port: + number: {{ .Values.wlsDomain.oimManagedServerPort }} + - path: /iam + pathType: ImplementationSpecific + backend: + service: + name: '{{ .Values.wlsDomain.domainUID }}-cluster-{{ .Values.wlsDomain.oimClusterName | lower | replace "_" "-" }}' + port: + number: {{ .Values.wlsDomain.oimManagedServerPort }} + - path: /provisioning-callback + pathType: ImplementationSpecific + backend: + service: + name: '{{ .Values.wlsDomain.domainUID }}-cluster-{{ .Values.wlsDomain.oimClusterName | lower | replace 
"_" "-" }}' + port: + number: {{ .Values.wlsDomain.oimManagedServerPort }} + - path: /CertificationCallbackService + pathType: ImplementationSpecific + backend: + service: + name: '{{ .Values.wlsDomain.domainUID }}-cluster-{{ .Values.wlsDomain.oimClusterName | lower | replace "_" "-" }}' + port: + number: {{ .Values.wlsDomain.oimManagedServerPort }} + - path: /ucs + pathType: ImplementationSpecific + backend: + service: + name: '{{ .Values.wlsDomain.domainUID }}-cluster-{{ .Values.wlsDomain.oimClusterName | lower | replace "_" "-" }}' + port: + number: {{ .Values.wlsDomain.oimManagedServerPort }} + - path: /FacadeWebApp + pathType: ImplementationSpecific + backend: + service: + name: '{{ .Values.wlsDomain.domainUID }}-cluster-{{ .Values.wlsDomain.oimClusterName | lower | replace "_" "-" }}' + port: + number: {{ .Values.wlsDomain.oimManagedServerPort }} + - path: /OIGUI + pathType: ImplementationSpecific + backend: + service: + name: '{{ .Values.wlsDomain.domainUID }}-cluster-{{ .Values.wlsDomain.oimClusterName | lower | replace "_" "-" }}' + port: + number: {{ .Values.wlsDomain.oimManagedServerPort }} + - path: /weblogic + pathType: ImplementationSpecific + backend: + service: + name: '{{ .Values.wlsDomain.domainUID }}-cluster-{{ .Values.wlsDomain.oimClusterName | lower | replace "_" "-" }}' + port: + number: {{ .Values.wlsDomain.oimManagedServerPort }} + +{{- end }} +{{- end }} +{{- end }} + diff --git a/OracleIdentityGovernance/kubernetes/charts/ingress-per-domain/templates/nginx-ingress.yaml b/OracleIdentityGovernance/kubernetes/charts/ingress-per-domain/templates/nginx-ingress.yaml new file mode 100755 index 000000000..3e845d139 --- /dev/null +++ b/OracleIdentityGovernance/kubernetes/charts/ingress-per-domain/templates/nginx-ingress.yaml @@ -0,0 +1,124 @@ +# Copyright (c) 2020, 2021, Oracle and/or its affiliates. +# Licensed under the Universal Permissive License v 1.0 as shown at https://oss.oracle.com/licenses/upl. 
+# +{{- if semverCompare "<1.19-0" .Capabilities.KubeVersion.GitVersion -}} +{{- if eq .Values.type "NGINX" }} +{{- if or (eq .Values.sslType "NONSSL") (eq .Values.sslType "SSL") }} +--- +apiVersion: extensions/v1beta1 +kind: Ingress +metadata: + name: {{ .Values.wlsDomain.domainUID }}-nginx + namespace: {{ .Release.Namespace }} + annotations: + nginx.ingress.kubernetes.io/affinity: 'cookie' + nginx.ingress.kubernetes.io/enable-access-log: 'false' + kubernetes.io/ingress.class: 'nginx' +{{- if eq .Values.sslType "SSL" }} + nginx.ingress.kubernetes.io/proxy-buffer-size: '2000k' + nginx.ingress.kubernetes.io/configuration-snippet: | + more_set_input_headers "X-Forwarded-Proto: https"; + more_set_input_headers "WL-Proxy-SSL: true"; + nginx.ingress.kubernetes.io/ingress.allow-http: 'false' +{{- end }} +spec: + rules: + - http: + paths: + - path: /console + backend: + serviceName: '{{ .Values.wlsDomain.domainUID }}-{{ .Values.wlsDomain.adminServerName | lower | replace "_" "-" }}' + servicePort: {{ .Values.wlsDomain.adminServerPort }} + - path: /em + backend: + serviceName: '{{ .Values.wlsDomain.domainUID }}-{{ .Values.wlsDomain.adminServerName | lower | replace "_" "-" }}' + servicePort: {{ .Values.wlsDomain.adminServerPort }} + - path: /soa + backend: + serviceName: '{{ .Values.wlsDomain.domainUID }}-cluster-{{ .Values.wlsDomain.soaClusterName | lower | replace "_" "-" }}' + servicePort: {{ .Values.wlsDomain.soaManagedServerPort }} + - path: /integration + backend: + serviceName: '{{ .Values.wlsDomain.domainUID }}-cluster-{{ .Values.wlsDomain.soaClusterName | lower | replace "_" "-" }}' + servicePort: {{ .Values.wlsDomain.soaManagedServerPort }} + - path: /soa-infra + backend: + serviceName: '{{ .Values.wlsDomain.domainUID }}-cluster-{{ .Values.wlsDomain.soaClusterName | lower | replace "_" "-" }}' + servicePort: {{ .Values.wlsDomain.soaManagedServerPort }} + - path: /identity + backend: + serviceName: '{{ .Values.wlsDomain.domainUID }}-cluster-{{ 
.Values.wlsDomain.oimClusterName | lower | replace "_" "-" }}' + servicePort: {{ .Values.wlsDomain.oimManagedServerPort }} + - path: /admin + backend: + serviceName: '{{ .Values.wlsDomain.domainUID }}-cluster-{{ .Values.wlsDomain.oimClusterName | lower | replace "_" "-" }}' + servicePort: {{ .Values.wlsDomain.oimManagedServerPort }} + - path: /oim + backend: + serviceName: '{{ .Values.wlsDomain.domainUID }}-cluster-{{ .Values.wlsDomain.oimClusterName | lower | replace "_" "-" }}' + servicePort: {{ .Values.wlsDomain.oimManagedServerPort }} + - path: /sysadmin + backend: + serviceName: '{{ .Values.wlsDomain.domainUID }}-cluster-{{ .Values.wlsDomain.oimClusterName | lower | replace "_" "-" }}' + servicePort: {{ .Values.wlsDomain.oimManagedServerPort }} + - path: /workflowservice + backend: + serviceName: '{{ .Values.wlsDomain.domainUID }}-cluster-{{ .Values.wlsDomain.oimClusterName | lower | replace "_" "-" }}' + servicePort: {{ .Values.wlsDomain.oimManagedServerPort }} + - path: /xlWebApp + backend: + serviceName: '{{ .Values.wlsDomain.domainUID }}-cluster-{{ .Values.wlsDomain.oimClusterName | lower | replace "_" "-" }}' + servicePort: {{ .Values.wlsDomain.oimManagedServerPort }} + - path: /Nexaweb + backend: + serviceName: '{{ .Values.wlsDomain.domainUID }}-cluster-{{ .Values.wlsDomain.oimClusterName | lower | replace "_" "-" }}' + servicePort: {{ .Values.wlsDomain.oimManagedServerPort }} + - path: /callbackResponseService + backend: + serviceName: '{{ .Values.wlsDomain.domainUID }}-cluster-{{ .Values.wlsDomain.oimClusterName | lower | replace "_" "-" }}' + servicePort: {{ .Values.wlsDomain.oimManagedServerPort }} + - path: /spml-xsd + backend: + serviceName: '{{ .Values.wlsDomain.domainUID }}-cluster-{{ .Values.wlsDomain.oimClusterName | lower | replace "_" "-" }}' + servicePort: {{ .Values.wlsDomain.oimManagedServerPort }} + - path: /HTTPClnt + backend: + serviceName: '{{ .Values.wlsDomain.domainUID }}-cluster-{{ .Values.wlsDomain.oimClusterName | lower | replace 
"_" "-" }}' + servicePort: {{ .Values.wlsDomain.oimManagedServerPort }} + - path: /reqsvc + backend: + serviceName: '{{ .Values.wlsDomain.domainUID }}-cluster-{{ .Values.wlsDomain.oimClusterName | lower | replace "_" "-" }}' + servicePort: {{ .Values.wlsDomain.oimManagedServerPort }} + - path: /iam + backend: + serviceName: '{{ .Values.wlsDomain.domainUID }}-cluster-{{ .Values.wlsDomain.oimClusterName | lower | replace "_" "-" }}' + servicePort: {{ .Values.wlsDomain.oimManagedServerPort }} + - path: /provisioning-callback + backend: + serviceName: '{{ .Values.wlsDomain.domainUID }}-cluster-{{ .Values.wlsDomain.oimClusterName | lower | replace "_" "-" }}' + servicePort: {{ .Values.wlsDomain.oimManagedServerPort }} + - path: /CertificationCallbackService + backend: + serviceName: '{{ .Values.wlsDomain.domainUID }}-cluster-{{ .Values.wlsDomain.oimClusterName | lower | replace "_" "-" }}' + servicePort: {{ .Values.wlsDomain.oimManagedServerPort }} + - path: /ucs + backend: + serviceName: '{{ .Values.wlsDomain.domainUID }}-cluster-{{ .Values.wlsDomain.oimClusterName | lower | replace "_" "-" }}' + servicePort: {{ .Values.wlsDomain.oimManagedServerPort }} + - path: /FacadeWebApp + backend: + serviceName: '{{ .Values.wlsDomain.domainUID }}-cluster-{{ .Values.wlsDomain.oimClusterName | lower | replace "_" "-" }}' + servicePort: {{ .Values.wlsDomain.oimManagedServerPort }} + - path: /OIGUI + backend: + serviceName: '{{ .Values.wlsDomain.domainUID }}-cluster-{{ .Values.wlsDomain.oimClusterName | lower | replace "_" "-" }}' + servicePort: {{ .Values.wlsDomain.oimManagedServerPort }} + - path: /weblogic + backend: + serviceName: '{{ .Values.wlsDomain.domainUID }}-cluster-{{ .Values.wlsDomain.oimClusterName | lower | replace "_" "-" }}' + servicePort: {{ .Values.wlsDomain.oimManagedServerPort }} + +{{- end }} +{{- end }} +{{- end }} + diff --git a/OracleIdentityGovernance/kubernetes/charts/ingress-per-domain/templates/traefik-ingress-k8s1.19.yaml 
b/OracleIdentityGovernance/kubernetes/charts/ingress-per-domain/templates/traefik-ingress-k8s1.19.yaml new file mode 100755 index 000000000..d19b64c93 --- /dev/null +++ b/OracleIdentityGovernance/kubernetes/charts/ingress-per-domain/templates/traefik-ingress-k8s1.19.yaml @@ -0,0 +1,92 @@ +# Copyright (c) 2020, 2021, Oracle and/or its affiliates. +# Licensed under the Universal Permissive License v 1.0 as shown at https://oss.oracle.com/licenses/upl. +# +{{- if semverCompare ">=1.19-0" .Capabilities.KubeVersion.GitVersion -}} +{{- if eq .Values.type "TRAEFIK" }} +{{- if or (eq .Values.sslType "NONSSL") (eq .Values.sslType "SSL") }} +--- +apiVersion: networking.k8s.io/v1 +kind: Ingress +metadata: + name: {{ .Values.wlsDomain.domainUID }}-traefik + namespace: {{ .Release.Namespace }} + labels: + weblogic.resourceVersion: domain-v2 + annotations: + kubernetes.io/ingress.class: 'traefik' +spec: + rules: + - host: '{{ .Values.traefik.hostname }}' + http: + paths: + - path: /console + pathType: ImplementationSpecific + backend: + service: + name: '{{ .Values.wlsDomain.domainUID }}-{{ .Values.wlsDomain.adminServerName | lower | replace "_" "-" }}' + port: + number: {{ .Values.wlsDomain.adminServerPort }} + - path: /em + pathType: ImplementationSpecific + backend: + service: + name: '{{ .Values.wlsDomain.domainUID }}-{{ .Values.wlsDomain.adminServerName | lower | replace "_" "-" }}' + port: + number: {{ .Values.wlsDomain.adminServerPort }} + - path: /soa + pathType: ImplementationSpecific + backend: + service: + name: '{{ .Values.wlsDomain.domainUID }}-cluster-{{ .Values.wlsDomain.soaClusterName | lower | replace "_" "-" }}' + port: + number: {{ .Values.wlsDomain.soaManagedServerPort }} + - path: /integration + pathType: ImplementationSpecific + backend: + service: + name: '{{ .Values.wlsDomain.domainUID }}-cluster-{{ .Values.wlsDomain.soaClusterName | lower | replace "_" "-" }}' + port: + number: {{ .Values.wlsDomain.soaManagedServerPort }} + - path: /soa-infra + pathType: 
ImplementationSpecific + backend: + service: + name: '{{ .Values.wlsDomain.domainUID }}-cluster-{{ .Values.wlsDomain.soaClusterName | lower | replace "_" "-" }}' + port: + number: {{ .Values.wlsDomain.soaManagedServerPort }} + - path: + pathType: ImplementationSpecific + backend: + service: + name: '{{ .Values.wlsDomain.domainUID }}-cluster-{{ .Values.wlsDomain.oimClusterName | lower | replace "_" "-" }}' + port: + number: {{ .Values.wlsDomain.oimManagedServerPort }} +{{- if eq .Values.sslType "SSL" }} + tls: + - hosts: + - '{{ .Values.traefik.hostname }}' + secretName: {{ .Values.secretName }} +{{- end }} +--- +#Create Traefik Middleware custom resource for SSL Termination +{{- if eq .Values.sslType "SSL" }} +apiVersion: traefik.containo.us/v1alpha1 +kind: Middleware +metadata: + name: wls-proxy-ssl + namespace: {{ .Release.Namespace }} +spec: + headers: + customRequestHeaders: + X-Custom-Request-Header: "" + X-Forwarded-For: "" + WL-Proxy-Client-IP: "" + WL-Proxy-SSL: "" + WL-Proxy-SSL: "true" + sslRedirect: true +{{- end }} + +{{- end }} +{{- end }} +{{- end }} + diff --git a/OracleIdentityGovernance/kubernetes/charts/ingress-per-domain/templates/traefik-ingress.yaml b/OracleIdentityGovernance/kubernetes/charts/ingress-per-domain/templates/traefik-ingress.yaml new file mode 100755 index 000000000..efb029ee3 --- /dev/null +++ b/OracleIdentityGovernance/kubernetes/charts/ingress-per-domain/templates/traefik-ingress.yaml @@ -0,0 +1,74 @@ +# Copyright (c) 2020, 2021, Oracle and/or its affiliates. +# Licensed under the Universal Permissive License v 1.0 as shown at https://oss.oracle.com/licenses/upl. 
+# +{{- if semverCompare "<1.19-0" .Capabilities.KubeVersion.GitVersion -}} +{{- if eq .Values.type "TRAEFIK" }} +{{- if or (eq .Values.sslType "NONSSL") (eq .Values.sslType "SSL") }} +--- +apiVersion: extensions/v1beta1 +kind: Ingress +metadata: + name: {{ .Values.wlsDomain.domainUID }}-traefik + namespace: {{ .Release.Namespace }} + labels: + weblogic.resourceVersion: domain-v2 + annotations: + kubernetes.io/ingress.class: 'traefik' +spec: + rules: + - host: '{{ .Values.traefik.hostname }}' + http: + paths: + - path: /console + backend: + serviceName: '{{ .Values.wlsDomain.domainUID }}-{{ .Values.wlsDomain.adminServerName | lower | replace "_" "-" }}' + servicePort: {{ .Values.wlsDomain.adminServerPort }} + - path: /em + backend: + serviceName: '{{ .Values.wlsDomain.domainUID }}-{{ .Values.wlsDomain.adminServerName | lower | replace "_" "-" }}' + servicePort: {{ .Values.wlsDomain.adminServerPort }} + - path: /soa + backend: + serviceName: '{{ .Values.wlsDomain.domainUID }}-cluster-{{ .Values.wlsDomain.soaClusterName | lower | replace "_" "-" }}' + servicePort: {{ .Values.wlsDomain.soaManagedServerPort }} + - path: /integration + backend: + serviceName: '{{ .Values.wlsDomain.domainUID }}-cluster-{{ .Values.wlsDomain.soaClusterName | lower | replace "_" "-" }}' + servicePort: {{ .Values.wlsDomain.soaManagedServerPort }} + - path: /soa-infra + backend: + serviceName: '{{ .Values.wlsDomain.domainUID }}-cluster-{{ .Values.wlsDomain.soaClusterName | lower | replace "_" "-" }}' + servicePort: {{ .Values.wlsDomain.soaManagedServerPort }} + - path: + backend: + serviceName: '{{ .Values.wlsDomain.domainUID }}-cluster-{{ .Values.wlsDomain.oimClusterName | lower | replace "_" "-" }}' + servicePort: {{ .Values.wlsDomain.oimManagedServerPort }} +{{- if eq .Values.sslType "SSL" }} + tls: + - hosts: + - '{{ .Values.traefik.hostname }}' + secretName: {{ .Values.secretName }} +{{- end }} +--- +#Create Traefik Middleware custom resource for SSL Termination +{{- if eq 
.Values.sslType "SSL" }} +apiVersion: traefik.containo.us/v1alpha1 +kind: Middleware +metadata: + name: wls-proxy-ssl + namespace: {{ .Release.Namespace }} +spec: + headers: + customRequestHeaders: + X-Custom-Request-Header: "" + X-Forwarded-For: "" + WL-Proxy-Client-IP: "" + WL-Proxy-SSL: "" + WL-Proxy-SSL: "true" + sslRedirect: true +{{- end }} + +{{- end }} +{{- end }} +{{- end }} + diff --git a/OracleIdentityGovernance/kubernetes/3.0.1/ingress-per-domain/values.yaml b/OracleIdentityGovernance/kubernetes/charts/ingress-per-domain/values.yaml old mode 100644 new mode 100755 similarity index 54% rename from OracleIdentityGovernance/kubernetes/3.0.1/ingress-per-domain/values.yaml rename to OracleIdentityGovernance/kubernetes/charts/ingress-per-domain/values.yaml index bd23a79f6..907a843eb --- a/OracleIdentityGovernance/kubernetes/3.0.1/ingress-per-domain/values.yaml +++ b/OracleIdentityGovernance/kubernetes/charts/ingress-per-domain/values.yaml @@ -1,33 +1,28 @@ -# Copyright (c) 2020, Oracle Corporation and/or its affiliates. +# Copyright (c) 2020, 2021, Oracle and/or its affiliates. # Licensed under the Universal Permissive License v 1.0 as shown at https://oss.oracle.com/licenses/upl. - +# +# # Default values for ingress-per-domain. # This is a YAML-formatted file. # Declare variables to be passed into your templates. +# +# Load balancer type. Supported values are: TRAEFIK, NGINX +type: NGINX + +# Type of Configuration Supported Values are : NONSSL, SSL +sslType: SSL -# Load balancer type. 
Supported values are: VOYAGER, NGINX -type: VOYAGER -# Type of Configuration Supported Values are : NONSSL,SSL -# tls: NONSSL -tls: SSL # TLS secret name if the mode is SSL secretName: domain1-tls-cert - -# WLS domain as backend to the load balancer +#WLS domain as backend to the load balancer wlsDomain: domainUID: oimcluster - oimClusterName: oim_cluster + adminServerName: AdminServer + adminServerPort: 7001 soaClusterName: soa_cluster soaManagedServerPort: 8001 + oimClusterName: oim_cluster oimManagedServerPort: 14000 - adminServerName: adminserver - adminServerPort: 7001 -# Voyager specific values -voyager: - # web port - webPort: 30305 - # stats port - statsPort: 30315 diff --git a/OracleIdentityGovernance/kubernetes/charts/traefik/values.yaml b/OracleIdentityGovernance/kubernetes/charts/traefik/values.yaml new file mode 100755 index 000000000..e94bf24f2 --- /dev/null +++ b/OracleIdentityGovernance/kubernetes/charts/traefik/values.yaml @@ -0,0 +1,52 @@ +# Copyright (c) 2020, 2021, Oracle and/or its affiliates. +# Licensed under the Universal Permissive License v 1.0 as shown at https://oss.oracle.com/licenses/upl. +# +image: + name: traefik + tag: 2.2.8 + pullPolicy: IfNotPresent +ingressRoute: + dashboard: + enabled: true + # Additional ingressRoute annotations (e.g. for kubernetes.io/ingress.class) + annotations: {} + # Additional ingressRoute labels (e.g. for filtering IngressRoute by custom labels) + labels: {} +providers: + kubernetesCRD: + enabled: true + kubernetesIngress: + enabled: true + # IP used for Kubernetes Ingress endpoints +ports: + traefik: + port: 9000 + expose: true + # The exposed port for this service + exposedPort: 9000 + # The port protocol (TCP/UDP) + protocol: TCP + web: + port: 8000 + # hostPort: 8000 + expose: true + exposedPort: 30305 + nodePort: 30305 + # The port protocol (TCP/UDP) + protocol: TCP + # Use nodeport if set. 
This is useful if you have configured Traefik in a + # LoadBalancer + # nodePort: 32080 + # Port Redirections + # Added in 2.2, you can make permanent redirects via entrypoints. + # https://docs.traefik.io/routing/entrypoints/#redirection + # redirectTo: websecure + websecure: + port: 8443 +# # hostPort: 8443 + expose: true + exposedPort: 30443 + # The port protocol (TCP/UDP) + protocol: TCP + nodePort: 30443 + diff --git a/OracleIdentityGovernance/kubernetes/charts/weblogic-operator/.helmignore b/OracleIdentityGovernance/kubernetes/charts/weblogic-operator/.helmignore new file mode 100755 index 000000000..1397cc19f --- /dev/null +++ b/OracleIdentityGovernance/kubernetes/charts/weblogic-operator/.helmignore @@ -0,0 +1,12 @@ +# Patterns to ignore when building packages. +# This supports shell glob matching, relative path matching, and +# negation (prefixed with !). Only one pattern per line. +.DS_Store +.git/ +.gitignore +*.bak +*.tmp +*.orig +*~ +.project +.idea/ diff --git a/OracleIdentityGovernance/kubernetes/charts/weblogic-operator/Chart.yaml b/OracleIdentityGovernance/kubernetes/charts/weblogic-operator/Chart.yaml new file mode 100755 index 000000000..b5cac770e --- /dev/null +++ b/OracleIdentityGovernance/kubernetes/charts/weblogic-operator/Chart.yaml @@ -0,0 +1,10 @@ +# Copyright (c) 2018, 2021, Oracle and/or its affiliates. +# Licensed under the Universal Permissive License v 1.0 as shown at https://oss.oracle.com/licenses/upl. + +apiVersion: v1 +name: weblogic-operator +description: Helm chart for configuring the WebLogic operator. 
+ +type: application +version: 3.3.0 +appVersion: 3.3.0 diff --git a/OracleIdentityGovernance/kubernetes/charts/weblogic-operator/templates/_domain-namespaces.tpl b/OracleIdentityGovernance/kubernetes/charts/weblogic-operator/templates/_domain-namespaces.tpl new file mode 100755 index 000000000..08988c28d --- /dev/null +++ b/OracleIdentityGovernance/kubernetes/charts/weblogic-operator/templates/_domain-namespaces.tpl @@ -0,0 +1,134 @@ +# Copyright (c) 2018, 2021, Oracle and/or its affiliates. +# Licensed under the Universal Permissive License v 1.0 as shown at https://oss.oracle.com/licenses/upl. + +{{- define "operator.domainNamespaces" }} +{{- if (or (eq (default "List" .domainNamespaceSelectionStrategy) "Dedicated") (and .dedicated (eq (default "List" .domainNamespaceSelectionStrategy) "List"))) }} +{{- $args := include "utils.cloneDictionary" . | fromYaml -}} +{{- $key := .Release.Namespace -}} +{{- $ignore := set $args "domainNamespace" $key -}} +{{- include "operator.operatorRoleBindingNamespace" $args -}} +{{- else if eq (default "List" .domainNamespaceSelectionStrategy) "List" }} +{{- $args := include "utils.cloneDictionary" . | fromYaml -}} +{{- range $key := $args.domainNamespaces -}} +{{- $ignore := set $args "domainNamespace" $key -}} +{{- include "operator.operatorRoleBindingNamespace" $args -}} +{{- end }} +{{- else if eq .domainNamespaceSelectionStrategy "LabelSelector" }} +{{- $args := include "utils.cloneDictionary" . | fromYaml -}} +{{- /* + Split terms on commas not contained in parentheses. Unfortunately, the regular expression + support included with Helm templates does not include lookarounds. 
+*/ -}} +{{- $working := dict "rejected" (list) "terms" (list $args.domainNamespaceLabelSelector) }} +{{- if contains "," $args.domainNamespaceLabelSelector }} +{{- $cs := regexSplit "," $args.domainNamespaceLabelSelector -1 }} +{{- $ignore := set $working "st" (list) }} +{{- $ignore := set $working "item" "" }} +{{- range $c := $cs }} +{{- if and (contains "(" $c) (not (contains ")" $c)) }} +{{- $ignore := set $working "item" (print $working.item $c) }} +{{- else if not (eq $working.item "") }} +{{- $ignore := set $working "st" (append $working.st (print $working.item "," $c)) }} +{{- if contains ")" $c }} +{{- $ignore := set $working "item" "" }} +{{- end }} +{{- else }} +{{- $ignore := set $working "st" (append $working.st $c) }} +{{- end }} +{{- end }} +{{- $ignore := set $working "terms" $working.st }} +{{- end }} +{{- $namespaces := (lookup "v1" "Namespace" "" "").items }} +{{- range $t := $working.terms }} +{{- $term := trim $t }} +{{- range $index, $namespace := $namespaces }} +{{- /* + Label selector patterns + Equality-based: =, ==, != + Set-based: x in (a, b), x notin (a, b) + Existence: x, !x +*/ -}} +{{- if not $namespace.metadata.labels }} +{{- $ignore := set $namespace.metadata "labels" (dict) }} +{{- end }} +{{- if hasPrefix "!" $term }} +{{- if hasKey $namespace.metadata.labels (trimPrefix "!" 
$term) }} +{{- $ignore := set $working "rejected" (append $working.rejected $namespace.metadata.name) }} +{{- end }} +{{- else if contains "!=" $term }} +{{- $split := regexSplit "!=" $term 2 }} +{{- $key := nospace (first $split) }} +{{- if hasKey $namespace.metadata.labels $key }} +{{- if eq (last $split | nospace) (get $namespace.metadata.labels $key) }} +{{- $ignore := set $working "rejected" (append $working.rejected $namespace.metadata.name) }} +{{- end }} +{{- end }} +{{- else if contains "==" $term }} +{{- $split := regexSplit "==" $term 2 }} +{{- $key := nospace (first $split) }} +{{- if or (not (hasKey $namespace.metadata.labels $key)) (not (eq (last $split | nospace) (get $namespace.metadata.labels $key))) }} +{{- $ignore := set $working "rejected" (append $working.rejected $namespace.metadata.name) }} +{{- end }} +{{- else if contains "=" $term }} +{{- $split := regexSplit "=" $term 2 }} +{{- $key := nospace (first $split) }} +{{- if or (not (hasKey $namespace.metadata.labels $key)) (not (eq (last $split | nospace) (get $namespace.metadata.labels $key))) }} +{{- $ignore := set $working "rejected" (append $working.rejected $namespace.metadata.name) }} +{{- end }} +{{- else if contains " notin " $term }} +{{- $split := regexSplit " notin " $term 2 }} +{{- $key := nospace (first $split) }} +{{- if hasKey $namespace.metadata.labels $key }} +{{- $second := nospace (last $split) }} +{{- $parenContents := substr 1 (int (sub (len $second) 1)) $second }} +{{- $values := regexSplit "," $parenContents -1 }} +{{- range $value := $values }} +{{- if eq ($value | nospace) (get $namespace.metadata.labels $key) }} +{{- $ignore := set $working "rejected" (append $working.rejected $namespace.metadata.name) }} +{{- end }} +{{- end }} +{{- end }} +{{- else if contains " in " $term }} +{{- $split := regexSplit " in " $term 2 }} +{{- $key := nospace (first $split) }} +{{- if not (hasKey $namespace.metadata.labels $key) }} +{{- $ignore := set $working "rejected" (append 
$working.rejected $namespace.metadata.name) }} +{{- else }} +{{- $second := nospace (last $split) }} +{{- $parenContents := substr 1 (int (sub (len $second) 1)) $second }} +{{- $values := regexSplit "," $parenContents -1 }} +{{- $ignore := set $working "found" false }} +{{- range $value := $values }} +{{- if eq ($value | nospace) (get $namespace.metadata.labels $key) }} +{{- $ignore := set $working "found" true }} +{{- end }} +{{- end }} +{{- if not $working.found }} +{{- $ignore := set $working "rejected" (append $working.rejected $namespace.metadata.name) }} +{{- end }} +{{- end }} +{{- else }} +{{- if not (hasKey $namespace.metadata.labels $term) }} +{{- $ignore := set $working "rejected" (append $working.rejected $namespace.metadata.name) }} +{{- end }} +{{- end }} +{{- end }} +{{- end }} +{{- range $index, $namespace := $namespaces }} +{{- $key := $namespace.metadata.name -}} +{{- if not (has $key $working.rejected) }} +{{- $ignore := set $args "domainNamespace" $key -}} +{{- include "operator.operatorRoleBindingNamespace" $args -}} +{{- end }} +{{- end }} +{{- else if eq .domainNamespaceSelectionStrategy "RegExp" }} +{{- $args := include "utils.cloneDictionary" . 
| fromYaml -}} +{{- range $index, $namespace := (lookup "v1" "Namespace" "" "").items }} +{{- if regexMatch $args.domainNamespaceRegExp $namespace.metadata.name }} +{{- $key := $namespace.metadata.name -}} +{{- $ignore := set $args "domainNamespace" $key -}} +{{- include "operator.operatorRoleBindingNamespace" $args -}} +{{- end }} +{{- end }} +{{- end }} +{{- end }} diff --git a/OracleIdentityGovernance/kubernetes/charts/weblogic-operator/templates/_operator-clusterrole-domain-admin.tpl b/OracleIdentityGovernance/kubernetes/charts/weblogic-operator/templates/_operator-clusterrole-domain-admin.tpl new file mode 100755 index 000000000..94cab9df7 --- /dev/null +++ b/OracleIdentityGovernance/kubernetes/charts/weblogic-operator/templates/_operator-clusterrole-domain-admin.tpl @@ -0,0 +1,40 @@ +# Copyright (c) 2018, 2021, Oracle and/or its affiliates. +# Licensed under the Universal Permissive License v 1.0 as shown at https://oss.oracle.com/licenses/upl. + +{{- define "operator.operatorClusterRoleDomainAdmin" }} +--- +{{- if (or (eq (default "List" .domainNamespaceSelectionStrategy) "Dedicated") (and .dedicated (eq (default "List" .domainNamespaceSelectionStrategy) "List"))) }} +kind: "Role" +{{- else }} +kind: "ClusterRole" +{{- end }} +apiVersion: "rbac.authorization.k8s.io/v1" +metadata: + {{- if (or (eq (default "List" .domainNamespaceSelectionStrategy) "Dedicated") (and .dedicated (eq (default "List" .domainNamespaceSelectionStrategy) "List"))) }} + name: "weblogic-operator-role-domain-admin" + namespace: {{ .Release.Namespace | quote }} + {{- else }} + name: {{ list .Release.Namespace "weblogic-operator-clusterrole-domain-admin" | join "-" | quote }} + {{- end }} + labels: + weblogic.operatorName: {{ .Release.Namespace | quote }} +rules: +- apiGroups: [""] + resources: ["configmaps"] + verbs: ["get", "list", "watch", "create", "update", "patch", "delete", "deletecollection"] +- apiGroups: [""] + resources: ["secrets", "pods", "events"] + verbs: ["get", "list", 
"watch"] +- apiGroups: [""] + resources: ["pods/log"] + verbs: ["get", "list"] +- apiGroups: [""] + resources: ["pods/exec"] + verbs: ["get", "create"] +- apiGroups: ["weblogic.oracle"] + resources: ["domains"] + verbs: ["get", "list", "watch", "create", "update", "patch", "delete", "deletecollection"] +- apiGroups: ["weblogic.oracle"] + resources: ["domains/status"] + verbs: ["get", "watch"] +{{- end }} diff --git a/OracleIdentityGovernance/kubernetes/charts/weblogic-operator/templates/_operator-clusterrole-general.tpl b/OracleIdentityGovernance/kubernetes/charts/weblogic-operator/templates/_operator-clusterrole-general.tpl new file mode 100755 index 000000000..2eba13b95 --- /dev/null +++ b/OracleIdentityGovernance/kubernetes/charts/weblogic-operator/templates/_operator-clusterrole-general.tpl @@ -0,0 +1,39 @@ +# Copyright (c) 2018, 2021, Oracle and/or its affiliates. +# Licensed under the Universal Permissive License v 1.0 as shown at https://oss.oracle.com/licenses/upl. + +{{- define "operator.operatorClusterRoleGeneral" }} +--- +{{- if (or (eq (default "List" .domainNamespaceSelectionStrategy) "Dedicated") (and .dedicated (eq (default "List" .domainNamespaceSelectionStrategy) "List"))) }} +kind: "Role" +{{- else }} +kind: "ClusterRole" +{{- end }} +apiVersion: "rbac.authorization.k8s.io/v1" +metadata: + {{- if (or (eq (default "List" .domainNamespaceSelectionStrategy) "Dedicated") (and .dedicated (eq (default "List" .domainNamespaceSelectionStrategy) "List"))) }} + name: "weblogic-operator-role-general" + namespace: {{ .Release.Namespace | quote }} + {{- else }} + name: {{ list .Release.Namespace "weblogic-operator-clusterrole-general" | join "-" | quote }} + {{- end }} + labels: + weblogic.operatorName: {{ .Release.Namespace | quote }} +rules: +{{- if not (or (eq (default "List" .domainNamespaceSelectionStrategy) "Dedicated") (and .dedicated (eq (default "List" .domainNamespaceSelectionStrategy) "List"))) }} +- apiGroups: [""] + resources: ["namespaces"] + 
verbs: ["get", "list", "watch"] +- apiGroups: ["apiextensions.k8s.io"] + resources: ["customresourcedefinitions"] + verbs: ["get", "list", "watch", "create", "update", "patch"] +{{- end }} +- apiGroups: ["weblogic.oracle"] + resources: ["domains", "domains/status"] + verbs: ["get", "list", "watch", "update", "patch"] +- apiGroups: ["authentication.k8s.io"] + resources: ["tokenreviews"] + verbs: ["create"] +- apiGroups: ["authorization.k8s.io"] + resources: ["selfsubjectrulesreviews"] + verbs: ["create"] +{{- end }} diff --git a/OracleIdentityGovernance/kubernetes/charts/weblogic-operator/templates/_operator-clusterrole-namespace.tpl b/OracleIdentityGovernance/kubernetes/charts/weblogic-operator/templates/_operator-clusterrole-namespace.tpl new file mode 100755 index 000000000..6310779bb --- /dev/null +++ b/OracleIdentityGovernance/kubernetes/charts/weblogic-operator/templates/_operator-clusterrole-namespace.tpl @@ -0,0 +1,40 @@ +# Copyright (c) 2018, 2021, Oracle and/or its affiliates. +# Licensed under the Universal Permissive License v 1.0 as shown at https://oss.oracle.com/licenses/upl. 
+ +{{- define "operator.operatorClusterRoleNamespace" }} +--- +{{- if (or (eq (default "List" .domainNamespaceSelectionStrategy) "Dedicated") (and .dedicated (eq (default "List" .domainNamespaceSelectionStrategy) "List"))) }} +kind: "Role" +{{- else }} +kind: "ClusterRole" +{{- end }} +apiVersion: "rbac.authorization.k8s.io/v1" +metadata: + {{- if (or (eq (default "List" .domainNamespaceSelectionStrategy) "Dedicated") (and .dedicated (eq (default "List" .domainNamespaceSelectionStrategy) "List"))) }} + name: "weblogic-operator-role-namespace" + namespace: {{ .Release.Namespace | quote }} + {{- else }} + name: {{ list .Release.Namespace "weblogic-operator-clusterrole-namespace" | join "-" | quote }} + {{- end }} + labels: + weblogic.operatorName: {{ .Release.Namespace | quote }} +rules: +- apiGroups: [""] + resources: ["services", "configmaps", "pods", "events"] + verbs: ["get", "list", "watch", "create", "update", "patch", "delete", "deletecollection"] +- apiGroups: [""] + resources: ["secrets"] + verbs: ["get", "list", "watch"] +- apiGroups: [""] + resources: ["pods/log"] + verbs: ["get", "list"] +- apiGroups: [""] + resources: ["pods/exec"] + verbs: ["get", "create"] +- apiGroups: ["batch"] + resources: ["jobs"] + verbs: ["get", "list", "watch", "create", "update", "patch", "delete", "deletecollection"] +- apiGroups: ["policy"] + resources: ["poddisruptionbudgets"] + verbs: ["get", "list", "watch", "create", "update", "patch", "delete", "deletecollection"] +{{- end }} diff --git a/OracleIdentityGovernance/kubernetes/charts/weblogic-operator/templates/_operator-clusterrole-nonresource.tpl b/OracleIdentityGovernance/kubernetes/charts/weblogic-operator/templates/_operator-clusterrole-nonresource.tpl new file mode 100755 index 000000000..e3b6a2785 --- /dev/null +++ b/OracleIdentityGovernance/kubernetes/charts/weblogic-operator/templates/_operator-clusterrole-nonresource.tpl @@ -0,0 +1,15 @@ +# Copyright (c) 2018, 2021, Oracle and/or its affiliates. 
+# Licensed under the Universal Permissive License v 1.0 as shown at https://oss.oracle.com/licenses/upl. + +{{- define "operator.operatorClusterRoleNonResource" }} +--- +kind: "ClusterRole" +apiVersion: "rbac.authorization.k8s.io/v1" +metadata: + name: {{ list .Release.Namespace "weblogic-operator-clusterrole-nonresource" | join "-" | quote }} + labels: + weblogic.operatorName: {{ .Release.Namespace | quote }} +rules: +- nonResourceURLs: ["/version/*"] + verbs: ["get"] +{{- end }} diff --git a/OracleIdentityGovernance/kubernetes/charts/weblogic-operator/templates/_operator-clusterrole-operator-admin.tpl b/OracleIdentityGovernance/kubernetes/charts/weblogic-operator/templates/_operator-clusterrole-operator-admin.tpl new file mode 100755 index 000000000..46faed184 --- /dev/null +++ b/OracleIdentityGovernance/kubernetes/charts/weblogic-operator/templates/_operator-clusterrole-operator-admin.tpl @@ -0,0 +1,34 @@ +# Copyright (c) 2018, 2021, Oracle and/or its affiliates. +# Licensed under the Universal Permissive License v 1.0 as shown at https://oss.oracle.com/licenses/upl. 
+ +{{- define "operator.operatorClusterRoleOperatorAdmin" }} +--- +{{- if (or (eq (default "List" .domainNamespaceSelectionStrategy) "Dedicated") (and .dedicated (eq (default "List" .domainNamespaceSelectionStrategy) "List"))) }} +kind: "Role" +{{- else }} +kind: "ClusterRole" +{{- end }} +apiVersion: "rbac.authorization.k8s.io/v1" +metadata: + {{- if (or (eq (default "List" .domainNamespaceSelectionStrategy) "Dedicated") (and .dedicated (eq (default "List" .domainNamespaceSelectionStrategy) "List"))) }} + name: "weblogic-operator-role-operator-admin" + namespace: {{ .Release.Namespace | quote }} + {{- else }} + name: {{ list .Release.Namespace "weblogic-operator-clusterrole-operator-admin" | join "-" | quote }} + {{- end }} + labels: + weblogic.operatorName: {{ .Release.Namespace | quote }} +rules: +- apiGroups: [""] + resources: ["configmaps", "secrets"] + verbs: ["get", "list", "watch", "create", "update", "patch", "delete", "deletecollection"] +- apiGroups: [""] + resources: ["pods", "events"] + verbs: ["get", "list", "watch"] +- apiGroups: [""] + resources: ["pods/log"] + verbs: ["get", "list"] +- apiGroups: [""] + resources: ["pods/exec"] + verbs: ["get", "create"] +{{- end }} diff --git a/OracleIdentityGovernance/kubernetes/charts/weblogic-operator/templates/_operator-clusterrolebinding-auth-delegator.tpl b/OracleIdentityGovernance/kubernetes/charts/weblogic-operator/templates/_operator-clusterrolebinding-auth-delegator.tpl new file mode 100755 index 000000000..783f970e7 --- /dev/null +++ b/OracleIdentityGovernance/kubernetes/charts/weblogic-operator/templates/_operator-clusterrolebinding-auth-delegator.tpl @@ -0,0 +1,30 @@ +# Copyright (c) 2018, 2021, Oracle and/or its affiliates. +# Licensed under the Universal Permissive License v 1.0 as shown at https://oss.oracle.com/licenses/upl. 
+ +{{- define "operator.clusterRoleBindingAuthDelegator" }} +--- +apiVersion: "rbac.authorization.k8s.io/v1" +{{- if (or (eq (default "List" .domainNamespaceSelectionStrategy) "Dedicated") (and .dedicated (eq (default "List" .domainNamespaceSelectionStrategy) "List"))) }} +kind: "RoleBinding" +{{- else }} +kind: "ClusterRoleBinding" +{{- end }} +metadata: + labels: + weblogic.operatorName: {{ .Release.Namespace | quote}} + {{- if (or (eq (default "List" .domainNamespaceSelectionStrategy) "Dedicated") (and .dedicated (eq (default "List" .domainNamespaceSelectionStrategy) "List"))) }} + name: "weblogic-operator-rolebinding-auth-delegator" + namespace: {{ .Release.Namespace | quote }} + {{- else }} + name: {{ list .Release.Namespace "weblogic-operator-clusterrolebinding-auth-delegator" | join "-" | quote }} + {{- end }} +roleRef: + apiGroup: "rbac.authorization.k8s.io" + kind: "ClusterRole" + name: "system:auth-delegator" +subjects: +- kind: "ServiceAccount" + apiGroup: "" + name: {{ .serviceAccount | quote }} + namespace: {{ .Release.Namespace | quote }} +{{- end }} diff --git a/OracleIdentityGovernance/kubernetes/charts/weblogic-operator/templates/_operator-clusterrolebinding-discovery.tpl b/OracleIdentityGovernance/kubernetes/charts/weblogic-operator/templates/_operator-clusterrolebinding-discovery.tpl new file mode 100755 index 000000000..48c505fa5 --- /dev/null +++ b/OracleIdentityGovernance/kubernetes/charts/weblogic-operator/templates/_operator-clusterrolebinding-discovery.tpl @@ -0,0 +1,30 @@ +# Copyright (c) 2018, 2021, Oracle and/or its affiliates. +# Licensed under the Universal Permissive License v 1.0 as shown at https://oss.oracle.com/licenses/upl. 
+ +{{- define "operator.clusterRoleBindingDiscovery" }} +--- +apiVersion: "rbac.authorization.k8s.io/v1" +{{- if (or (eq (default "List" .domainNamespaceSelectionStrategy) "Dedicated") (and .dedicated (eq (default "List" .domainNamespaceSelectionStrategy) "List"))) }} +kind: "RoleBinding" +{{- else }} +kind: "ClusterRoleBinding" +{{- end }} +metadata: + labels: + weblogic.operatorName: {{ .Release.Namespace | quote }} + {{- if (or (eq (default "List" .domainNamespaceSelectionStrategy) "Dedicated") (and .dedicated (eq (default "List" .domainNamespaceSelectionStrategy) "List"))) }} + name: "weblogic-operator-rolebinding-discovery" + namespace: {{ .Release.Namespace | quote }} + {{- else }} + name: {{ list .Release.Namespace "weblogic-operator-clusterrolebinding-discovery" | join "-" | quote }} + {{- end }} +roleRef: + apiGroup: "rbac.authorization.k8s.io" + kind: "ClusterRole" + name: "system:discovery" +subjects: +- kind: "ServiceAccount" + apiGroup: "" + name: {{ .serviceAccount | quote }} + namespace: {{ .Release.Namespace | quote }} +{{- end }} diff --git a/OracleIdentityGovernance/kubernetes/charts/weblogic-operator/templates/_operator-clusterrolebinding-general.tpl b/OracleIdentityGovernance/kubernetes/charts/weblogic-operator/templates/_operator-clusterrolebinding-general.tpl new file mode 100755 index 000000000..f2994da33 --- /dev/null +++ b/OracleIdentityGovernance/kubernetes/charts/weblogic-operator/templates/_operator-clusterrolebinding-general.tpl @@ -0,0 +1,35 @@ +# Copyright (c) 2018, 2021, Oracle and/or its affiliates. +# Licensed under the Universal Permissive License v 1.0 as shown at https://oss.oracle.com/licenses/upl. 
+ +{{- define "operator.clusterRoleBindingGeneral" }} +--- +apiVersion: "rbac.authorization.k8s.io/v1" +{{- if (or (eq (default "List" .domainNamespaceSelectionStrategy) "Dedicated") (and .dedicated (eq (default "List" .domainNamespaceSelectionStrategy) "List"))) }} +kind: "RoleBinding" +{{- else }} +kind: "ClusterRoleBinding" +{{- end }} +metadata: + labels: + weblogic.operatorName: {{ .Release.Namespace | quote }} + {{- if (or (eq (default "List" .domainNamespaceSelectionStrategy) "Dedicated") (and .dedicated (eq (default "List" .domainNamespaceSelectionStrategy) "List"))) }} + name: "weblogic-operator-rolebinding-general" + namespace: {{ .Release.Namespace | quote }} + {{- else }} + name: {{ list .Release.Namespace "weblogic-operator-clusterrolebinding-general" | join "-" | quote }} + {{- end }} +roleRef: + apiGroup: "rbac.authorization.k8s.io" + {{- if (or (eq (default "List" .domainNamespaceSelectionStrategy) "Dedicated") (and .dedicated (eq (default "List" .domainNamespaceSelectionStrategy) "List"))) }} + kind: "Role" + name: "weblogic-operator-role-general" + {{- else }} + kind: "ClusterRole" + name: {{ list .Release.Namespace "weblogic-operator-clusterrole-general" | join "-" | quote }} + {{- end }} +subjects: +- kind: "ServiceAccount" + apiGroup: "" + name: {{ .serviceAccount | quote }} + namespace: {{ .Release.Namespace | quote }} +{{- end }} diff --git a/OracleIdentityGovernance/kubernetes/charts/weblogic-operator/templates/_operator-clusterrolebinding-nonresource.tpl b/OracleIdentityGovernance/kubernetes/charts/weblogic-operator/templates/_operator-clusterrolebinding-nonresource.tpl new file mode 100755 index 000000000..d998ab0e9 --- /dev/null +++ b/OracleIdentityGovernance/kubernetes/charts/weblogic-operator/templates/_operator-clusterrolebinding-nonresource.tpl @@ -0,0 +1,21 @@ +# Copyright (c) 2018, 2021, Oracle and/or its affiliates. +# Licensed under the Universal Permissive License v 1.0 as shown at https://oss.oracle.com/licenses/upl. 
+ +{{- define "operator.clusterRoleBindingNonResource" }} +--- +apiVersion: "rbac.authorization.k8s.io/v1" +kind: "ClusterRoleBinding" +metadata: + labels: + weblogic.operatorName: {{ .Release.Namespace | quote }} + name: {{ list .Release.Namespace "weblogic-operator-clusterrolebinding-nonresource" | join "-" | quote }} +roleRef: + apiGroup: "rbac.authorization.k8s.io" + kind: "ClusterRole" + name: {{ list .Release.Namespace "weblogic-operator-clusterrole-nonresource" | join "-" | quote }} +subjects: +- kind: "ServiceAccount" + apiGroup: "" + name: {{ .serviceAccount | quote }} + namespace: {{ .Release.Namespace | quote }} +{{- end }} diff --git a/OracleIdentityGovernance/kubernetes/charts/weblogic-operator/templates/_operator-cm.tpl b/OracleIdentityGovernance/kubernetes/charts/weblogic-operator/templates/_operator-cm.tpl new file mode 100755 index 000000000..dd6594de2 --- /dev/null +++ b/OracleIdentityGovernance/kubernetes/charts/weblogic-operator/templates/_operator-cm.tpl @@ -0,0 +1,58 @@ +# Copyright (c) 2018, 2021, Oracle and/or its affiliates. +# Licensed under the Universal Permissive License v 1.0 as shown at https://oss.oracle.com/licenses/upl. + +{{- define "operator.operatorConfigMap" }} +--- +apiVersion: "v1" +data: + {{- if .externalRestEnabled }} + {{- if (hasKey . 
"externalRestIdentitySecret") }} + externalRestIdentitySecret: {{ .externalRestIdentitySecret | quote }} + {{- else }} + externalOperatorCert: {{ .externalOperatorCert | quote }} + {{- end }} + {{- end }} + {{- $configmap := (lookup "v1" "ConfigMap" .Release.Namespace "weblogic-operator-cm") }} + {{- if (and $configmap $configmap.data) }} + {{- $internalOperatorCert := index $configmap.data "internalOperatorCert" }} + {{- if $internalOperatorCert }} + internalOperatorCert: {{ $internalOperatorCert }} + {{- end }} + {{- end }} + serviceaccount: {{ .serviceAccount | quote }} + domainNamespaceSelectionStrategy: {{ (default "List" .domainNamespaceSelectionStrategy) | quote }} + domainNamespaces: {{ .domainNamespaces | uniq | sortAlpha | join "," | quote }} + {{- if .dedicated }} + dedicated: {{ .dedicated | quote }} + {{- end }} + {{- if .domainNamespaceLabelSelector }} + domainNamespaceLabelSelector: {{ .domainNamespaceLabelSelector | quote }} + {{- end }} + {{- if .domainNamespaceRegExp }} + domainNamespaceRegExp: {{ .domainNamespaceRegExp | quote }} + {{- end }} + {{- if .dns1123Fields }} + dns1123Fields: {{ .dns1123Fields | quote }} + {{- end }} + {{- if .featureGates }} + featureGates: {{ .featureGates | quote }} + {{- end }} + {{- if .introspectorJobNameSuffix }} + introspectorJobNameSuffix: {{ .introspectorJobNameSuffix | quote }} + {{- end }} + {{- if .externalServiceNameSuffix }} + externalServiceNameSuffix: {{ .externalServiceNameSuffix | quote }} + {{- end }} + {{- if .clusterSizePaddingValidationEnabled }} + clusterSizePaddingValidationEnabled: {{ .clusterSizePaddingValidationEnabled | quote }} + {{- end }} + {{- if .tokenReviewAuthentication }} + tokenReviewAuthentication: {{ .tokenReviewAuthentication | quote }} + {{- end }} +kind: "ConfigMap" +metadata: + labels: + weblogic.operatorName: {{ .Release.Namespace | quote }} + name: "weblogic-operator-cm" + namespace: {{ .Release.Namespace | quote }} +{{- end }} diff --git 
a/OracleIdentityGovernance/kubernetes/charts/weblogic-operator/templates/_operator-dep.tpl b/OracleIdentityGovernance/kubernetes/charts/weblogic-operator/templates/_operator-dep.tpl new file mode 100755 index 000000000..3fadac7dc --- /dev/null +++ b/OracleIdentityGovernance/kubernetes/charts/weblogic-operator/templates/_operator-dep.tpl @@ -0,0 +1,158 @@ +# Copyright (c) 2018, 2021, Oracle and/or its affiliates. +# Licensed under the Universal Permissive License v 1.0 as shown at https://oss.oracle.com/licenses/upl. + +{{- define "operator.operatorDeployment" }} +--- +apiVersion: "apps/v1" +kind: "Deployment" +metadata: + name: "weblogic-operator" + namespace: {{ .Release.Namespace | quote }} + labels: + weblogic.operatorName: {{ .Release.Namespace | quote }} +spec: + strategy: + type: Recreate + selector: + matchLabels: + weblogic.operatorName: {{ .Release.Namespace | quote }} + replicas: 1 + template: + metadata: + {{- with .annotations }} + annotations: + {{- end }} + {{- range $key, $value := .annotations }} + {{ $key }}: {{ $value | quote }} + {{- end }} + labels: + weblogic.operatorName: {{ .Release.Namespace | quote }} + app: "weblogic-operator" + {{- range $key, $value := .labels }} + {{ $key }}: {{ $value | quote }} + {{- end }} + spec: + serviceAccountName: {{ .serviceAccount | quote }} + {{- with .nodeSelector }} + nodeSelector: + {{- toYaml . | nindent 8 }} + {{- end }} + {{- with .affinity }} + affinity: + {{- toYaml . 
| nindent 8 }} + {{- end }} + containers: + - name: "weblogic-operator" + image: {{ .image | quote }} + imagePullPolicy: {{ .imagePullPolicy | quote }} + command: ["bash"] + args: ["/operator/operator.sh"] + env: + - name: "OPERATOR_NAMESPACE" + valueFrom: + fieldRef: + fieldPath: "metadata.namespace" + - name: "OPERATOR_POD_NAME" + valueFrom: + fieldRef: + fieldPath: "metadata.name" + - name: "OPERATOR_POD_UID" + valueFrom: + fieldRef: + fieldPath: "metadata.uid" + - name: "OPERATOR_VERBOSE" + value: "false" + - name: "JAVA_LOGGING_LEVEL" + value: {{ .javaLoggingLevel | quote }} + - name: "JAVA_LOGGING_MAXSIZE" + value: {{ .javaLoggingFileSizeLimit | default 20000000 | quote }} + - name: "JAVA_LOGGING_COUNT" + value: {{ .javaLoggingFileCount | default 10 | quote }} + {{- if .remoteDebugNodePortEnabled }} + - name: "REMOTE_DEBUG_PORT" + value: {{ .internalDebugHttpPort | quote }} + - name: "DEBUG_SUSPEND" + {{- if .suspendOnDebugStartup }} + value: "y" + {{- else }} + value: "n" + {{- end }} + {{- end }} + {{- if .mockWLS }} + - name: "MOCK_WLS" + value: "true" + {{- end }} + resources: + requests: + cpu: {{ .cpuRequests | default "250m" }} + memory: {{ .memoryRequests | default "512Mi" }} + limits: + {{- if .cpuLimits}} + cpu: {{ .cpuLimits }} + {{- end }} + {{- if .memoryLimits}} + memory: {{ .memoryLimits }} + {{- end }} + volumeMounts: + - name: "weblogic-operator-cm-volume" + mountPath: "/operator/config" + - name: "weblogic-operator-debug-cm-volume" + mountPath: "/operator/debug-config" + - name: "weblogic-operator-secrets-volume" + mountPath: "/operator/secrets" + readOnly: true + {{- if .elkIntegrationEnabled }} + - mountPath: "/logs" + name: "log-dir" + readOnly: false + {{- end }} + {{- if not .remoteDebugNodePortEnabled }} + livenessProbe: + exec: + command: + - "bash" + - "/operator/livenessProbe.sh" + initialDelaySeconds: 20 + periodSeconds: 5 + readinessProbe: + exec: + command: + - "bash" + - "/operator/readinessProbe.sh" + initialDelaySeconds: 2 + 
periodSeconds: 10 + {{- end }} + {{- if .elkIntegrationEnabled }} + - name: "logstash" + image: {{ .logStashImage | quote }} + args: [ "-f", "/logs/logstash.conf" ] + volumeMounts: + - name: "log-dir" + mountPath: "/logs" + env: + - name: "ELASTICSEARCH_HOST" + value: {{ .elasticSearchHost | quote }} + - name: "ELASTICSEARCH_PORT" + value: {{ .elasticSearchPort | quote }} + {{- end }} + {{- if .imagePullSecrets }} + imagePullSecrets: + {{ .imagePullSecrets | toYaml }} + {{- end }} + volumes: + - name: "weblogic-operator-cm-volume" + configMap: + name: "weblogic-operator-cm" + - name: "weblogic-operator-debug-cm-volume" + configMap: + name: "weblogic-operator-debug-cm" + optional: true + - name: "weblogic-operator-secrets-volume" + secret: + secretName: "weblogic-operator-secrets" + {{- if .elkIntegrationEnabled }} + - name: "log-dir" + emptyDir: + medium: "Memory" + {{- end }} +{{- end }} diff --git a/OracleIdentityGovernance/kubernetes/charts/weblogic-operator/templates/_operator-external-svc.tpl b/OracleIdentityGovernance/kubernetes/charts/weblogic-operator/templates/_operator-external-svc.tpl new file mode 100755 index 000000000..44bfc1191 --- /dev/null +++ b/OracleIdentityGovernance/kubernetes/charts/weblogic-operator/templates/_operator-external-svc.tpl @@ -0,0 +1,30 @@ +# Copyright (c) 2018, 2021, Oracle and/or its affiliates. +# Licensed under the Universal Permissive License v 1.0 as shown at https://oss.oracle.com/licenses/upl. 
+ +{{- define "operator.operatorExternalService" }} +{{- if or .externalRestEnabled .remoteDebugNodePortEnabled }} +--- +apiVersion: "v1" +kind: "Service" +metadata: + name: "external-weblogic-operator-svc" + namespace: {{ .Release.Namespace | quote }} + labels: + weblogic.operatorName: {{ .Release.Namespace | quote }} +spec: + type: "NodePort" + selector: + app: "weblogic-operator" + ports: + {{- if .externalRestEnabled }} + - name: "rest" + port: 8081 + nodePort: {{ .externalRestHttpsPort }} + {{- end }} + {{- if .remoteDebugNodePortEnabled }} + - name: "debug" + port: {{ .internalDebugHttpPort }} + nodePort: {{ .externalDebugHttpPort }} + {{- end }} +{{- end }} +{{- end }} diff --git a/OracleIdentityGovernance/kubernetes/charts/weblogic-operator/templates/_operator-internal-svc.tpl b/OracleIdentityGovernance/kubernetes/charts/weblogic-operator/templates/_operator-internal-svc.tpl new file mode 100755 index 000000000..0108738de --- /dev/null +++ b/OracleIdentityGovernance/kubernetes/charts/weblogic-operator/templates/_operator-internal-svc.tpl @@ -0,0 +1,20 @@ +# Copyright (c) 2018, 2021, Oracle and/or its affiliates. +# Licensed under the Universal Permissive License v 1.0 as shown at https://oss.oracle.com/licenses/upl. 
+ +{{- define "operator.operatorInternalService" }} +--- +apiVersion: "v1" +kind: "Service" +metadata: + name: "internal-weblogic-operator-svc" + namespace: {{ .Release.Namespace | quote }} + labels: + weblogic.operatorName: {{ .Release.Namespace | quote }} +spec: + type: "ClusterIP" + selector: + app: "weblogic-operator" + ports: + - port: 8082 + name: "rest" +{{- end }} diff --git a/OracleIdentityGovernance/kubernetes/charts/weblogic-operator/templates/_operator-role.tpl b/OracleIdentityGovernance/kubernetes/charts/weblogic-operator/templates/_operator-role.tpl new file mode 100755 index 000000000..e0c386b98 --- /dev/null +++ b/OracleIdentityGovernance/kubernetes/charts/weblogic-operator/templates/_operator-role.tpl @@ -0,0 +1,17 @@ +# Copyright (c) 2018, 2021, Oracle and/or its affiliates. +# Licensed under the Universal Permissive License v 1.0 as shown at https://oss.oracle.com/licenses/upl. + +{{- define "operator.operatorRole" }} +--- +kind: "Role" +apiVersion: "rbac.authorization.k8s.io/v1" +metadata: + name: "weblogic-operator-role" + namespace: {{ .Release.Namespace | quote }} + labels: + weblogic.operatorName: {{ .Release.Namespace | quote }} +rules: +- apiGroups: [""] + resources: ["events", "secrets", "configmaps"] + verbs: ["get", "list", "watch", "create", "update", "patch", "delete", "deletecollection"] +{{- end }} diff --git a/OracleIdentityGovernance/kubernetes/charts/weblogic-operator/templates/_operator-rolebinding-namespace.tpl b/OracleIdentityGovernance/kubernetes/charts/weblogic-operator/templates/_operator-rolebinding-namespace.tpl new file mode 100755 index 000000000..d55ed3f47 --- /dev/null +++ b/OracleIdentityGovernance/kubernetes/charts/weblogic-operator/templates/_operator-rolebinding-namespace.tpl @@ -0,0 +1,35 @@ +# Copyright (c) 2018, 2021, Oracle and/or its affiliates. +# Licensed under the Universal Permissive License v 1.0 as shown at https://oss.oracle.com/licenses/upl. 
+ +{{- define "operator.operatorRoleBindingNamespace" }} +--- +{{- if .enableClusterRoleBinding }} +kind: "ClusterRoleBinding" +{{- else }} +kind: "RoleBinding" +{{- end }} +apiVersion: "rbac.authorization.k8s.io/v1" +metadata: + {{- if .enableClusterRoleBinding }} + name: {{ list .Release.Namespace "weblogic-operator-clusterrolebinding-namespace" | join "-" | quote }} + {{- else }} + name: "weblogic-operator-rolebinding-namespace" + namespace: {{ .domainNamespace | quote }} + {{- end }} + labels: + weblogic.operatorName: {{ .Release.Namespace | quote }} +subjects: +- kind: "ServiceAccount" + name: {{ .serviceAccount | quote }} + namespace: {{ .Release.Namespace | quote }} + apiGroup: "" +roleRef: + {{- if (or (eq (default "List" .domainNamespaceSelectionStrategy) "Dedicated") (and .dedicated (eq (default "List" .domainNamespaceSelectionStrategy) "List"))) }} + kind: "Role" + name: "weblogic-operator-role-namespace" + {{- else }} + kind: "ClusterRole" + name: {{ list .Release.Namespace "weblogic-operator-clusterrole-namespace" | join "-" | quote }} + {{- end }} + apiGroup: "rbac.authorization.k8s.io" +{{- end }} diff --git a/OracleIdentityGovernance/kubernetes/charts/weblogic-operator/templates/_operator-rolebinding.tpl b/OracleIdentityGovernance/kubernetes/charts/weblogic-operator/templates/_operator-rolebinding.tpl new file mode 100755 index 000000000..98a09424e --- /dev/null +++ b/OracleIdentityGovernance/kubernetes/charts/weblogic-operator/templates/_operator-rolebinding.tpl @@ -0,0 +1,22 @@ +# Copyright (c) 2018, 2021, Oracle and/or its affiliates. +# Licensed under the Universal Permissive License v 1.0 as shown at https://oss.oracle.com/licenses/upl. 
+ +{{- define "operator.operatorRoleBinding" }} +--- +kind: "RoleBinding" +apiVersion: "rbac.authorization.k8s.io/v1" +metadata: + name: "weblogic-operator-rolebinding" + namespace: {{ .Release.Namespace | quote }} + labels: + weblogic.operatorName: {{ .Release.Namespace | quote }} +subjects: +- kind: "ServiceAccount" + name: {{ .serviceAccount | quote }} + namespace: {{ .Release.Namespace | quote }} + apiGroup: "" +roleRef: + kind: "Role" + name: "weblogic-operator-role" + apiGroup: "rbac.authorization.k8s.io" +{{- end }} diff --git a/OracleIdentityGovernance/kubernetes/charts/weblogic-operator/templates/_operator-secret.tpl b/OracleIdentityGovernance/kubernetes/charts/weblogic-operator/templates/_operator-secret.tpl new file mode 100755 index 000000000..6a7442718 --- /dev/null +++ b/OracleIdentityGovernance/kubernetes/charts/weblogic-operator/templates/_operator-secret.tpl @@ -0,0 +1,25 @@ +# Copyright (c) 2018, 2021, Oracle and/or its affiliates. +# Licensed under the Universal Permissive License v 1.0 as shown at https://oss.oracle.com/licenses/upl. + +{{- define "operator.operatorSecrets" }} +--- +apiVersion: "v1" +kind: "Secret" +data: + {{- if (and .externalRestEnabled (hasKey . 
"externalOperatorKey")) }} + externalOperatorKey: {{ .externalOperatorKey | quote }} + {{- end }} + {{- $secret := (lookup "v1" "Secret" .Release.Namespace "weblogic-operator-secrets") }} + {{- if (and $secret $secret.data) }} + {{- $internalOperatorKey := index $secret.data "internalOperatorKey" }} + {{- if $internalOperatorKey }} + internalOperatorKey: {{ $internalOperatorKey }} + {{- end }} + {{- end }} +metadata: + labels: + weblogic.operatorName: {{ .Release.Namespace | quote }} + name: "weblogic-operator-secrets" + namespace: {{ .Release.Namespace | quote }} +type: "Opaque" +{{- end }} diff --git a/OracleIdentityGovernance/kubernetes/charts/weblogic-operator/templates/_operator.tpl b/OracleIdentityGovernance/kubernetes/charts/weblogic-operator/templates/_operator.tpl new file mode 100755 index 000000000..c24d7eebf --- /dev/null +++ b/OracleIdentityGovernance/kubernetes/charts/weblogic-operator/templates/_operator.tpl @@ -0,0 +1,30 @@ +# Copyright (c) 2018, 2021, Oracle and/or its affiliates. +# Licensed under the Universal Permissive License v 1.0 as shown at https://oss.oracle.com/licenses/upl. + +{{- define "operator.operator" -}} +{{- include "operator.operatorClusterRoleGeneral" . }} +{{- include "operator.operatorClusterRoleNamespace" . }} +{{- if not (or (eq (default "List" .domainNamespaceSelectionStrategy) "Dedicated") (and .dedicated (eq (default "List" .domainNamespaceSelectionStrategy) "List"))) }} +{{- include "operator.operatorClusterRoleNonResource" . }} +{{- end }} +{{- include "operator.operatorClusterRoleOperatorAdmin" . }} +{{- include "operator.operatorClusterRoleDomainAdmin" . }} +{{- include "operator.clusterRoleBindingGeneral" . }} +{{- include "operator.clusterRoleBindingAuthDelegator" . }} +{{- include "operator.clusterRoleBindingDiscovery" . 
}} +{{- if not (or (eq (default "List" .domainNamespaceSelectionStrategy) "Dedicated") (and .dedicated (eq (default "List" .domainNamespaceSelectionStrategy) "List"))) }} +{{- include "operator.clusterRoleBindingNonResource" . }} +{{- end }} +{{- include "operator.operatorRole" . }} +{{- include "operator.operatorRoleBinding" . }} +{{- include "operator.operatorConfigMap" . }} +{{- include "operator.operatorSecrets" . }} +{{- include "operator.operatorDeployment" . }} +{{- include "operator.operatorInternalService" . }} +{{- include "operator.operatorExternalService" . }} +{{- if .enableClusterRoleBinding }} +{{- include "operator.operatorRoleBindingNamespace" . }} +{{- else }} +{{- include "operator.domainNamespaces" . }} +{{- end }} +{{- end }} diff --git a/OracleIdentityGovernance/kubernetes/charts/weblogic-operator/templates/_utils.tpl b/OracleIdentityGovernance/kubernetes/charts/weblogic-operator/templates/_utils.tpl new file mode 100755 index 000000000..9f2ed825c --- /dev/null +++ b/OracleIdentityGovernance/kubernetes/charts/weblogic-operator/templates/_utils.tpl @@ -0,0 +1,493 @@ +# Copyright (c) 2018, 2021, Oracle and/or its affiliates. +# Licensed under the Universal Permissive License v 1.0 as shown at https://oss.oracle.com/licenses/upl. + +{{/* +Start validation +*/}} +{{- define "utils.startValidation" -}} +{{- $scope := . -}} +{{- $context := dict "scope" $scope "path" list -}} +{{- $stack := list $context -}} +{{- $ignore := set $scope "validationContextStack" $stack -}} +{{- $ignore := include "utils.setCurrentValidationContext" $scope -}} +{{- end -}} + +{{/* +End validation +If there were any validation errors, report them and kill the helm chart installation. +*/}} +{{- define "utils.endValidation" -}} +{{- $scope := . -}} +{{- if hasKey $scope "validationErrors" -}} +{{- fail $scope.validationErrors -}} +{{- end -}} +{{- end -}} + +{{/* +Push a new validation context +*/}} +{{- define "utils.pushValidationContext" -}} +{{- $scope := index . 
0 }} +{{- $scopeName := index . 1 }} +{{- $newScope := index $scope.validationScope $scopeName -}} +{{- $newPath := append $scope.validationPath $scopeName -}} +{{- $newContext := dict "scope" $newScope "path" $newPath -}} +{{- $newStack := append $scope.validationContextStack $newContext -}} +{{- $ignore := set $scope "validationContextStack" $newStack -}} +{{- $ignore := include "utils.setCurrentValidationContext" $scope -}} +{{- end -}} + +{{/* +Pop the validation context +*/}} +{{- define "utils.popValidationContext" -}} +{{- $scope := . }} +{{- $stack := $scope.validationContextStack -}} +{{- $ignore := set $scope "validationContextStack" (initial $stack) -}} +{{- $ignore := include "utils.setCurrentValidationContext" $scope -}} +{{- end -}} + +{{/* +Set the current validation context from the stack +*/}} +{{- define "utils.setCurrentValidationContext" -}} +{{- $scope := . }} +{{- $context := $scope.validationContextStack | last -}} +{{- $ignore := set $scope "validationScope" (index $context "scope") -}} +{{- $ignore := set $scope "validationPath" (index $context "path") -}} +{{- end -}} + +{{/* +Record a validation error (it will get reported later by utils.reportValidationErrors) +*/}} +{{- define "utils.recordValidationError" -}} +{{- $scope := index . 0 -}} +{{- $errorMsg := index . 1 -}} +{{- $path := $scope.validationPath -}} +{{- $pathStr := $path | join "." | trim -}} +{{- $scopedErrorMsg := (list "\n" $pathStr $errorMsg) | compact | join " " -}} +{{- if hasKey $scope "validationErrors" -}} +{{- $newValidationErrors := cat $scope.validationErrors $scopedErrorMsg -}} +{{- $ignore := set $scope "validationErrors" $newValidationErrors -}} +{{- else -}} +{{- $newValidationErrors := $scopedErrorMsg -}} +{{- $ignore := set $scope "validationErrors" $newValidationErrors -}} +{{- end -}} +{{- end -}} + +{{/* +Returns whether any errors have been reported +*/}} +{{- define "utils.haveValidationErrors" -}} +{{- if hasKey . 
"validationErrors" -}} + true +{{- end -}} +{{- end -}} + +{{/* +Determine whether a dictionary has a non-null value for a key +*/}} +{{- define "utils.dictionaryHasNonNullValue" -}} +{{- $dict := index . 0 -}} +{{- $name := index . 1 -}} +{{- if and (hasKey $dict $name) (not ( eq (typeOf (index $dict $name)) "" )) -}} + true +{{- end -}} +{{- end -}} + +{{/* +Verify that a value of a specific kind has been specified. +*/}} +{{- define "utils.verifyValue" -}} +{{- $requiredKind := index . 0 -}} +{{- $scope := index . 1 -}} +{{- $name := index . 2 -}} +{{- $isRequired := index . 3 -}} +{{- if $scope.trace -}} +{{- $errorMsg := cat "TRACE" $name $requiredKind $isRequired -}} +{{- $ignore := include "utils.recordValidationError" (list $scope $errorMsg) -}} +{{- end -}} +{{- $parent := $scope.validationScope -}} +{{- if include "utils.dictionaryHasNonNullValue" (list $parent $name) -}} +{{- $value := index $parent $name -}} +{{- $actualKind := kindOf $value -}} +{{- if eq $requiredKind $actualKind -}} + true +{{- else -}} +{{- $errorMsg := cat $name "must be a" $requiredKind ":" $actualKind -}} +{{- include "utils.recordValidationError" (list $scope $errorMsg) -}} +{{- end -}} +{{- else -}} +{{- if $isRequired -}} +{{- $errorMsg := cat $requiredKind $name "must be specified" -}} +{{- include "utils.recordValidationError" (list $scope $errorMsg) -}} +{{- else -}} + true +{{- end -}} +{{- end -}} +{{- end -}} + +{{/* +Verify that a list value has been specified +*/}} +{{- define "utils.verifyListValue" -}} +{{- $requiredKind := index . 0 -}} +{{- $scope := index . 1 -}} +{{- $name := index . 2 -}} +{{- $isRequired := index . 3 -}} +{{- $parent := $scope.validationScope -}} +{{- $args := . 
-}} +{{- if include "utils.verifyValue" (list "slice" $scope $name $isRequired) -}} +{{- $status := dict -}} +{{- if hasKey $parent $name -}} +{{- $list := index $parent $name -}} +{{- range $value := $list -}} +{{- $actualKind := kindOf $value -}} +{{- if not (eq $requiredKind $actualKind) -}} +{{- $errorMsg := cat $name "must only contain" $requiredKind "elements:" $actualKind -}} +{{- include "utils.recordValidationError" (list $scope $errorMsg) -}} +{{- $ignore := set $status "error" true -}} +{{- end -}} +{{- end -}} +{{- end -}} +{{- if not (hasKey $status "error") -}} + true +{{- end -}} +{{- end -}} +{{- end -}} + +{{/* +Verify a string value +*/}} +{{- define "utils.baseVerifyString" -}} +{{- include "utils.verifyValue" (prepend . "string") -}} +{{- end -}} + +{{/* +Verify a required string value +*/}} +{{- define "utils.verifyString" -}} +{{- include "utils.baseVerifyString" (append . true) -}} +{{- end -}} + +{{/* +Verify an optional string value +*/}} +{{- define "utils.verifyOptionalString" -}} +{{- include "utils.baseVerifyString" (append . false) -}} +{{- end -}} + +{{/* +Verify a boolean value +*/}} +{{- define "utils.baseVerifyBoolean" -}} +{{- include "utils.verifyValue" (prepend . "bool") -}} +{{- end -}} + +{{/* +Verify a required boolean value +*/}} +{{- define "utils.verifyBoolean" -}} +{{- include "utils.baseVerifyBoolean" (append . true) -}} +{{- end -}} + +{{/* +Verify an optional boolean value +*/}} +{{- define "utils.verifyOptionalBoolean" -}} +{{- include "utils.baseVerifyBoolean" (append . false) -}} +{{- end -}} + +{{/* +Verify an integer value +*/}} +{{- define "utils.baseVerifyInteger" -}} +{{- include "utils.verifyValue" (prepend . "float64") -}} +{{- end -}} + +{{/* +Verify a required integer value +*/}} +{{- define "utils.verifyInteger" -}} +{{- include "utils.baseVerifyInteger" (append . 
true) -}} +{{- end -}} + +{{/* +Verify an optional integer value +*/}} +{{- define "utils.verifyOptionalInteger" -}} +{{- include "utils.baseVerifyInteger" (append . false) -}} +{{- end -}} + +{{/* +Verify a dictionary value +*/}} +{{- define "utils.baseVerifyDictionary" -}} +{{- include "utils.verifyValue" (prepend . "map") -}} +{{- end -}} + +{{/* +Verify a required dictionary value +*/}} +{{- define "utils.verifyDictionary" -}} +{{- include "utils.baseVerifyDictionary" (append . true) -}} +{{- end -}} + +{{/* +Verify an optional dictionary value +*/}} +{{- define "utils.verifyOptionalDictionary" -}} +{{- include "utils.baseVerifyDictionary" (append . false) -}} +{{- end -}} + +{{/* +Verify an enum string value +*/}} +{{- define "utils.baseVerifyEnum" -}} +{{- $scope := index . 0 -}} +{{- $name := index . 1 -}} +{{- $legalValues := index . 2 -}} +{{- $isRequired := index . 3 -}} +{{- if include "utils.baseVerifyString" (list $scope $name $isRequired) -}} +{{- $parent := $scope.validationScope -}} +{{- if include "utils.dictionaryHasNonNullValue" (list $parent $name) -}} +{{- $value := index $parent $name -}} +{{- if has $value $legalValues -}} + true +{{- else -}} +{{ $errorMsg := cat $name "must be one of the following values" $legalValues ":" $value -}} +{{- include "utils.recordValidationError" (list $scope $errorMsg) -}} +{{- end -}} +{{- end -}} +{{- end -}} +{{- end -}} + +{{/* +Verify a required enum string value +*/}} +{{- define "utils.verifyEnum" -}} +{{- include "utils.baseVerifyEnum" (append . true) -}} +{{- end -}} + +{{/* +Verify an optional enum string value +*/}} +{{- define "utils.verifyOptionalEnum" -}} +{{- include "utils.baseVerifyEnum" (append . 
false) -}} +{{- end -}} + +{{/* +Verify a kubernetes resource name string value +*/}} +{{- define "utils.baseVerifyResourceName" -}} +{{/* https://kubernetes.io/docs/concepts/overview/working-with-objects/names */}} +{{/* names: only lower case, numbers, dot, dash, max 253 */}} +{{/* https://kubernetes.io/docs/concepts/overview/working-with-objects/labels/#syntax-and-character-set */}} +{{/* labels/selectors - upper & lower case, numbers, dot, dash, underscore, max 63 */}} +{{- $scope := index . 0 -}} +{{- $name := index . 1 -}} +{{- $max := index . 2 -}} +{{- $isRequired := index . 3 -}} +{{- if include "utils.baseVerifyString" (list $scope $name $isRequired) -}} +{{- $parent := $scope.validationScope -}} +{{- if include "utils.dictionaryHasNonNullValue" (list $parent $name) -}} +{{- $value := index $parent $name -}} +{{- $len := len $value -}} +{{- if and (le $len $max) (regexMatch "^[a-z0-9.-]+$" $value) -}} + true +{{- else -}} +{{- $errorMsg := cat $name "must only contain lower case letters, numbers, dashes and dots, and must not contain more than" $max "characters: " $value -}} +{{- include "utils.recordValidationError" (list $scope $errorMsg) -}} +{{- end -}} +{{- end -}} +{{- else -}} +{{- end -}} +{{- end -}} + +{{/* +Verify a required kubernetes resource name string value +*/}} +{{- define "utils.verifyResourceName" -}} +{{- include "utils.baseVerifyResourceName" (append . true) -}} +{{- end -}} + +{{/* +Verify an optional kubernetes resource name string value +*/}} +{{- define "utils.verifyOptionalResourceName" -}} +{{- include "utils.baseVerifyResourceName" (append . false) -}} +{{- end -}} + +{{/* +Verify external service name suffix string value +*/}} +{{- define "utils.verifyExternalServiceNameSuffix" -}} +{{- include "utils.baseVerifyResourceName" (append . 
false) -}} +{{- end -}} + +{{/* +Verify introspector job name suffix string value +*/}} +{{- define "utils.verifyIntrospectorJobNameSuffix" -}} +{{- include "utils.baseVerifyResourceName" (append . false) -}} +{{- end -}} + +{{/* +Verify a list of strings value +*/}} +{{- define "utils.baseVerifyStringList" -}} +{{- include "utils.verifyListValue" (prepend . "string") -}} +{{- end -}} + +{{/* +Verify a required list of strings value +*/}} +{{- define "utils.verifyStringList" -}} +{{- include "utils.baseVerifyStringList" (append . true) -}} +{{- end -}} + +{{/* +Verify an optional list of strings value +*/}} +{{- define "utils.verifyOptionalStringList" -}} +{{- include "utils.baseVerifyStringList" (append . false) -}} +{{- end -}} + +{{/* +Verify a list of dictionaries value +*/}} +{{- define "utils.baseVerifyDictionaryList" -}} +{{- include "utils.verifyListValue" (prepend . "map") -}} +{{- end -}} + +{{/* +Verify a required list of dictionaries value +*/}} +{{- define "utils.verifyDictionaryList" -}} +{{- include "utils.baseVerifyDictionaryList" (append . true) -}} +{{- end -}} + +{{/* +Verify an optional list of dictionaries value +*/}} +{{- define "utils.verifyOptionalDictionaryList" -}} +{{- include "utils.baseVerifyDictionaryList" (append . false) -}} +{{- end -}} + +{{/* +Merge a set of dictionaries into a single dictionary. + +The scope must be a list of dictionaries, starting with the least specific +and ending with the most specific. + +First it makes an empty destination dictionary, then iterates over the dictionaries, +overlaying their values on the destination dictionary. + +If a value is null, then it removes that key from the destination dictionary. + +If the value is already present in the destination dictionary, and the old and +new values are both dictionaries, it merges them into the destination. +*/}} +{{- define "utils.mergeDictionaries" -}} +{{- $dest := dict -}} +{{- range $src := . 
-}} +{{- if not (empty $src) -}} +{{- range $key, $value := $src -}} +{{- $ignore := include "utils.mergeDictionaryValue" (list $dest $key $value) -}} +{{- end -}} +{{- end -}} +{{- end -}} +{{- toYaml $dest -}} +{{- end -}} + +{{/* +Merge a value into a dictionary. +This is like helm's 'merge' function, except that it handles null entries too. +*/}} +{{- define "utils.mergeDictionaryValue" -}} +{{- $dest := index . 0 -}} +{{- $key := index . 1 -}} +{{- $newValue := index . 2 -}} +{{- $newType := typeOf $newValue -}} +{{- if hasKey $dest $key -}} +{{- if eq $newType "" -}} +{{/* # if the value already existed, and the new value is null, remove the old value */}} +{{- $ignore := unset $dest $key -}} +{{- else -}} +{{- $oldValue := index $dest $key -}} +{{- $oldKind := kindOf $oldValue -}} +{{- $newKind := kindOf $newValue -}} +{{- if (and (eq $oldKind "map") (eq $newKind "map")) -}} +{{/* # if both values are maps, merge them */}} +{{- $merged := include "utils.mergeDictionaries" (list $oldValue $newValue) | fromYaml -}} +{{- $ignore := set $dest $key $merged -}} +{{- else -}} +{{/* # replace the old value with the new one */}} +{{- $ignore := set $dest $key $newValue -}} +{{- end -}} +{{- end -}} +{{- else -}} +{{- if not (eq $newType "") -}} +{{/* #if there was no old value, and the new value isn't null, use the new value */}} +{{- $ignore := set $dest $key $newValue -}} +{{- end -}} +{{- end -}} +{{- end -}} + +{{/* +Make a writable copy of a dictionary. +TBD - does helm provide a clone method we can use instead? +*/}} +{{- define "utils.cloneDictionary" -}} +{{- include "utils.mergeDictionaries" (list .) -}} +{{- end -}} + +{{/* +Verify that a list of values (exclude) can not be defined if another value (key) is already defined +*/}} +{{- define "utils.mutexValue" -}} +{{- $scope := index . 0 -}} +{{- $key := index . 1 -}} +{{- $exclude := index . 2 -}} +{{- $type := index . 3 -}} +{{- $parent := $scope.validationScope -}} +{{- $args := . 
-}} +{{- $status := dict -}} +{{- if hasKey $parent $key -}} +{{- range $value := $exclude -}} +{{- if hasKey $parent $value -}} +{{- $errorMsg := cat $value "can not be present when" $key "is defined" " " -}} +{{- include "utils.recordValidationError" (list $scope $errorMsg) -}} +{{- $ignore := set $status "error" true -}} +{{- end -}} +{{- end -}} +{{- end -}} +{{- if not (hasKey $status "error") -}} + true +{{- end -}} +{{- end -}} + +{{/* +Verify that a list of strings can not be defined if another string is already defined +*/}} +{{- define "utils.mutexString" -}} +{{- include "utils.mutexValue" (append . "string") -}} +{{- end -}} + +{{/* +Verify that a Kubernetes resource exists in a given namespace +*/}} +{{- define "utils.verifyK8SResource" -}} +{{- $scope := index . 0 -}} +{{- $name := index . 1 -}} +{{- $type := index . 2 -}} +{{- $namespace := index . 3 -}} +{{- $foundNS := (lookup "v1" "Namespace" "" $namespace) }} +{{- if $foundNS }} +{{- $foundResource := (lookup "v1" $type $namespace $name) }} +{{- if not $foundResource }} +{{- $errorMsg := cat $type $name " not found in namespace " $namespace -}} +{{- include "utils.recordValidationError" (list $scope $errorMsg) -}} +{{- end -}} +{{- end -}} +{{- end -}} diff --git a/OracleIdentityGovernance/kubernetes/charts/weblogic-operator/templates/_validate-inputs.tpl b/OracleIdentityGovernance/kubernetes/charts/weblogic-operator/templates/_validate-inputs.tpl new file mode 100755 index 000000000..a6ee7dd02 --- /dev/null +++ b/OracleIdentityGovernance/kubernetes/charts/weblogic-operator/templates/_validate-inputs.tpl @@ -0,0 +1,63 @@ +# Copyright (c) 2018, 2021, Oracle and/or its affiliates. +# Licensed under the Universal Permissive License v 1.0 as shown at https://oss.oracle.com/licenses/upl. + +{{- define "operator.validateInputs" -}} +{{- $scope := include "utils.cloneDictionary" . 
| fromYaml -}} +{{- $ignore:= include "utils.startValidation" $scope -}} +{{- $ignore := include "utils.pushValidationContext" (list $scope "Release") -}} +{{- $ignore := include "utils.verifyResourceName" (list $scope "Namespace" 63) -}} +{{- $ignore := include "utils.popValidationContext" $scope -}} +{{- $ignore := include "utils.verifyString" (list $scope "serviceAccount") -}} +{{- $ignore := include "utils.verifyK8SResource" (list $scope .serviceAccount "ServiceAccount" .Release.Namespace) -}} +{{- $ignore := include "utils.verifyString" (list $scope "image") -}} +{{- $ignore := include "utils.verifyEnum" (list $scope "imagePullPolicy" (list "Always" "IfNotPresent" "Never")) -}} +{{- $ignore := include "utils.verifyOptionalDictionaryList" (list $scope "imagePullSecrets") -}} +{{- $ignore := include "utils.verifyEnum" (list $scope "javaLoggingLevel" (list "SEVERE" "WARNING" "INFO" "CONFIG" "FINE" "FINER" "FINEST")) -}} +{{- if include "utils.verifyBoolean" (list $scope "externalRestEnabled") -}} +{{- if $scope.externalRestEnabled -}} +{{- $ignore := include "utils.verifyInteger" (list $scope "externalRestHttpsPort") -}} +{{- $ignore := include "utils.mutexString" (list $scope "externalRestIdentitySecret" (list "externalOperatorKey" "externalOperatorCert")) -}} +{{- if (or (hasKey $scope "externalOperatorCert") (hasKey $scope "externalOperatorKey")) -}} +{{- $ignore := include "utils.verifyString" (list $scope "externalOperatorCert") -}} +{{- $ignore := include "utils.verifyString" (list $scope "externalOperatorKey") -}} +{{- else }} +{{- $ignore := include "utils.verifyString" (list $scope "externalRestIdentitySecret") -}} +{{- end -}} +{{- end -}} +{{- end -}} +{{- if include "utils.verifyBoolean" (list $scope "remoteDebugNodePortEnabled") -}} +{{- if $scope.remoteDebugNodePortEnabled -}} +{{- $ignore := include "utils.verifyBoolean" (list $scope "suspendOnDebugStartup") -}} +{{- $ignore := include "utils.verifyInteger" (list $scope "internalDebugHttpPort") -}} 
+{{- $ignore := include "utils.verifyInteger" (list $scope "externalDebugHttpPort") -}} +{{- end -}} +{{- end -}} +{{- $ignore := include "utils.verifyOptionalBoolean" (list $scope "enableClusterRoleBinding") -}} +{{- if and .enableClusterRoleBinding (or (eq (default "List" .domainNamespaceSelectionStrategy) "Dedicated") (and .dedicated (eq (default "List" .domainNamespaceSelectionStrategy) "List"))) }} +{{- $errorMsg := "The enableClusterRoleBinding value may not be true when either dedicated is true or domainNamespaceSelectionStrategy is Dedicated" -}} +{{- include "utils.recordValidationError" (list $scope $errorMsg) -}} +{{- end -}} +{{- if eq (default "List" $scope.domainNamespaceSelectionStrategy) "List" -}} +{{- $ignore := include "utils.verifyStringList" (list $scope "domainNamespaces") -}} +{{- end -}} +{{- if include "utils.verifyBoolean" (list $scope "elkIntegrationEnabled") -}} +{{- if $scope.elkIntegrationEnabled -}} +{{- $ignore := include "utils.verifyString" (list $scope "logStashImage") -}} +{{- $ignore := include "utils.verifyString" (list $scope "elasticSearchHost") -}} +{{- $ignore := include "utils.verifyInteger" (list $scope "elasticSearchPort") -}} +{{- end -}} +{{- end -}} +{{- $ignore := include "utils.verifyOptionalBoolean" (list $scope "dedicated") -}} +{{- $ignore := include "utils.verifyOptionalEnum" (list $scope "domainNamespaceSelectionStrategy" (list "List" "LabelSelector" "RegExp" "Dedicated")) -}} +{{- if eq (default "List" $scope.domainNamespaceSelectionStrategy) "LabelSelector" -}} +{{- $ignore := include "utils.verifyString" (list $scope "domainNamespaceLabelSelector") -}} +{{- end -}} +{{- if eq (default "List" $scope.domainNamespaceSelectionStrategy) "RegExp" -}} +{{- $ignore := include "utils.verifyString" (list $scope "domainNamespaceRegExp") -}} +{{- end -}} +{{- $ignore := include "utils.verifyOptionalBoolean" (list $scope "mockWLS") -}} +{{- $ignore := include "utils.verifyIntrospectorJobNameSuffix" (list $scope 
"introspectorJobNameSuffix" 25) -}} +{{- $ignore := include "utils.verifyExternalServiceNameSuffix" (list $scope "externalServiceNameSuffix" 10) -}} +{{- $ignore := include "utils.verifyOptionalBoolean" (list $scope "clusterSizePaddingValidationEnabled") -}} +{{- $ignore := include "utils.endValidation" $scope -}} +{{- end -}} diff --git a/OracleIdentityGovernance/kubernetes/charts/weblogic-operator/templates/main.yaml b/OracleIdentityGovernance/kubernetes/charts/weblogic-operator/templates/main.yaml new file mode 100755 index 000000000..fb7e731f9 --- /dev/null +++ b/OracleIdentityGovernance/kubernetes/charts/weblogic-operator/templates/main.yaml @@ -0,0 +1,11 @@ +# Copyright (c) 2018, 2021, Oracle and/or its affiliates. +# Licensed under the Universal Permissive License v 1.0 as shown at https://oss.oracle.com/licenses/upl. + +{{- $scope := include "utils.cloneDictionary" .Values | fromYaml -}} +{{- $ignore := set $scope "Files" .Files -}} +{{- $ignore := set $scope "Chart" .Chart -}} +{{- $ignore := set $scope "Release" .Release -}} +{{- $ignore := set $scope "APIVersions" .Capabilities.APIVersions -}} + +{{ include "operator.validateInputs" $scope }} +{{- include "operator.operator" $scope }} diff --git a/OracleIdentityGovernance/kubernetes/charts/weblogic-operator/values.yaml b/OracleIdentityGovernance/kubernetes/charts/weblogic-operator/values.yaml new file mode 100755 index 000000000..dac9a5382 --- /dev/null +++ b/OracleIdentityGovernance/kubernetes/charts/weblogic-operator/values.yaml @@ -0,0 +1,224 @@ +# Copyright (c) 2018, 2021, Oracle and/or its affiliates. +# Licensed under the Universal Permissive License v 1.0 as shown at https://oss.oracle.com/licenses/upl. + +# serviceAccount specifies the name of the ServiceAccount in the operator's namespace that the +# operator will use to make requests to the Kubernetes API server. +# The customer is responsible for creating the ServiceAccount in the same namespace as this Helm release. 
+# If not specified, the operator will use the Helm release namespace's 'default' ServiceAccount. +serviceAccount: "default" + +# domainNamespaceSelectionStrategy specifies how the operator will select the set of namespaces +# that it will manage. Legal values are: List, LabelSelector, RegExp, and Dedicated. If set to 'List', +# then the operator will manage the set of namespaces listed by the 'domainNamespaces' value. +# If set to 'LabelSelector', then the operator will manage the set of namespaces discovered by a list +# of namespaces using the value specified by 'domainNamespaceLabelSelector' as a label selector. +# If set to 'RegExp', then the operator will manage the set of namespaces discovered by a list +# of namespaces using the value specified by 'domainNamespaceRegExp' as a regular expression matched +# against the namespace names. +# If set to 'Dedicated', then the operator will manage WebLogic Domains only in the same namespace +# where the operator itself is deployed, which is the namespace of the Helm release. +domainNamespaceSelectionStrategy: List + +# This value is deprecated. Please use 'domainNamespaceSelectionStrategy: Dedicated'. +# dedicated specifies if this operator will manage WebLogic Domains only in the same namespace in +# which the operator itself is deployed. If set to 'true', then the 'domainNamespaces' value below +# is ignored. This value is ignored if 'domainNamespaceSelectionStrategy' is set to a value other +# than 'List'. +# dedicated: false + +# domainNamespaces specifies the list of WebLogic Domain namespaces that this operator manages. This value +# is ignored if 'domainNamespaceSelectionStrategy' is not 'List'. The customer is responsible for creating these +# namespaces. If not specified, then the operator will manage WebLogic Domains in the Kubernetes 'default' namespace. +# +# Example: In the configuration below, the operator will manage namespace1 and namespace2. 
+# +# domainNamespaces: +# - "namespace1" +# - "namespace2" +domainNamespaces: +- "default" + +# domainNamespaceLabelSelector specifies the label selector value that the operator will use when listing +# namespaces in search of the namespaces that contain WebLogic Domains that this operator will manage. Ignored +# if 'domainNamespaceSelectionStrategy' is not 'LabelSelector'. +# +# Example: manage any namespace with a label named "weblogic-operator". +# +# domainNamespaceLabelSelector: "weblogic-operator" +# +# domainNamespaceLabelSelector: + +# domainNamespaceRegExp specifies a regular expression that will be matched against namespace names when listing +# namespaces in search of the namespaces that contain WebLogic Domains that this operator will manage. Ignored +# if 'domainNamespaceSelectionStrategy' is not 'RegExp'. +# +# Example: manage any namespace where the namespace name starts with "prod". +# +# domainNamespaceRegExp: "^prod" +# +# domainNamespaceRegExp: + +# enableClusterRoleBinding specifies whether the roles necessary for the operator to manage domains +# will be granted using a ClusterRoleBinding rather than using RoleBindings in each managed namespace. +enableClusterRoleBinding: false + +# image specifies the container image containing the operator. +image: "ghcr.io/oracle/weblogic-kubernetes-operator:3.3.0" + +# imagePullPolicy specifies the image pull policy for the operator's container image. +imagePullPolicy: IfNotPresent + +# imagePullSecrets contains an optional list of Kubernetes Secrets, in the operator's namespace, +# that are needed to access the registry containing the operator's container image. +# The customer is responsible for creating the Secret. +# If no Secrets are required, then omit this property. 
+# +# Example: a Secret is needed, and has been stored in 'my-operator-secret' +# +# imagePullSecrets: +# - name: "my-operator-secret" + +# externalRestEnabled specifies whether the operator's REST interface is exposed +# outside of the Kubernetes cluster on the port specified by the 'externalRestHttpsPort' +# property. +# +# If set to true, then the customer must provide the SSL certificate and private key for +# the operator's external REST interface by specifying the 'externalOperatorCert' and +# 'externalOperatorKey' properties. +externalRestEnabled: false + +# externalRestHttpsPort specifies the node port that should be allocated for the external operator REST HTTPS interface. +# This parameter is required if 'externalRestEnabled' is true. +# Otherwise, it is ignored. +externalRestHttpsPort: 31001 + +# The name of the Secret used to store the certificate and private key to use for the external operator REST HTTPS interface. +# The Secret has to be created in the same namespace as the WebLogic operator. +# This parameter is required if 'externalRestEnabled' is true. Otherwise, it is ignored. +# As an example, an external REST identity can be created using the following sample script +# kubernetes/samples/scripts/rest/generate-external-rest-identity.sh +# externalRestIdentitySecret: + +# elkIntegrationEnabled specifies whether or not ELK integration is enabled. +elkIntegrationEnabled: false + +# logStashImage specifies the container image containing logstash. +# This parameter is ignored if 'elkIntegrationEnabled' is false. +logStashImage: "logstash:6.6.0" + +# elasticSearchHost specifies the hostname of where elasticsearch is running. +# This parameter is ignored if 'elkIntegrationEnabled' is false. +elasticSearchHost: "elasticsearch.default.svc.cluster.local" + +# elasticSearchPort specifies the port number of where elasticsearch is running. +# This parameter is ignored if 'elkIntegrationEnabled' is false. 
+elasticSearchPort: 9200 + +# featureGates specifies a set of key=value pairs separated by commas that describe whether a given +# operator feature is enabled. You enable a feature by including a key=value pair where the key is the +# feature name and the value is "true". This will allow the operator team to release features that +# are not yet ready to be enabled by default, but that are ready for testing by customers. Once a feature is +# stable then it will be enabled by default and can not be disabled using this configuration. +# featureGates: "...,AuxiliaryImage=true" + +# javaLoggingLevel specifies the Java logging level for the operator. This affects the operator pod's +# log output and the contents of log files in the container's /logs/ directory. +# Valid values are: "SEVERE", "WARNING", "INFO", "CONFIG", "FINE", "FINER", and "FINEST". +javaLoggingLevel: "INFO" + +# javaLoggingFileSizeLimit specifies the maximum size in bytes for an individual Java logging file in the operator container's +# /logs/ directory. +javaLoggingFileSizeLimit: 20000000 + +# javaLoggingFileCount specifies the number of Java logging files to preserve in the operator container's /logs/ +# directory as the files are rotated. +javaLoggingFileCount: 10 + +# labels specifies a set of key-value labels that will be added to each pod running the operator. +# See https://kubernetes.io/docs/concepts/overview/working-with-objects/labels/ +#labels: + +# annotations specifies a set of key-value annotations that will be added to each pod running the operator. +# See https://kubernetes.io/docs/concepts/overview/working-with-objects/annotations/ +#annotations: + +# nodeSelector specifies a matching rule that the Kubernetes scheduler will use when selecting the node +# where the operator will run. If the nodeSelector value is specified, then this content will be added to +# the operator's deployment. 
See https://kubernetes.io/docs/concepts/scheduling-eviction/assign-pod-node/#nodeselector +# for more information on node selectors. +#nodeSelector: + +# affinity specifies a set of matching rules related to the presence of other workloads that the Kubernetes scheduler +# will use when selecting the node where the operator will run. If the affinity value is specified, then this content +# will be added to the operator's deployment. See https://kubernetes.io/docs/concepts/scheduling-eviction/assign-pod-node/#affinity-and-anti-affinity +# for more information on affinity and anti-affinity. +#affinity: + +# Values related to debugging the operator. +# Customers should not need to use the following properties + +# remoteDebugNodePortEnabled specifies whether or not the operator will provide a Java remote debug interface on the +# provided port. If the 'suspendOnDebugStartup' property is specified, the operator will suspend execution +# until a remote debugger has attached. +# The 'internalDebugHttpPort' property controls the port number inside the Kubernetes +# cluster and the 'externalDebugHttpPort' property controls the port number outside +# the Kubernetes cluster. +remoteDebugNodePortEnabled: false + +#suspendOnDebugStartup specifies whether the operator will suspend on startup when a Java remote debugging is enabled. +suspendOnDebugStartup: false + +# internalDebugHttpPort specifies the port number inside the Kubernetes cluster for the operator's Java +# remote debug interface. +# This parameter is required if 'remoteDebugNodePortEnabled' is true. +# Otherwise, it is ignored. +internalDebugHttpPort: 30999 + +# externalDebugHttpPort specifies the node port that should be allocated for the operator's +# Java remote debug interface. +# This parameter is required if 'remoteDebugNodePortEnabled' is true. +# Otherwise, it is ignored. 
+externalDebugHttpPort: 30999 + +# dns1123Fields overrides the default list of field names that the operator +# converts to DNS-1123 legal values when replacing variable references in the +# Domain resource. The default list can be found inside the class LegalNames +# in the oracle.kubernetes.operator.helpers package. +# Supply a comma separated list of field names to customize the list of fields +# such as "name, claimName, volumeName", or leave it commented out to use +# the default list of field names. +# dns1123Fields: "" + +# introspectorJobNameSuffix overrides the default suffix that the operator uses +# to append to the domainUID to form the name of the domain introspector job name. +# Note that the resultant job name should not be more than 58 characters due to +# the Kubernetes limit to the name of a job and Kubernetes appends five additional +# characters to the name of the pod that is created by the job controller. +# The default suffix is '-introspector'. +# The default suffix in pre-3.1.0 is "-introspect-domain-job" +introspectorJobNameSuffix: "-introspector" + +# externalServiceNameSuffix overrides the default suffix that the operator uses +# to append to the domainUID and the WebLogic admin server name, to form the name +# of the domain's admin server external service. +# Note that the resultant name should not be more than 63 characters due to +# the Kubernetes limit to the name of a service. +# The default suffix is '-ext'. +# The default suffix in pre-3.1.0 is "-external". +externalServiceNameSuffix: "-ext" + +# clusterSizePaddingValidationEnabled specifies if additional one or two characters +# need to be reserved to account for longer managed server names because of an increased +# cluster size. +# The default value is true. +clusterSizePaddingValidationEnabled: true + +# tokenReviewAuthentication, if set to true, specifies whether the operator's REST API should use +# 1. Kubernetes token review API for authenticating users, and +# 2. 
Kubernetes subject access review API for authorizing a user's operation (get, list, +# patch, etc) on a resource. +# 3. Update the Domain resource using the operator's privileges. +# This parameter, if set to false, will use the caller's bearer token for any update +# to the Domain resource so that it is done using the caller's privileges. +# The default value is false. +#tokenReviewAuthentication: false diff --git a/OracleIdentityGovernance/kubernetes/common/createFMWJRFDomain.py b/OracleIdentityGovernance/kubernetes/common/createFMWJRFDomain.py new file mode 100755 index 000000000..bde936ca5 --- /dev/null +++ b/OracleIdentityGovernance/kubernetes/common/createFMWJRFDomain.py @@ -0,0 +1,332 @@ +# Copyright (c) 2014, 2021, Oracle and/or its affiliates. +# Licensed under the Universal Permissive License v 1.0 as shown at https://oss.oracle.com/licenses/upl. + +import os +import sys + +import com.oracle.cie.domain.script.jython.WLSTException as WLSTException + +class Infra12213Provisioner: + + MACHINES = { + 'machine1' : { + 'NMType': 'SSL', + 'ListenAddress': 'localhost', + 'ListenPort': 5658 + } + } + + JRF_12213_TEMPLATES = { + 'baseTemplate' : '@@ORACLE_HOME@@/wlserver/common/templates/wls/wls.jar', + 'extensionTemplates' : [ + '@@ORACLE_HOME@@/oracle_common/common/templates/wls/oracle.jrf_template.jar', + '@@ORACLE_HOME@@/oracle_common/common/templates/wls/oracle.jrf.ws.async_template.jar', + '@@ORACLE_HOME@@/oracle_common/common/templates/wls/oracle.wsmpm_template.jar', + '@@ORACLE_HOME@@/oracle_common/common/templates/wls/oracle.ums_template.jar', + '@@ORACLE_HOME@@/em/common/templates/wls/oracle.em_wls_template.jar' + ], + 'serverGroupsToTarget' : [ 'JRF-MAN-SVR', 'WSMPM-MAN-SVR' ] + } + + def __init__(self, oracleHome, javaHome, domainParentDir, adminListenPort, adminName, managedNameBase, managedServerPort, prodMode, managedCount, clusterName): + self.oracleHome = self.validateDirectory(oracleHome) + self.javaHome = self.validateDirectory(javaHome) + 
self.domainParentDir = self.validateDirectory(domainParentDir, create=True) + return + + def createInfraDomain(self, domainName, user, password, db, dbPrefix, dbPassword, adminListenPort, adminName, + managedNameBase, managedServerPort, prodMode, managedCount, clusterName, + exposeAdminT3Channel=None, t3ChannelPublicAddress=None, t3ChannelPort=None): + domainHome = self.createBaseDomain(domainName, user, password, adminListenPort, adminName, managedNameBase, + managedServerPort, prodMode, managedCount, clusterName + ) + self.extendDomain(domainHome, db, dbPrefix, dbPassword, exposeAdminT3Channel, t3ChannelPublicAddress, + t3ChannelPort) + + def createBaseDomain(self, domainName, user, password, adminListenPort, adminName, managedNameBase, managedServerPort, prodMode, managedCount, clusterName): + baseTemplate = self.replaceTokens(self.JRF_12213_TEMPLATES['baseTemplate']) + + readTemplate(baseTemplate) + setOption('DomainName', domainName) + setOption('JavaHome', self.javaHome) + if (prodMode == 'true'): + setOption('ServerStartMode', 'prod') + else: + setOption('ServerStartMode', 'dev') + set('Name', domainName) + + admin_port = int(adminListenPort) + ms_port = int(managedServerPort) + ms_count = int(managedCount) + + # Create Admin Server + # ======================= + print 'Creating Admin Server...' + cd('/Servers/AdminServer') + #set('ListenAddress', '%s-%s' % (domain_uid, admin_server_name_svc)) + set('ListenPort', admin_port) + set('Name', adminName) + + # Define the user password for weblogic + # ===================================== + cd('/Security/' + domainName + '/User/weblogic') + set('Name', user) + set('Password', password) + + # Create a cluster + # ====================== + print 'Creating cluster...' 
+ cd('/') + cl=create(clusterName, 'Cluster') + + # Create managed servers + for index in range(0, ms_count): + cd('/') + msIndex = index+1 + cd('/') + name = '%s%s' % (managedNameBase, msIndex) + create(name, 'Server') + cd('/Servers/%s/' % name ) + print('managed server name is %s' % name); + set('ListenPort', ms_port) + set('NumOfRetriesBeforeMSIMode', 0) + set('RetryIntervalBeforeMSIMode', 1) + set('Cluster', clusterName) + + # Create Node Manager + # ======================= + print 'Creating Node Managers...' + for machine in self.MACHINES: + cd('/') + create(machine, 'Machine') + cd('Machine/' + machine) + create(machine, 'NodeManager') + cd('NodeManager/' + machine) + for param in self.MACHINES[machine]: + set(param, self.MACHINES[machine][param]) + + + setOption('OverwriteDomain', 'true') + domainHome = self.domainParentDir + '/' + domainName + print 'Will create Base domain at ' + domainHome + + print 'Writing base domain...' + writeDomain(domainHome) + closeTemplate() + print 'Base domain created at ' + domainHome + return domainHome + + + def extendDomain(self, domainHome, db, dbPrefix, dbPassword, exposeAdminT3Channel, t3ChannelPublicAddress, + t3ChannelPort): + print 'Extending domain at ' + domainHome + print 'Database ' + db + readDomain(domainHome) + setOption('AppDir', self.domainParentDir + '/applications') + + print 'ExposeAdminT3Channel %s with %s:%s ' % (exposeAdminT3Channel, t3ChannelPublicAddress, t3ChannelPort) + if 'true' == exposeAdminT3Channel: + self.enable_admin_channel(t3ChannelPublicAddress, t3ChannelPort) + + print 'Applying JRF templates...' + for extensionTemplate in self.JRF_12213_TEMPLATES['extensionTemplates']: + addTemplate(self.replaceTokens(extensionTemplate)) + + print 'Extension Templates added' + + print 'Configuring the Service Table DataSource...' 
+ fmwDb = 'jdbc:oracle:thin:@' + db + print 'fmwDatabase ' + fmwDb + cd('/JDBCSystemResource/LocalSvcTblDataSource/JdbcResource/LocalSvcTblDataSource') + cd('JDBCDriverParams/NO_NAME_0') + set('DriverName', 'oracle.jdbc.OracleDriver') + set('URL', fmwDb) + set('PasswordEncrypted', dbPassword) + + stbUser = dbPrefix + '_STB' + cd('Properties/NO_NAME_0/Property/user') + set('Value', stbUser) + + print 'Getting Database Defaults...' + getDatabaseDefaults() + + print 'Targeting Server Groups...' + managedName= '%s%s' % (managedNameBase, 1) + print "Set CoherenceClusterSystemResource to defaultCoherenceCluster for server:" + managedName + serverGroupsToTarget = list(self.JRF_12213_TEMPLATES['serverGroupsToTarget']) + cd('/') + setServerGroups(managedName, serverGroupsToTarget) + print "Set CoherenceClusterSystemResource to defaultCoherenceCluster for server:" + managedName + cd('/Servers/' + managedName) + set('CoherenceClusterSystemResource', 'defaultCoherenceCluster') + + print 'Targeting Cluster ...' + cd('/') + print "Set CoherenceClusterSystemResource to defaultCoherenceCluster for cluster:" + clusterName + cd('/Cluster/' + clusterName) + set('CoherenceClusterSystemResource', 'defaultCoherenceCluster') + print "Set WLS clusters as target of defaultCoherenceCluster:" + clusterName + cd('/CoherenceClusterSystemResource/defaultCoherenceCluster') + set('Target', clusterName) + + print 'Preparing to update domain...' 
+ updateDomain() + print 'Domain updated successfully' + closeDomain() + return + + + ########################################################################### + # Helper Methods # + ########################################################################### + + def validateDirectory(self, dirName, create=False): + directory = os.path.realpath(dirName) + if not os.path.exists(directory): + if create: + os.makedirs(directory) + else: + message = 'Directory ' + directory + ' does not exist' + raise WLSTException(message) + elif not os.path.isdir(directory): + message = 'Directory ' + directory + ' is not a directory' + raise WLSTException(message) + return self.fixupPath(directory) + + + def fixupPath(self, path): + result = path + if path is not None: + result = path.replace('\\', '/') + return result + + + def replaceTokens(self, path): + result = path + if path is not None: + result = path.replace('@@ORACLE_HOME@@', oracleHome) + return result + + def enable_admin_channel(self, admin_channel_address, admin_channel_port): + if admin_channel_address == None or admin_channel_port == 'None': + return + cd('/') + admin_server_name = get('AdminServerName') + print('setting admin server t3channel for ' + admin_server_name) + cd('/Servers/' + admin_server_name) + create('T3Channel', 'NetworkAccessPoint') + cd('/Servers/' + admin_server_name + '/NetworkAccessPoint/T3Channel') + set('ListenPort', int(admin_channel_port)) + set('PublicPort', int(admin_channel_port)) + set('PublicAddress', admin_channel_address) + +############################# +# Entry point to the script # +############################# + +def usage(): + print sys.argv[0] + ' -oh -jh -parent -name ' + \ + '-user -password ' + \ + '-rcuDb -rcuPrefix -rcuSchemaPwd ' \ + '-adminListenPort -adminName ' \ + '-managedNameBase -managedServerPort -prodMode ' \ + '-managedServerCount -clusterName ' \ + '-exposeAdminT3Channel -t3ChannelPublicAddress
' \ + '-t3ChannelPort ' + sys.exit(0) + +# Uncomment for Debug only +#print str(sys.argv[0]) + " called with the following sys.argv array:" +#for index, arg in enumerate(sys.argv): +# print "sys.argv[" + str(index) + "] = " + str(sys.argv[index]) + +if len(sys.argv) < 16: + usage() + +#oracleHome will be passed by command line parameter -oh. +oracleHome = None +#javaHome will be passed by command line parameter -jh. +javaHome = None +#domainParentDir will be passed by command line parameter -parent. +domainParentDir = None +#domainUser is hard-coded to weblogic. You can change to other name of your choice. Command line parameter -user. +domainUser = 'weblogic' +#domainPassword will be passed by command line parameter -password. +domainPassword = None +#rcuDb will be passed by command line parameter -rcuDb. +rcuDb = None +#change rcuSchemaPrefix to your infra schema prefix. Command line parameter -rcuPrefix. +rcuSchemaPrefix = 'DEV12' +#change rcuSchemaPassword to your infra schema password. Command line parameter -rcuSchemaPwd. 
+rcuSchemaPassword = None +exposeAdminT3Channel = None +t3ChannelPort = None +t3ChannelPublicAddress = None +i = 1 +while i < len(sys.argv): + if sys.argv[i] == '-oh': + oracleHome = sys.argv[i + 1] + i += 2 + elif sys.argv[i] == '-jh': + javaHome = sys.argv[i + 1] + i += 2 + elif sys.argv[i] == '-parent': + domainParentDir = sys.argv[i + 1] + i += 2 + elif sys.argv[i] == '-name': + domainName = sys.argv[i + 1] + i += 2 + elif sys.argv[i] == '-user': + domainUser = sys.argv[i + 1] + i += 2 + elif sys.argv[i] == '-password': + domainPassword = sys.argv[i + 1] + i += 2 + elif sys.argv[i] == '-rcuDb': + rcuDb = sys.argv[i + 1] + i += 2 + elif sys.argv[i] == '-rcuPrefix': + rcuSchemaPrefix = sys.argv[i + 1] + i += 2 + elif sys.argv[i] == '-rcuSchemaPwd': + rcuSchemaPassword = sys.argv[i + 1] + i += 2 + elif sys.argv[i] == '-adminListenPort': + adminListenPort = sys.argv[i + 1] + i += 2 + elif sys.argv[i] == '-adminName': + adminName = sys.argv[i + 1] + i += 2 + elif sys.argv[i] == '-managedNameBase': + managedNameBase = sys.argv[i + 1] + i += 2 + elif sys.argv[i] == '-managedServerPort': + managedServerPort = sys.argv[i + 1] + i += 2 + elif sys.argv[i] == '-prodMode': + prodMode = sys.argv[i + 1] + i += 2 + elif sys.argv[i] == '-managedServerCount': + managedCount = sys.argv[i + 1] + i += 2 + elif sys.argv[i] == '-clusterName': + clusterName = sys.argv[i + 1] + i += 2 + elif sys.argv[i] == '-t3ChannelPublicAddress': + t3ChannelPublicAddress = sys.argv[i + 1] + i += 2 + elif sys.argv[i] == '-t3ChannelPort': + t3ChannelPort = sys.argv[i + 1] + i += 2 + elif sys.argv[i] == '-exposeAdminT3Channel': + exposeAdminT3Channel = sys.argv[i + 1] + i += 2 + else: + print 'Unexpected argument switch at position ' + str(i) + ': ' + str(sys.argv[i]) + usage() + sys.exit(1) + +provisioner = Infra12213Provisioner(oracleHome, javaHome, domainParentDir, adminListenPort, adminName, managedNameBase, managedServerPort, prodMode, managedCount, clusterName) 
+provisioner.createInfraDomain(domainName, domainUser, domainPassword, rcuDb, rcuSchemaPrefix, rcuSchemaPassword, + adminListenPort, adminName, managedNameBase, managedServerPort, prodMode, managedCount, + clusterName, exposeAdminT3Channel, t3ChannelPublicAddress, t3ChannelPort) \ No newline at end of file diff --git a/OracleIdentityGovernance/kubernetes/common/createFMWRestrictedJRFDomain.py b/OracleIdentityGovernance/kubernetes/common/createFMWRestrictedJRFDomain.py new file mode 100755 index 000000000..acfe5da80 --- /dev/null +++ b/OracleIdentityGovernance/kubernetes/common/createFMWRestrictedJRFDomain.py @@ -0,0 +1,291 @@ +# Copyright (c) 2014, 2021, Oracle and/or its affiliates. +# Licensed under the Universal Permissive License v 1.0 as shown at https://oss.oracle.com/licenses/upl. + +import os +import sys + +import com.oracle.cie.domain.script.jython.WLSTException as WLSTException + +class Infra12213Provisioner: + + MACHINES = { + 'machine1' : { + 'NMType': 'SSL', + 'ListenAddress': 'localhost', + 'ListenPort': 5658 + } + } + + JRF_12213_TEMPLATES = { + 'baseTemplate' : '@@ORACLE_HOME@@/wlserver/common/templates/wls/wls.jar', + 'extensionTemplates' : [ + '@@ORACLE_HOME@@/oracle_common/common/templates/wls/oracle.jrf_restricted_template.jar', + '@@ORACLE_HOME@@/em/common/templates/wls/oracle.em_wls_restricted_template.jar' + ], + 'serverGroupsToTarget' : [ 'JRF-MAN-SVR', 'WSMPM-MAN-SVR' ] + } + + def __init__(self, oracleHome, javaHome, domainParentDir, adminListenPort, adminName, managedNameBase, managedServerPort, prodMode, managedCount, clusterName): + self.oracleHome = self.validateDirectory(oracleHome) + self.javaHome = self.validateDirectory(javaHome) + self.domainParentDir = self.validateDirectory(domainParentDir, create=True) + return + + def createInfraDomain(self, domainName, user, password, adminListenPort, adminName, + managedNameBase, managedServerPort, prodMode, managedCount, clusterName, + exposeAdminT3Channel=None, t3ChannelPublicAddress=None, 
t3ChannelPort=None): + domainHome = self.createBaseDomain(domainName, user, password, adminListenPort, adminName, managedNameBase, + managedServerPort, prodMode, managedCount, clusterName + ) + self.extendDomain(domainHome, exposeAdminT3Channel, t3ChannelPublicAddress, t3ChannelPort) + + def createBaseDomain(self, domainName, user, password, adminListenPort, adminName, managedNameBase, managedServerPort, prodMode, managedCount, clusterName): + baseTemplate = self.replaceTokens(self.JRF_12213_TEMPLATES['baseTemplate']) + + readTemplate(baseTemplate) + setOption('DomainName', domainName) + setOption('JavaHome', self.javaHome) + if (prodMode == 'true'): + setOption('ServerStartMode', 'prod') + else: + setOption('ServerStartMode', 'dev') + set('Name', domainName) + + admin_port = int(adminListenPort) + ms_port = int(managedServerPort) + ms_count = int(managedCount) + + # Create Admin Server + # ======================= + print 'Creating Admin Server...' + cd('/Servers/AdminServer') + #set('ListenAddress', '%s-%s' % (domain_uid, admin_server_name_svc)) + set('ListenPort', admin_port) + set('Name', adminName) + + # Define the user password for weblogic + # ===================================== + cd('/Security/' + domainName + '/User/weblogic') + set('Name', user) + set('Password', password) + + # Create a cluster + # ====================== + print 'Creating cluster...' + cd('/') + cl=create(clusterName, 'Cluster') + + # Create managed servers + for index in range(0, ms_count): + cd('/') + msIndex = index+1 + cd('/') + name = '%s%s' % (managedNameBase, msIndex) + create(name, 'Server') + cd('/Servers/%s/' % name ) + print('managed server name is %s' % name); + set('ListenPort', ms_port) + set('NumOfRetriesBeforeMSIMode', 0) + set('RetryIntervalBeforeMSIMode', 1) + set('Cluster', clusterName) + + # Create Node Manager + # ======================= + print 'Creating Node Managers...' 
+ for machine in self.MACHINES: + cd('/') + create(machine, 'Machine') + cd('Machine/' + machine) + create(machine, 'NodeManager') + cd('NodeManager/' + machine) + for param in self.MACHINES[machine]: + set(param, self.MACHINES[machine][param]) + + + setOption('OverwriteDomain', 'true') + domainHome = self.domainParentDir + '/' + domainName + print 'Will create Base domain at ' + domainHome + + print 'Writing base domain...' + writeDomain(domainHome) + closeTemplate() + print 'Base domain created at ' + domainHome + return domainHome + + + def extendDomain(self, domainHome, exposeAdminT3Channel, t3ChannelPublicAddress, + t3ChannelPort): + print 'Extending domain at ' + domainHome + readDomain(domainHome) + setOption('AppDir', self.domainParentDir + '/applications') + + print 'ExposeAdminT3Channel %s with %s:%s ' % (exposeAdminT3Channel, t3ChannelPublicAddress, t3ChannelPort) + if 'true' == exposeAdminT3Channel: + self.enable_admin_channel(t3ChannelPublicAddress, t3ChannelPort) + + print 'Applying JRF templates...' + for extensionTemplate in self.JRF_12213_TEMPLATES['extensionTemplates']: + addTemplate(self.replaceTokens(extensionTemplate)) + + print 'Extension Templates added' + + print 'Preparing to update domain...' 
+ updateDomain() + print 'Domain updated successfully' + closeDomain() + return + + + ########################################################################### + # Helper Methods # + ########################################################################### + + def validateDirectory(self, dirName, create=False): + directory = os.path.realpath(dirName) + if not os.path.exists(directory): + if create: + os.makedirs(directory) + else: + message = 'Directory ' + directory + ' does not exist' + raise WLSTException(message) + elif not os.path.isdir(directory): + message = 'Directory ' + directory + ' is not a directory' + raise WLSTException(message) + return self.fixupPath(directory) + + + def fixupPath(self, path): + result = path + if path is not None: + result = path.replace('\\', '/') + return result + + + def replaceTokens(self, path): + result = path + if path is not None: + result = path.replace('@@ORACLE_HOME@@', oracleHome) + return result + + def enable_admin_channel(self, admin_channel_address, admin_channel_port): + if admin_channel_address == None or admin_channel_port == 'None': + return + cd('/') + admin_server_name = get('AdminServerName') + print('setting admin server t3channel for ' + admin_server_name) + cd('/Servers/' + admin_server_name) + create('T3Channel', 'NetworkAccessPoint') + cd('/Servers/' + admin_server_name + '/NetworkAccessPoint/T3Channel') + set('ListenPort', int(admin_channel_port)) + set('PublicPort', int(admin_channel_port)) + set('PublicAddress', admin_channel_address) + +############################# +# Entry point to the script # +############################# + +def usage(): + print sys.argv[0] + ' -oh -jh -parent -name ' + \ + '-user -password ' + \ + '-rcuDb -rcuPrefix -rcuSchemaPwd ' \ + '-adminListenPort -adminName ' \ + '-managedNameBase -managedServerPort -prodMode ' \ + '-managedServerCount -clusterName ' \ + '-exposeAdminT3Channel -t3ChannelPublicAddress
' \ + '-t3ChannelPort ' + sys.exit(0) + +# Uncomment for Debug only +#print str(sys.argv[0]) + " called with the following sys.argv array:" +#for index, arg in enumerate(sys.argv): +# print "sys.argv[" + str(index) + "] = " + str(sys.argv[index]) + +if len(sys.argv) < 16: + usage() + +#oracleHome will be passed by command line parameter -oh. +oracleHome = None +#javaHome will be passed by command line parameter -jh. +javaHome = None +#domainParentDir will be passed by command line parameter -parent. +domainParentDir = None +#domainUser is hard-coded to weblogic. You can change to other name of your choice. Command line parameter -user. +domainUser = 'weblogic' +#domainPassword will be passed by command line parameter -password. +domainPassword = None +#rcuDb will be passed by command line parameter -rcuDb. +rcuDb = None +#change rcuSchemaPrefix to your infra schema prefix. Command line parameter -rcuPrefix. +rcuSchemaPrefix = 'DEV12' +#change rcuSchemaPassword to your infra schema password. Command line parameter -rcuSchemaPwd. 
+rcuSchemaPassword = None +exposeAdminT3Channel = None +t3ChannelPort = None +t3ChannelPublicAddress = None +i = 1 +while i < len(sys.argv): + if sys.argv[i] == '-oh': + oracleHome = sys.argv[i + 1] + i += 2 + elif sys.argv[i] == '-jh': + javaHome = sys.argv[i + 1] + i += 2 + elif sys.argv[i] == '-parent': + domainParentDir = sys.argv[i + 1] + i += 2 + elif sys.argv[i] == '-name': + domainName = sys.argv[i + 1] + i += 2 + elif sys.argv[i] == '-user': + domainUser = sys.argv[i + 1] + i += 2 + elif sys.argv[i] == '-password': + domainPassword = sys.argv[i + 1] + i += 2 + elif sys.argv[i] == '-rcuDb': + rcuDb = sys.argv[i + 1] + i += 2 + elif sys.argv[i] == '-rcuPrefix': + rcuSchemaPrefix = sys.argv[i + 1] + i += 2 + elif sys.argv[i] == '-rcuSchemaPwd': + rcuSchemaPassword = sys.argv[i + 1] + i += 2 + elif sys.argv[i] == '-adminListenPort': + adminListenPort = sys.argv[i + 1] + i += 2 + elif sys.argv[i] == '-adminName': + adminName = sys.argv[i + 1] + i += 2 + elif sys.argv[i] == '-managedNameBase': + managedNameBase = sys.argv[i + 1] + i += 2 + elif sys.argv[i] == '-managedServerPort': + managedServerPort = sys.argv[i + 1] + i += 2 + elif sys.argv[i] == '-prodMode': + prodMode = sys.argv[i + 1] + i += 2 + elif sys.argv[i] == '-managedServerCount': + managedCount = sys.argv[i + 1] + i += 2 + elif sys.argv[i] == '-clusterName': + clusterName = sys.argv[i + 1] + i += 2 + elif sys.argv[i] == '-t3ChannelPublicAddress': + t3ChannelPublicAddress = sys.argv[i + 1] + i += 2 + elif sys.argv[i] == '-t3ChannelPort': + t3ChannelPort = sys.argv[i + 1] + i += 2 + elif sys.argv[i] == '-exposeAdminT3Channel': + exposeAdminT3Channel = sys.argv[i + 1] + i += 2 + else: + print 'Unexpected argument switch at position ' + str(i) + ': ' + str(sys.argv[i]) + usage() + sys.exit(1) + +provisioner = Infra12213Provisioner(oracleHome, javaHome, domainParentDir, adminListenPort, adminName, managedNameBase, managedServerPort, prodMode, managedCount, clusterName) 
+provisioner.createInfraDomain(domainName, domainUser, domainPassword, adminListenPort, adminName, managedNameBase, managedServerPort, prodMode, managedCount, + clusterName, exposeAdminT3Channel, t3ChannelPublicAddress, t3ChannelPort) diff --git a/OracleIdentityGovernance/kubernetes/common/domain-template.yaml b/OracleIdentityGovernance/kubernetes/common/domain-template.yaml new file mode 100755 index 000000000..2d081de7d --- /dev/null +++ b/OracleIdentityGovernance/kubernetes/common/domain-template.yaml @@ -0,0 +1,119 @@ +# Copyright (c) 2017, 2021, Oracle and/or its affiliates. +# Licensed under the Universal Permissive License v 1.0 as shown at https://oss.oracle.com/licenses/upl. +# +# This is an example of how to define a Domain resource. +# +apiVersion: "weblogic.oracle/v8" +kind: Domain +metadata: + name: %DOMAIN_UID% + namespace: %NAMESPACE% + labels: + weblogic.domainUID: %DOMAIN_UID% +spec: + # The WebLogic Domain Home + domainHome: %DOMAIN_HOME% + + # The domain home source type + # Set to PersistentVolume for domain-in-pv, Image for domain-in-image, or FromModel for model-in-image + domainHomeSourceType: %DOMAIN_HOME_SOURCE_TYPE% + + # The WebLogic Server image that the Operator uses to start the domain + image: "%WEBLOGIC_IMAGE%" + + # imagePullPolicy defaults to "Always" if image version is :latest + imagePullPolicy: "%WEBLOGIC_IMAGE_PULL_POLICY%" + + # Identify which Secret contains the credentials for pulling an image + %WEBLOGIC_IMAGE_PULL_SECRET_PREFIX%imagePullSecrets: + %WEBLOGIC_IMAGE_PULL_SECRET_PREFIX%- name: %WEBLOGIC_IMAGE_PULL_SECRET_NAME% + + # Identify which Secret contains the WebLogic Admin credentials (note that there is an example of + # how to create that Secret at the end of this file) + webLogicCredentialsSecret: + name: %WEBLOGIC_CREDENTIALS_SECRET_NAME% + + # Whether to include the server out file into the pod's stdout, default is true + includeServerOutInPodLog: %INCLUDE_SERVER_OUT_IN_POD_LOG% + + # Whether to enable log home + 
%LOG_HOME_ON_PV_PREFIX%logHomeEnabled: %LOG_HOME_ENABLED% + + # Whether to write HTTP access log file to log home + %LOG_HOME_ON_PV_PREFIX%httpAccessLogInLogHome: %HTTP_ACCESS_LOG_IN_LOG_HOME% + + # The in-pod location for domain log, server logs, server out, introspector out, and Node Manager log files + %LOG_HOME_ON_PV_PREFIX%logHome: %LOG_HOME% + # An (optional) in-pod location for data storage of default and custom file stores. + # If not specified or the value is either not set or empty (e.g. dataHome: "") then the + # data storage directories are determined from the WebLogic domain home configuration. + dataHome: "%DATA_HOME%" + + + # serverStartPolicy legal values are "NEVER", "IF_NEEDED", or "ADMIN_ONLY" + # This determines which WebLogic Servers the Operator will start up when it discovers this Domain + # - "NEVER" will not start any server in the domain + # - "ADMIN_ONLY" will start up only the administration server (no managed servers will be started) + # - "IF_NEEDED" will start all non-clustered servers, including the administration server and clustered servers up to the replica count + serverStartPolicy: "%SERVER_START_POLICY%" + + serverPod: + # an (optional) list of environment variable to be set on the servers + env: + - name: JAVA_OPTIONS + value: "%JAVA_OPTIONS%" + - name: USER_MEM_ARGS + value: "-Djava.security.egd=file:/dev/./urandom -Xms256m -Xmx512m " + %OPTIONAL_SERVERPOD_RESOURCES% + %LOG_HOME_ON_PV_PREFIX%volumes: + %LOG_HOME_ON_PV_PREFIX%- name: weblogic-domain-storage-volume + %LOG_HOME_ON_PV_PREFIX% persistentVolumeClaim: + %LOG_HOME_ON_PV_PREFIX% claimName: %DOMAIN_PVC_NAME% + %LOG_HOME_ON_PV_PREFIX%volumeMounts: + %LOG_HOME_ON_PV_PREFIX%- mountPath: %DOMAIN_ROOT_DIR% + %LOG_HOME_ON_PV_PREFIX% name: weblogic-domain-storage-volume + + # adminServer is used to configure the desired behavior for starting the administration server. 
+ adminServer: + # serverStartState legal values are "RUNNING" or "ADMIN" + # "RUNNING" means the listed server will be started up to "RUNNING" mode + # "ADMIN" means the listed server will be start up to "ADMIN" mode + serverStartState: "RUNNING" + %EXPOSE_ANY_CHANNEL_PREFIX%adminService: + %EXPOSE_ANY_CHANNEL_PREFIX% channels: + # The Admin Server's NodePort + %EXPOSE_ADMIN_PORT_PREFIX% - channelName: default + %EXPOSE_ADMIN_PORT_PREFIX% nodePort: %ADMIN_NODE_PORT% + # Uncomment to export the T3Channel as a service + %EXPOSE_T3_CHANNEL_PREFIX% - channelName: T3Channel + + # clusters is used to configure the desired behavior for starting member servers of a cluster. + # If you use this entry, then the rules will be applied to ALL servers that are members of the named clusters. + clusters: + - clusterName: %CLUSTER_NAME% + serverStartState: "RUNNING" + serverPod: + # Instructs Kubernetes scheduler to prefer nodes for new cluster members where there are not + # already members of the same cluster. + affinity: + podAntiAffinity: + preferredDuringSchedulingIgnoredDuringExecution: + - weight: 100 + podAffinityTerm: + labelSelector: + matchExpressions: + - key: "weblogic.clusterName" + operator: In + values: + - $(CLUSTER_NAME) + topologyKey: "kubernetes.io/hostname" + replicas: %INITIAL_MANAGED_SERVER_REPLICAS% + # The number of managed servers to start for unlisted clusters + # replicas: 1 + + # Istio + %ISTIO_PREFIX%configuration: + %ISTIO_PREFIX% istio: + %ISTIO_PREFIX% enabled: %ISTIO_ENABLED% + %ISTIO_PREFIX% readinessPort: %ISTIO_READINESS_PORT% + diff --git a/OracleIdentityGovernance/kubernetes/common/jrf-domain-template.yaml b/OracleIdentityGovernance/kubernetes/common/jrf-domain-template.yaml new file mode 100755 index 000000000..ccd35b84f --- /dev/null +++ b/OracleIdentityGovernance/kubernetes/common/jrf-domain-template.yaml @@ -0,0 +1,123 @@ +# Copyright (c) 2017, 2021, Oracle and/or its affiliates. 
+# Licensed under the Universal Permissive License v 1.0 as shown at https://oss.oracle.com/licenses/upl. +# +# This is an example of how to define a Domain resource. +# +apiVersion: "weblogic.oracle/v8" +kind: Domain +metadata: + name: %DOMAIN_UID% + namespace: %NAMESPACE% + labels: + weblogic.domainUID: %DOMAIN_UID% +spec: + # The WebLogic Domain Home + domainHome: %DOMAIN_HOME% + + # The domain home source type + # Set to PersistentVolume for domain-in-pv, Image for domain-in-image, or FromModel for model-in-image + domainHomeSourceType: %DOMAIN_HOME_SOURCE_TYPE% + + # The WebLogic Server image that the Operator uses to start the domain + image: "%WEBLOGIC_IMAGE%" + + # imagePullPolicy defaults to "Always" if image version is :latest + imagePullPolicy: "%WEBLOGIC_IMAGE_PULL_POLICY%" + + # Identify which Secret contains the credentials for pulling an image + %WEBLOGIC_IMAGE_PULL_SECRET_PREFIX%imagePullSecrets: + %WEBLOGIC_IMAGE_PULL_SECRET_PREFIX%- name: %WEBLOGIC_IMAGE_PULL_SECRET_NAME% + + # Identify which Secret contains the WebLogic Admin credentials (note that there is an example of + # how to create that Secret at the end of this file) + webLogicCredentialsSecret: + name: %WEBLOGIC_CREDENTIALS_SECRET_NAME% + + # Whether to include the server out file into the pod's stdout, default is true + includeServerOutInPodLog: %INCLUDE_SERVER_OUT_IN_POD_LOG% + + # Whether to enable log home + %LOG_HOME_ON_PV_PREFIX%logHomeEnabled: %LOG_HOME_ENABLED% + + # Whether to write HTTP access log file to log home + %LOG_HOME_ON_PV_PREFIX%httpAccessLogInLogHome: %HTTP_ACCESS_LOG_IN_LOG_HOME% + + # The in-pod location for domain log, server logs, server out, introspector out, and Node Manager log files + %LOG_HOME_ON_PV_PREFIX%logHome: %LOG_HOME% + # An (optional) in-pod location for data storage of default and custom file stores. + # If not specified or the value is either not set or empty (e.g. 
dataHome: "") then the + # data storage directories are determined from the WebLogic domain home configuration. + dataHome: "%DATA_HOME%" + + # serverStartPolicy legal values are "NEVER", "IF_NEEDED", or "ADMIN_ONLY" + # This determines which WebLogic Servers the Operator will start up when it discovers this Domain + # - "NEVER" will not start any server in the domain + # - "ADMIN_ONLY" will start up only the administration server (no managed servers will be started) + # - "IF_NEEDED" will start all non-clustered servers, including the administration server and clustered servers up to the replica count + serverStartPolicy: "%SERVER_START_POLICY%" + + serverPod: + # an (optional) list of environment variable to be set on the servers + env: + - name: JAVA_OPTIONS + value: "%JAVA_OPTIONS%" + - name: USER_MEM_ARGS + value: "-Djava.security.egd=file:/dev/./urandom -Xms256m -Xmx1024m " + %OPTIONAL_SERVERPOD_RESOURCES% + %LOG_HOME_ON_PV_PREFIX%volumes: + %LOG_HOME_ON_PV_PREFIX%- name: weblogic-domain-storage-volume + %LOG_HOME_ON_PV_PREFIX% persistentVolumeClaim: + %LOG_HOME_ON_PV_PREFIX% claimName: %DOMAIN_PVC_NAME% + %LOG_HOME_ON_PV_PREFIX%volumeMounts: + %LOG_HOME_ON_PV_PREFIX%- mountPath: %DOMAIN_ROOT_DIR% + %LOG_HOME_ON_PV_PREFIX% name: weblogic-domain-storage-volume + + # adminServer is used to configure the desired behavior for starting the administration server. 
+ adminServer: + # serverStartState legal values are "RUNNING" or "ADMIN" + # "RUNNING" means the listed server will be started up to "RUNNING" mode + # "ADMIN" means the listed server will be start up to "ADMIN" mode + serverStartState: "RUNNING" + %EXPOSE_ANY_CHANNEL_PREFIX%adminService: + %EXPOSE_ANY_CHANNEL_PREFIX% channels: + # The Admin Server's NodePort + %EXPOSE_ADMIN_PORT_PREFIX% - channelName: default + %EXPOSE_ADMIN_PORT_PREFIX% nodePort: %ADMIN_NODE_PORT% + # Uncomment to export the T3Channel as a service + %EXPOSE_T3_CHANNEL_PREFIX% - channelName: T3Channel + serverPod: + # an (optional) list of environment variable to be set on the admin servers + env: + - name: USER_MEM_ARGS + value: "-Djava.security.egd=file:/dev/./urandom -Xms512m -Xmx1024m " + + # clusters is used to configure the desired behavior for starting member servers of a cluster. + # If you use this entry, then the rules will be applied to ALL servers that are members of the named clusters. + clusters: + - clusterName: %CLUSTER_NAME% + serverStartState: "RUNNING" + serverPod: + # Instructs Kubernetes scheduler to prefer nodes for new cluster members where there are not + # already members of the same cluster. 
+ affinity: + podAntiAffinity: + preferredDuringSchedulingIgnoredDuringExecution: + - weight: 100 + podAffinityTerm: + labelSelector: + matchExpressions: + - key: "weblogic.clusterName" + operator: In + values: + - $(CLUSTER_NAME) + topologyKey: "kubernetes.io/hostname" + replicas: %INITIAL_MANAGED_SERVER_REPLICAS% + # The number of managed servers to start for unlisted clusters + # replicas: 1 + + # Istio + %ISTIO_PREFIX%configuration: + %ISTIO_PREFIX% istio: + %ISTIO_PREFIX% enabled: %ISTIO_ENABLED% + %ISTIO_PREFIX% readinessPort: %ISTIO_READINESS_PORT% + diff --git a/OracleIdentityGovernance/kubernetes/common/utility.sh b/OracleIdentityGovernance/kubernetes/common/utility.sh new file mode 100755 index 000000000..979207be2 --- /dev/null +++ b/OracleIdentityGovernance/kubernetes/common/utility.sh @@ -0,0 +1,928 @@ +#!/usr/bin/env bash +# Copyright (c) 2018, 2021, Oracle and/or its affiliates. +# Licensed under the Universal Permissive License v 1.0 as shown at https://oss.oracle.com/licenses/upl. + +# +# Utility functions that are shared by multiple scripts +# + +# +# Function to exit and print an error message +# $1 - text of message +function fail { + printError $* + exit 1 +} + +# Function to print an error message +function printError { + echo [ERROR] $* +} + +# Function to see if there is more than 1 input file. +# This could happen if the user has a properties file from +# running wdt discover domain on a on-prem domain +function checkInputFiles { + if [[ "${valuesInputFile}" =~ [,] ]] ; then + echo "Found a comma separated list of input files" + IFS=',' + read -a temp <<< "${valuesInputFile}" + + # We want to keep valuesInputFile pointing to the yaml since + # the validate function expects it. 
+ local extension=$(echo "${temp[0]}" | sed 's/^.*\.//') + if [ ${extension} == 'yaml' ]; then + valuesInputFile=${temp[0]} + valuesInputFile1=${temp[1]} + else + valuesInputFile=${temp[1]} + valuesInputFile1=${temp[0]} + fi + fi +} + +# +# Function to parse a yaml file and generate the bash exports +# $1 - Input filename +# $2 - Output filename +function parseYaml { + local s='[[:space:]]*' w='[a-zA-Z0-9_]*' fs=$(echo @|tr @ '\034') + sed -ne "s|^\($s\):|\1|" \ + -e "s|^\($s\)\($w\)$s:$s[\"']\(.*\)[\"']$s\$|\1$fs\2$fs\3|p" \ + -e "s|^\($s\)\($w\)$s:$s\(.*\)$s\$|\1$fs\2$fs\3|p" $1 | + awk -F$fs '{ + if (length($3) > 0) { + # javaOptions may contain tokens that are not allowed in export command + # we need to handle it differently. + if ($2=="javaOptions") { + printf("%s=%s\n", $2, $3); + } else { + printf("export %s=\"%s\"\n", $2, $3); + } + } + }' > $2 +} + +# +# Function to parse a properties file and generate the bash exports +# $1 - Input filename +# $2 - Output filename +function parseProperties { + while IFS='=' read -r key value + do + echo "export ${key}=\"${value}\"" >> $2 + done < $1 +} + +# +# Function to remove a file if it exists +# +function removeFileIfExists { + if [ -f $1 ]; then + rm $1 + fi +} + +# +# Function to parse the common parameter inputs file +# +function parseCommonInputs { + exportValuesFile=$(mktemp /tmp/export-values-XXXXXXXXX.sh) + tmpFile=$(mktemp /tmp/javaoptions_tmp-XXXXXXXXX.dat) + parseYaml ${valuesInputFile} ${exportValuesFile} + + if [ ! -z ${valuesInputFile1} ]; then + parseProperties ${valuesInputFile1} ${exportValuesFile} + fi + + if [ ! -f ${exportValuesFile} ]; then + echo Unable to locate the parsed output of ${valuesInputFile}. + fail 'The file ${exportValuesFile} could not be found.' 
+ fi + + # Define the environment variables that will be used to fill in template values + echo Input parameters being used + cat ${exportValuesFile} + echo + + # If we have 2 input files, we need to create a combined inputs file + # exportsValueFile contains all the properties already + # We just need to remove the term export from the file + if [ ! -z ${valuesInputFile1} ]; then + propsFile="domain.properties" + cat ${exportValuesFile} > ${propsFile} + sed -i 's/export //g' ${propsFile} + sed -i 's/"//g' ${propsFile} + valuesInputFile=${propsFile} + cat ${valuesInputFile} + fi + + # javaOptions may contain tokens that are not allowed in export command + # we need to handle it differently. + # we set the javaOptions variable that can be used later + tmpStr=`grep "javaOptions" ${exportValuesFile}` + javaOptions=${tmpStr//"javaOptions="/} + + # We exclude javaOptions from the exportValuesFile + grep -v "javaOptions" ${exportValuesFile} > ${tmpFile} + source ${tmpFile} + + rm ${exportValuesFile} ${tmpFile} +} + +# +# Function to delete a kubernetes object +# $1 object type +# $2 object name +# $3 yaml file +function deleteK8sObj { + # If the yaml file does not exist yet, unable to do the delete + if [ ! -f $3 ]; then + fail "Unable to delete object type $1 with name $2 because file $3 does not exist" + fi + + echo Checking if object type $1 with name $2 exists + K8SOBJ=`kubectl get $1 -n ${namespace} | grep $2 | wc | awk ' { print $1; }'` + if [ "${K8SOBJ}" = "1" ]; then + echo Deleting $2 using $3 + kubectl delete -f $3 + fi +} + +# +# Function to lowercase a value +# $1 - value to convert to lowercase +function toLower { + local lc=`echo $1 | tr "[:upper:]" "[:lower:]"` + echo "$lc" +} + +# +# Function to lowercase a value and make it a legal DNS1123 name +# $1 - value to convert to lowercase +function toDNS1123Legal { + local val=`echo $1 | tr "[:upper:]" "[:lower:]"` + val=${val//"_"/"-"} + echo "$val" +} + +# +# Check the state of a persistent volume. 
+# $1 - name of volume +# $2 - expected state of volume +function checkPvState { + + echo "Checking if the persistent volume ${1:?} is ${2:?}" + local pv_state=`kubectl get pv $1 -o jsonpath='{.status.phase}'` + attempts=0 + while [ ! "$pv_state" = "$2" ] && [ ! $attempts -eq 10 ]; do + attempts=$((attempts + 1)) + sleep 1 + pv_state=`kubectl get pv $1 -o jsonpath='{.status.phase}'` + done + if [ "$pv_state" != "$2" ]; then + fail "The persistent volume state should be $2 but is $pv_state" + fi +} + +# +# Function to check if a persistent volume exists +# $1 - name of volume +function checkPvExists { + + echo "Checking if the persistent volume ${1} exists" + PV_EXISTS=`kubectl get pv | grep ${1} | wc | awk ' { print $1; } '` + if [ "${PV_EXISTS}" = "1" ]; then + echo "The persistent volume ${1} already exists" + PV_EXISTS="true" + else + echo "The persistent volume ${1} does not exist" + PV_EXISTS="false" + fi +} + +# +# Function to check if a persistent volume claim exists +# $1 - name of persistent volume claim +# $2 - NameSpace +function checkPvcExists { + echo "Checking if the persistent volume claim ${1} in NameSpace ${2} exists" + PVC_EXISTS=`kubectl get pvc -n ${2} | grep ${1} | wc | awk ' { print $1; } '` + if [ "${PVC_EXISTS}" = "1" ]; then + echo "The persistent volume claim ${1} already exists in NameSpace ${2}" + PVC_EXISTS="true" + else + echo "The persistent volume claim ${1} does not exist in NameSpace ${2}" + PVC_EXISTS="false" + fi +} + +# Copy the inputs file from the command line into the output directory +# for the domain/operator unless the output directory already has an +# inputs file and the file is the same as the one from the commandline. 
+# $1 the inputs file from the command line +# $2 the file in the output directory that needs to be made the same as $1 +function copyInputsFileToOutputDirectory { + local from=$1 + local to=$2 + local doCopy="true" + if [ -f "${to}" ]; then + local difference=`diff ${from} ${to}` + if [ -z "${difference}" ]; then + # the output file already exists and is the same as the inputs file. + # don't make a copy. + doCopy="false" + fi + fi + if [ "${doCopy}" = "true" ]; then + cp ${from} ${to} + fi +} + +# +# Function to obtain the IP address of the kubernetes cluster. This information +# is used to form the URL's for accessing services that were deployed. +# +function getKubernetesClusterIP { + + # Get name of the current context + local CUR_CTX=`kubectl config current-context | awk ' { print $1; } '` + + # Get the name of the current cluster + local CUR_CLUSTER_CMD="kubectl config view -o jsonpath='{.contexts[?(@.name == \"${CUR_CTX}\")].context.cluster}' | awk ' { print $1; } '" + local CUR_CLUSTER=`eval ${CUR_CLUSTER_CMD}` + + # Get the server address for the current cluster + local SVR_ADDR_CMD="kubectl config view -o jsonpath='{.clusters[?(@.name == \"${CUR_CLUSTER}\")].cluster.server}' | awk ' { print $1; } '" + local SVR_ADDR=`eval ${SVR_ADDR_CMD}` + + # Server address is expected to be of the form http://address:port. Delimit + # string on the colon to obtain the address. + local array=(${SVR_ADDR//:/ }) + K8S_IP="${array[1]/\/\//}" + +} + +# +# Function to set the serverPodResources variable for including into the generated +# domain.yaml, base on the serverPod resource requests and limits input values, +# if specified. +# The serverPodResources variable remains unset if none of the input values are provided. 
+# +function buildServerPodResources { + + if [ -n "${serverPodMemoryRequest}" ]; then + local memoryRequest=" memory\: \"${serverPodMemoryRequest}\"\n" + fi + if [ -n "${serverPodCpuRequest}" ]; then + local cpuRequest=" cpu\: \"${serverPodCpuRequest}\"\n" + fi + if [ -n "${memoryRequest}" ] || [ -n "${cpuRequest}" ]; then + local requests=" requests\: \n$memoryRequest $cpuRequest" + fi + + if [ -n "${serverPodMemoryLimit}" ]; then + local memoryLimit=" memory\: \"${serverPodMemoryLimit}\"\n" + fi + if [ -n "${serverPodCpuLimit}" ]; then + local cpuLimit=" cpu\: \"${serverPodCpuLimit}\"\n" + fi + if [ -n "${memoryLimit}" ] || [ -n "${cpuLimit}" ]; then + local limits=" limits\: \n$memoryLimit $cpuLimit" + fi + + if [ -n "${requests}" ] || [ -n "${limits}" ]; then + # build resources element and remove last '\n' + serverPodResources=$(echo "resources\:\n${requests}${limits}" | sed -e 's/\\n$//') + fi +} + +# +# Function to generate the properties and yaml files for creating a domain +# +function createFiles { + + update=false + if [ "$#" == 1 ]; then + echo Trying to update the domain + update=true + fi + + # Make sure the output directory has a copy of the inputs file. + # The user can either pre-create the output directory, put the inputs + # file there, and create the domain from it, or the user can put the + # inputs file some place else and let this script create the output directory + # (if needed) and copy the inputs file there. 
+ echo createFiles - valuesInputFile is ${valuesInputFile} + copyInputsFileToOutputDirectory ${valuesInputFile} "${domainOutputDir}/create-domain-inputs.yaml" + + if [ "${domainHomeInImage}" == "true" ]; then + if [ -z "${domainHomeImageBase}" ]; then + fail "Please specify domainHomeImageBase in your input YAML" + fi + else + if [ -z "${image}" ]; then + fail "Please specify image in your input YAML" + fi + fi + + dcrOutput="${domainOutputDir}/domain.yaml" + + domainName=${domainUID} + + enabledPrefix="" # uncomment the feature + disabledPrefix="# " # comment out the feature + + exposeAnyChannelPrefix="${disabledPrefix}" + if [ "${exposeAdminT3Channel}" = true ]; then + exposeAdminT3ChannelPrefix="${enabledPrefix}" + exposeAnyChannelPrefix="${enabledPrefix}" + # set t3PublicAddress if not set + if [ -z "${t3PublicAddress}" ]; then + getKubernetesClusterIP + t3PublicAddress="${K8S_IP}" + fi + else + exposeAdminT3ChannelPrefix="${disabledPrefix}" + fi + + if [ "${exposeAdminNodePort}" = true ]; then + exposeAdminNodePortPrefix="${enabledPrefix}" + exposeAnyChannelPrefix="${enabledPrefix}" + else + exposeAdminNodePortPrefix="${disabledPrefix}" + fi + + if [ "${istioEnabled}" == "true" ]; then + istioPrefix="${enabledPrefix}" + else + istioPrefix="${disabledPrefix}" + fi + + # The FromModel, MII (model-in-image), and WDT_DOMAIN_TYPE updates in this script + # must remain even though they are not referenced by a sample. They're used by the + # Operator integration test code. If you're interested in MII, + # see './kubernetes/samples/scripts/create-weblogic-domain/model-in-image'. 
+ + # MII settings are used for model-in-image integration testing + if [ "${domainHomeSourceType}" == "FromModel" ]; then + miiPrefix="${enabledPrefix}" + else + miiPrefix="${disabledPrefix}" + fi + + # MII settings are used for model-in-image integration testing + if [ -z "${miiConfigMap}" ]; then + miiConfigMapPrefix="${disabledPrefix}" + else + miiConfigMapPrefix="${enabledPrefix}" + fi + + # For some parameters, use the default value if not defined. + if [ -z "${domainPVMountPath}" ]; then + domainPVMountPath="/shared" + fi + + if [ -z "${logHome}" ]; then + logHome="${domainPVMountPath}/logs/${domainUID}" + fi + + if [ -z "${httpAccessLogInLogHome}" ]; then + httpAccessLogInLogHome="true" + fi + + if [ -z "${dataHome}" ]; then + dataHome="" + fi + + if [ -z "${persistentVolumeClaimName}" ]; then + persistentVolumeClaimName="${domainUID}-weblogic-sample-pvc" + fi + + if [ -z "${weblogicCredentialsSecretName}" ]; then + weblogicCredentialsSecretName="${domainUID}-weblogic-credentials" + fi + + if [ "${domainHomeInImage}" == "true" ]; then + domainPropertiesOutput="${domainOutputDir}/domain.properties" + domainHome="${domainHome:-/u01/oracle/user_projects/domains/${domainName}}" + + # Generate the properties file that will be used when creating the weblogic domain + echo Generating ${domainPropertiesOutput} from ${domainPropertiesInput} + + cp ${domainPropertiesInput} ${domainPropertiesOutput} + sed -i -e "s:%DOMAIN_NAME%:${domainName}:g" ${domainPropertiesOutput} + sed -i -e "s:%DOMAIN_HOME%:${domainHome}:g" ${domainPropertiesOutput} + sed -i -e "s:%ADMIN_PORT%:${adminPort}:g" ${domainPropertiesOutput} + sed -i -e "s:%ADMIN_SERVER_SSL_PORT%:${adminServerSSLPort}:g" ${domainPropertiesOutput} + sed -i -e "s:%ADMIN_SERVER_NAME%:${adminServerName}:g" ${domainPropertiesOutput} + sed -i -e "s:%MANAGED_SERVER_PORT%:${managedServerPort}:g" ${domainPropertiesOutput} + sed -i -e "s:%MANAGED_SERVER_SSL_PORT%:${managedServerSSLPort}:g" ${domainPropertiesOutput} + sed -i -e 
"s:%MANAGED_SERVER_NAME_BASE%:${managedServerNameBase}:g" ${domainPropertiesOutput} + sed -i -e "s:%CONFIGURED_MANAGED_SERVER_COUNT%:${configuredManagedServerCount}:g" ${domainPropertiesOutput} + sed -i -e "s:%CLUSTER_NAME%:${clusterName}:g" ${domainPropertiesOutput} + sed -i -e "s:%SSL_ENABLED%:${sslEnabled}:g" ${domainPropertiesOutput} + sed -i -e "s:%PRODUCTION_MODE_ENABLED%:${productionModeEnabled}:g" ${domainPropertiesOutput} + sed -i -e "s:%CLUSTER_TYPE%:${clusterType}:g" ${domainPropertiesOutput} + sed -i -e "s;%JAVA_OPTIONS%;${javaOptions};g" ${domainPropertiesOutput} + sed -i -e "s:%T3_CHANNEL_PORT%:${t3ChannelPort}:g" ${domainPropertiesOutput} + sed -i -e "s:%T3_PUBLIC_ADDRESS%:${t3PublicAddress}:g" ${domainPropertiesOutput} + sed -i -e "s:%EXPOSE_T3_CHANNEL%:${exposeAdminT3Channel}:g" ${domainPropertiesOutput} + sed -i -e "s:%FMW_DOMAIN_TYPE%:${fmwDomainType}:g" ${domainPropertiesOutput} + sed -i -e "s:%WDT_DOMAIN_TYPE%:${wdtDomainType}:g" ${domainPropertiesOutput} + sed -i -e "s:%ADMIN_USER_NAME%:${username}:g" ${domainPropertiesOutput} + sed -i -e "s:%ADMIN_USER_PASS%:${password}:g" ${domainPropertiesOutput} + sed -i -e "s:%RCU_SCHEMA_PREFIX%:${rcuSchemaPrefix}:g" ${domainPropertiesOutput} + sed -i -e "s:%RCU_SCHEMA_PASSWORD%:${rcuSchemaPassword}:g" ${domainPropertiesOutput} + sed -i -e "s|%RCU_DB_CONN_STRING%|${rcuDatabaseURL}|g" ${domainPropertiesOutput} + + if [ -z "${image}" ]; then + # calculate the internal name to tag the generated image + defaultImageName="domain-home-in-image" + baseTag=${domainHomeImageBase#*:} + defaultImageName=${defaultImageName}:${baseTag:-"latest"} + sed -i -e "s|%IMAGE_NAME%|${defaultImageName}|g" ${domainPropertiesOutput} + export BUILD_IMAGE_TAG=${defaultImageName} + else + sed -i -e "s|%IMAGE_NAME%|${image}|g" ${domainPropertiesOutput} + export BUILD_IMAGE_TAG=${image} + fi + else + # we're in the domain in PV case + + wdtVersion="${WDT_VERSION:-${wdtVersion}}" + httpsProxy="${https_proxy}" + + 
createJobOutput="${domainOutputDir}/create-domain-job.yaml" + deleteJobOutput="${domainOutputDir}/delete-domain-job.yaml" + + if [ -z "${domainHome}" ]; then + domainHome="${domainPVMountPath}/domains/${domainUID}" + fi + + # Use the default value if not defined. + if [ -z "${createDomainScriptsMountPath}" ]; then + createDomainScriptsMountPath="/u01/weblogic" + fi + + if [ "${update}" == "true" ]; then + createDomainScriptName="update-domain-job.sh" + elif [ -z "${createDomainScriptName}" ]; then + createDomainScriptName="create-domain-job.sh" + fi + echo createDomainScriptName is ${createDomainScriptName} + + # Must escape the ':' value in image for sed to properly parse and replace + image=$(echo ${image} | sed -e "s/\:/\\\:/g") + + # Generate the yaml to create the kubernetes job that will create the weblogic domain + echo Generating ${createJobOutput} + + cp ${createJobInput} ${createJobOutput} + sed -i -e "s:%NAMESPACE%:$namespace:g" ${createJobOutput} + sed -i -e "s:%WEBLOGIC_CREDENTIALS_SECRET_NAME%:${weblogicCredentialsSecretName}:g" ${createJobOutput} + sed -i -e "s:%WEBLOGIC_IMAGE%:${image}:g" ${createJobOutput} + sed -i -e "s:%WEBLOGIC_IMAGE_PULL_POLICY%:${imagePullPolicy}:g" ${createJobOutput} + sed -i -e "s:%WEBLOGIC_IMAGE_PULL_SECRET_NAME%:${imagePullSecretName}:g" ${createJobOutput} + sed -i -e "s:%WEBLOGIC_IMAGE_PULL_SECRET_PREFIX%:${imagePullSecretPrefix}:g" ${createJobOutput} + sed -i -e "s:%DOMAIN_UID%:${domainUID}:g" ${createJobOutput} + sed -i -e "s:%DOMAIN_NAME%:${domainName}:g" ${createJobOutput} + sed -i -e "s:%DOMAIN_HOME%:${domainHome}:g" ${createJobOutput} + sed -i -e "s:%SSL_ENABLED%:${sslEnabled}:g" ${createJobOutput} + sed -i -e "s:%PRODUCTION_MODE_ENABLED%:${productionModeEnabled}:g" ${createJobOutput} + sed -i -e "s:%ADMIN_SERVER_NAME%:${adminServerName}:g" ${createJobOutput} + sed -i -e "s:%ADMIN_SERVER_NAME_SVC%:${adminServerNameSVC}:g" ${createJobOutput} + sed -i -e "s:%ADMIN_PORT%:${adminPort}:g" ${createJobOutput} + sed -i -e 
"s:%ADMIN_SERVER_SSL_PORT%:${adminServerSSLPort}:g" ${createJobOutput} + sed -i -e "s:%CONFIGURED_MANAGED_SERVER_COUNT%:${configuredManagedServerCount}:g" ${createJobOutput} + sed -i -e "s:%MANAGED_SERVER_NAME_BASE%:${managedServerNameBase}:g" ${createJobOutput} + sed -i -e "s:%MANAGED_SERVER_NAME_BASE_SVC%:${managedServerNameBaseSVC}:g" ${createJobOutput} + sed -i -e "s:%MANAGED_SERVER_PORT%:${managedServerPort}:g" ${createJobOutput} + sed -i -e "s:%MANAGED_SERVER_SSL_PORT%:${managedServerSSLPort}:g" ${createJobOutput} + sed -i -e "s:%T3_CHANNEL_PORT%:${t3ChannelPort}:g" ${createJobOutput} + sed -i -e "s:%T3_PUBLIC_ADDRESS%:${t3PublicAddress}:g" ${createJobOutput} + sed -i -e "s:%CLUSTER_NAME%:${clusterName}:g" ${createJobOutput} + sed -i -e "s:%CLUSTER_TYPE%:${clusterType}:g" ${createJobOutput} + sed -i -e "s:%DOMAIN_PVC_NAME%:${persistentVolumeClaimName}:g" ${createJobOutput} + sed -i -e "s:%DOMAIN_ROOT_DIR%:${domainPVMountPath}:g" ${createJobOutput} + sed -i -e "s:%CREATE_DOMAIN_SCRIPT_DIR%:${createDomainScriptsMountPath}:g" ${createJobOutput} + sed -i -e "s:%CREATE_DOMAIN_SCRIPT%:${createDomainScriptName}:g" ${createJobOutput} + # extra entries for FMW Infra domains + sed -i -e "s:%RCU_CREDENTIALS_SECRET_NAME%:${rcuCredentialsSecret}:g" ${createJobOutput} + sed -i -e "s:%CUSTOM_RCUPREFIX%:${rcuSchemaPrefix}:g" ${createJobOutput} + sed -i -e "s|%CUSTOM_CONNECTION_STRING%|${rcuDatabaseURL}|g" ${createJobOutput} + sed -i -e "s:%EXPOSE_T3_CHANNEL_PREFIX%:${exposeAdminT3Channel}:g" ${createJobOutput} + sed -i -e "s:%FRONTEND_HOST%:${frontEndHost}:g" ${createJobOutput} + sed -i -e "s:%FRONTEND_PORT%:${frontEndPort}:g" ${createJobOutput} + # entries for Istio + sed -i -e "s:%ISTIO_PREFIX%:${istioPrefix}:g" ${createJobOutput} + sed -i -e "s:%ISTIO_ENABLED%:${istioEnabled}:g" ${createJobOutput} + sed -i -e "s:%ISTIO_READINESS_PORT%:${istioReadinessPort}:g" ${createJobOutput} + sed -i -e "s:%WDT_VERSION%:${wdtVersion}:g" ${createJobOutput} + sed -i -e 
"s|%DOMAIN_TYPE%|${domain_type}|g" ${createJobOutput} + sed -i -e "s|%PROXY_VAL%|${httpsProxy}|g" ${createJobOutput} + + # Generate the yaml to create the kubernetes job that will delete the weblogic domain_home folder + echo Generating ${deleteJobOutput} + + cp ${deleteJobInput} ${deleteJobOutput} + sed -i -e "s:%NAMESPACE%:$namespace:g" ${deleteJobOutput} + sed -i -e "s:%WEBLOGIC_IMAGE%:${image}:g" ${deleteJobOutput} + sed -i -e "s:%WEBLOGIC_IMAGE_PULL_POLICY%:${imagePullPolicy}:g" ${deleteJobOutput} + sed -i -e "s:%WEBLOGIC_CREDENTIALS_SECRET_NAME%:${weblogicCredentialsSecretName}:g" ${deleteJobOutput} + sed -i -e "s:%WEBLOGIC_IMAGE_PULL_SECRET_NAME%:${imagePullSecretName}:g" ${deleteJobOutput} + sed -i -e "s:%WEBLOGIC_IMAGE_PULL_SECRET_PREFIX%:${imagePullSecretPrefix}:g" ${deleteJobOutput} + sed -i -e "s:%DOMAIN_UID%:${domainUID}:g" ${deleteJobOutput} + sed -i -e "s:%DOMAIN_NAME%:${domainName}:g" ${deleteJobOutput} + sed -i -e "s:%DOMAIN_HOME%:${domainHome}:g" ${deleteJobOutput} + sed -i -e "s:%DOMAIN_PVC_NAME%:${persistentVolumeClaimName}:g" ${deleteJobOutput} + sed -i -e "s:%DOMAIN_ROOT_DIR%:${domainPVMountPath}:g" ${deleteJobOutput} + fi + + if [ "${domainHomeSourceType}" == "FromModel" ]; then + echo domainHomeSourceType is FromModel + # leave domainHomeSourceType to FromModel + if [ "${logHomeOnPV}" == "true" ]; then + logHomeOnPVPrefix="${enabledPrefix}" + else + logHomeOnPVPrefix="${disabledPrefix}" + fi + elif [ "${domainHomeInImage}" == "true" ]; then + domainHomeSourceType="Image" + if [ "${logHomeOnPV}" == "true" ]; then + logHomeOnPVPrefix="${enabledPrefix}" + else + logHomeOnPVPrefix="${disabledPrefix}" + fi + else + domainHomeSourceType="PersistentVolume" + logHomeOnPVPrefix="${enabledPrefix}" + logHomeOnPV=true + fi + + # Generate the yaml file for creating the domain resource + # We want to use wdt's extractDomainResource.sh to get the domain resource + # for domain on pv use case. 
For others, generate domain resource here + + if [ "${domainHomeSourceType}" != "PersistentVolume" ] || [ "${wdtDomainType}" != "WLS" ] || + [ "${useWdt}" != true ]; then + echo Generating ${dcrOutput} + + cp ${dcrInput} ${dcrOutput} + sed -i -e "s:%DOMAIN_UID%:${domainUID}:g" ${dcrOutput} + sed -i -e "s:%NAMESPACE%:$namespace:g" ${dcrOutput} + sed -i -e "s:%DOMAIN_HOME%:${domainHome}:g" ${dcrOutput} + sed -i -e "s:%DOMAIN_HOME_SOURCE_TYPE%:${domainHomeSourceType}:g" ${dcrOutput} + sed -i -e "s:%WEBLOGIC_IMAGE_PULL_POLICY%:${imagePullPolicy}:g" ${dcrOutput} + sed -i -e "s:%WEBLOGIC_IMAGE_PULL_SECRET_PREFIX%:${imagePullSecretPrefix}:g" ${dcrOutput} + sed -i -e "s:%WEBLOGIC_IMAGE_PULL_SECRET_NAME%:${imagePullSecretName}:g" ${dcrOutput} + sed -i -e "s:%WEBLOGIC_CREDENTIALS_SECRET_NAME%:${weblogicCredentialsSecretName}:g" ${dcrOutput} + sed -i -e "s:%INCLUDE_SERVER_OUT_IN_POD_LOG%:${includeServerOutInPodLog}:g" ${dcrOutput} + sed -i -e "s:%LOG_HOME_ON_PV_PREFIX%:${logHomeOnPVPrefix}:g" ${dcrOutput} + sed -i -e "s:%LOG_HOME_ENABLED%:${logHomeOnPV}:g" ${dcrOutput} + sed -i -e "s:%LOG_HOME%:${logHome}:g" ${dcrOutput} + sed -i -e "s:%HTTP_ACCESS_LOG_IN_LOG_HOME%:${httpAccessLogInLogHome}:g" ${dcrOutput} + sed -i -e "s:%DATA_HOME%:${dataHome}:g" ${dcrOutput} + sed -i -e "s:%SERVER_START_POLICY%:${serverStartPolicy}:g" ${dcrOutput} + sed -i -e "s;%JAVA_OPTIONS%;${javaOptions};g" ${dcrOutput} + sed -i -e "s:%DOMAIN_PVC_NAME%:${persistentVolumeClaimName}:g" ${dcrOutput} + sed -i -e "s:%DOMAIN_ROOT_DIR%:${domainPVMountPath}:g" ${dcrOutput} + + if [ "${istioEnabled}" == "true" ]; then + exposeAdminNodePortPrefix="${disabledPrefix}" + fi + + sed -i -e "s:%EXPOSE_T3_CHANNEL_PREFIX%:${exposeAdminT3ChannelPrefix}:g" ${dcrOutput} + sed -i -e "s:%EXPOSE_ANY_CHANNEL_PREFIX%:${exposeAnyChannelPrefix}:g" ${dcrOutput} + sed -i -e "s:%EXPOSE_ADMIN_PORT_PREFIX%:${exposeAdminNodePortPrefix}:g" ${dcrOutput} + sed -i -e "s:%ADMIN_NODE_PORT%:${adminNodePort}:g" ${dcrOutput} + sed -i -e 
"s:%CLUSTER_NAME%:${clusterName}:g" ${dcrOutput} + sed -i -e "s:%INITIAL_MANAGED_SERVER_REPLICAS%:${initialManagedServerReplicas}:g" ${dcrOutput} + sed -i -e "s:%ISTIO_PREFIX%:${istioPrefix}:g" ${dcrOutput} + sed -i -e "s:%ISTIO_ENABLED%:${istioEnabled}:g" ${dcrOutput} + sed -i -e "s:%ISTIO_READINESS_PORT%:${istioReadinessPort}:g" ${dcrOutput} + # MII settings are used for model-in-image integration testing + sed -i -e "s:%MII_PREFIX%:${miiPrefix}:g" ${dcrOutput} + sed -i -e "s:%MII_CONFIG_MAP_PREFIX%:${miiConfigMapPrefix}:g" ${dcrOutput} + sed -i -e "s:%MII_CONFIG_MAP%:${miiConfigMap}:g" ${dcrOutput} + sed -i -e "s:%WDT_DOMAIN_TYPE%:${wdtDomainType}:g" ${dcrOutput} + + buildServerPodResources + if [ -z "${serverPodResources}" ]; then + sed -i -e "/%OPTIONAL_SERVERPOD_RESOURCES%/d" ${dcrOutput} + else + if [[ $(uname) -eq "Darwin" ]]; then + serverPodResources=$(echo "${serverPodResources}" | sed -e 's/\\n/%NEWLINE%/g') + sed -i -e "s:%OPTIONAL_SERVERPOD_RESOURCES%:${serverPodResources}:g" ${dcrOutput} + sed -i -e $'s|%NEWLINE%|\\\n|g' ${dcrOutput} + else + sed -i -e "s:%OPTIONAL_SERVERPOD_RESOURCES%:${serverPodResources}:g" ${dcrOutput} + fi + fi + + if [ "${domainHomeInImage}" == "true" ]; then + + # now we know which image to use, update the domain yaml file + if [ -z $image ]; then + sed -i -e "s|%WEBLOGIC_IMAGE%|${defaultImageName}|g" ${dcrOutput} + else + sed -i -e "s|%WEBLOGIC_IMAGE%|${image}|g" ${dcrOutput} + fi + else + sed -i -e "s:%WEBLOGIC_IMAGE%:${image}:g" ${dcrOutput} + fi + fi + + # Remove any "...yaml-e" and "...properties-e" files left over from running sed + rm -f ${domainOutputDir}/*.yaml-e + rm -f ${domainOutputDir}/*.properties-e + +} + + +# +# Function to markup the wdt model file +# +function updateModelFile { + # Update the wdt model file with kubernetes section + modelFile="${domainOutputDir}/tmp/wdt_model.yaml" + cat ${scriptDir}/wdt_k8s_model_template.yaml >> ${modelFile} + + sed -i -e "s:%DOMAIN_UID%:${domainUID}:g" ${modelFile} + sed 
-i -e "s:%NAMESPACE%:$namespace:g" ${modelFile} + sed -i -e "s:%DOMAIN_HOME%:${domainHome}:g" ${modelFile} + sed -i -e "s:%DOMAIN_HOME_SOURCE_TYPE%:${domainHomeSourceType}:g" ${modelFile} + sed -i -e "s:%WEBLOGIC_IMAGE_PULL_POLICY%:${imagePullPolicy}:g" ${modelFile} + sed -i -e "s:%WEBLOGIC_IMAGE_PULL_SECRET_PREFIX%:${imagePullSecretPrefix}:g" ${modelFile} + sed -i -e "s:%WEBLOGIC_IMAGE_PULL_SECRET_NAME%:${imagePullSecretName}:g" ${modelFile} + sed -i -e "s:%WEBLOGIC_CREDENTIALS_SECRET_NAME%:${weblogicCredentialsSecretName}:g" ${modelFile} + sed -i -e "s:%INCLUDE_SERVER_OUT_IN_POD_LOG%:${includeServerOutInPodLog}:g" ${modelFile} + sed -i -e "s:%LOG_HOME_ON_PV_PREFIX%:${logHomeOnPVPrefix}:g" ${modelFile} + sed -i -e "s:%LOG_HOME_ENABLED%:${logHomeOnPV}:g" ${modelFile} + sed -i -e "s:%LOG_HOME%:${logHome}:g" ${modelFile} + sed -i -e "s:%HTTP_ACCESS_LOG_IN_LOG_HOME%:${httpAccessLogInLogHome}:g" ${modelFile} + sed -i -e "s:%DATA_HOME%:${dataHome}:g" ${modelFile} + sed -i -e "s:%SERVER_START_POLICY%:${serverStartPolicy}:g" ${modelFile} + sed -i -e "s;%JAVA_OPTIONS%;${javaOptions};g" ${modelFile} + sed -i -e "s:%DOMAIN_PVC_NAME%:${persistentVolumeClaimName}:g" ${modelFile} + sed -i -e "s:%DOMAIN_ROOT_DIR%:${domainPVMountPath}:g" ${modelFile} + + if [ "${istioEnabled}" == "true" ]; then + exposeAdminNodePortPrefix="${disabledPrefix}" + fi + + sed -i -e "s:%EXPOSE_T3_CHANNEL_PREFIX%:${exposeAdminT3ChannelPrefix}:g" ${modelFile} + sed -i -e "s:%EXPOSE_ANY_CHANNEL_PREFIX%:${exposeAnyChannelPrefix}:g" ${modelFile} + sed -i -e "s:%EXPOSE_ADMIN_PORT_PREFIX%:${exposeAdminNodePortPrefix}:g" ${modelFile} + sed -i -e "s:%ADMIN_NODE_PORT%:${adminNodePort}:g" ${modelFile} + sed -i -e "s:%CLUSTER_NAME%:${clusterName}:g" ${modelFile} + sed -i -e "s:%INITIAL_MANAGED_SERVER_REPLICAS%:${initialManagedServerReplicas}:g" ${modelFile} + sed -i -e "s:%ISTIO_PREFIX%:${istioPrefix}:g" ${modelFile} + sed -i -e "s:%ISTIO_ENABLED%:${istioEnabled}:g" ${modelFile} + sed -i -e 
"s:%ISTIO_READINESS_PORT%:${istioReadinessPort}:g" ${modelFile} + # MII settings are used for model-in-image integration testing + sed -i -e "s:%MII_PREFIX%:${miiPrefix}:g" ${modelFile} + sed -i -e "s:%MII_CONFIG_MAP_PREFIX%:${miiConfigMapPrefix}:g" ${modelFile} + sed -i -e "s:%MII_CONFIG_MAP%:${miiConfigMap}:g" ${modelFile} + sed -i -e "s:%WDT_DOMAIN_TYPE%:${wdtDomainType}:g" ${modelFile} + + buildServerPodResources + if [ -z "${serverPodResources}" ]; then + sed -i -e "/%OPTIONAL_SERVERPOD_RESOURCES%/d" ${modelFile} + else + if [[ $(uname) -eq "Darwin" ]]; then + serverPodResources=$(echo "${serverPodResources}" | sed -e 's/\\n/%NEWLINE%/g') + sed -i -e "s:%OPTIONAL_SERVERPOD_RESOURCES%:${serverPodResources}:g" ${modelFile} + sed -i -e $'s|%NEWLINE%|\\\n|g' ${modelFile} + else + sed -i -e "s:%OPTIONAL_SERVERPOD_RESOURCES%:${serverPodResources}:g" ${modelFile} + fi + fi + + sed -i -e "s:%WEBLOGIC_IMAGE%:${image}:g" ${modelFile} +} + +# +# Function to create the domain recource +# +function createDomainResource { + kubectl apply -f ${dcrOutput} + + attempts=0 + while [ "$DCR_AVAIL" != "1" ] && [ ! $attempts -eq 10 ]; do + attempts=$((attempts + 1)) + sleep 1 + DCR_AVAIL=`kubectl get domain ${domainUID} -n ${namespace} | grep ${domainUID} | wc | awk ' { print $1; } '` + done + if [ "${DCR_AVAIL}" != "1" ]; then + fail "The domain resource ${domainUID} was not found" + fi +} + +# +# Function to create a domain +# $1 - boolean value indicating the location of the domain home +# true means domain home in image +# false means domain home on PV +# +function createDomain { + if [ "$#" != 1 ]; then + fail "The function must be called with domainHomeInImage parameter." 
+ fi + + domainHomeInImage="${1}" + if [ "true" != "${domainHomeInImage}" ] && [ "false" != "${domainHomeInImage}" ]; then + fail "The value of domainHomeInImage must be true or false: ${domainHomeInImage}" + fi + + # Setup the environment for running this script and perform initial validation checks + initialize + + # Generate files for creating the domain + createFiles + + # Check that the domain secret exists and contains the required elements + validateDomainSecret + + # Validate the domain's persistent volume claim + if [ "${doValidation}" == true ] && [ "${domainHomeInImage}" == false -o "${logHomeOnPV}" == true ]; then + validateDomainPVC + fi + + # Create the WebLogic domain home + createDomainHome + + if [ "${executeIt}" = true ]; then + createDomainResource + fi + + # Print a summary + printSummary +} + +# +# Function to update a domain +# $1 - boolean value indicating the location of the domain home +# true means domain home in image +# false means domain home on PV +# +function updateDomain { + + domainHomeInImage="false" + + # Setup the environment for running this script and perform initial validation checks + initialize + + # Generate files for creating the domain + createFiles update + + # Check that the domain secret exists and contains the required elements + validateDomainSecret + + # Validate the domain's persistent volume claim + if [ "${doValidation}" == true ]; then + validateDomainPVC + fi + + # Create the WebLogic domain home + updateDomainHome + + if [ "${executeIt}" = true ]; then + createDomainResource + fi + + # Print a summary + printSummary +} + +# checks if a given pod in a NameSpace has been deleted +function checkPodDelete(){ + + pod=$1 + ns=$2 + status="Terminating" + + if [ -z ${1} ]; then + echo "No Pod name provided " + exit -1 + fi + + if [ -z ${2} ]; then + echo "No NameSpace provided " + exit -2 + fi + + echo "Checking Status for Pod [$pod] in namespace [${ns}]" + max=10 + count=1 + while [ $count -le $max ] ; do + sleep 5 + 
pod=`kubectl get po/$1 -n ${ns} | grep -v NAME | awk '{print $1}'` + if [ -z ${pod} ]; then + status="Terminated" + echo "Pod [$1] removed from nameSpace [${ns}]" + break; + fi + count=`expr $count + 1` + echo "Pod [$pod] Status [${status}]" + done + + if [ $count -gt $max ] ; then + echo "[ERROR] The Pod[$1] in NameSpace [$ns] could not be deleted in 50s"; + exit 1 + fi +} + +# Checks if all container(s) in a pod are running state based on READY column +#NAME READY STATUS RESTARTS AGE +#domain1-adminserver 1/1 Running 0 4m + +function checkPodState(){ + + status="NotReady" + max=60 + count=1 + + pod=$1 + ns=$2 + state=${3:-1/1} + + echo "Checking Pod READY column for State [$state]" + pname=`kubectl get po -n ${ns} | grep -w ${pod} | awk '{print $1}'` + if [ -z ${pname} ]; then + echo "No such pod [$pod] exists in NameSpace [$ns] " + exit -1 + fi + + rcode=`kubectl get po ${pname} -n ${ns} | grep -w ${pod} | awk '{print $2}'` + [[ ${rcode} -eq "${state}" ]] && status="Ready" + + while [ ${status} != "Ready" -a $count -le $max ] ; do + sleep 5 + rcode=`kubectl get po/$pod -n ${ns} | grep -v NAME | awk '{print $2}'` + [[ ${rcode} -eq "1/1" ]] && status="Ready" + echo "Pod [$1] Status is ${status} Iter [$count/$max]" + count=`expr $count + 1` + done + if [ $count -gt $max ] ; then + echo "[ERROR] Unable to start the Pod [$pod] after 300s "; + exit 1 + fi + + pname=`kubectl get po -n ${ns} | grep -w ${pod} | awk '{print $1}'` + kubectl -n ${ns} get po ${pname} +} + +# Checks if a pod is available in a given namespace +function checkPod(){ + + max=20 + count=1 + + pod=$1 + ns=$2 + + pname=`kubectl get po -n ${ns} | grep -w ${pod} | awk '{print $1}'` + if [ -z ${pname} ]; then + echo "No such pod [$pod] exists in NameSpace [$ns]" + sleep 10 + fi + + rcode=`kubectl get po -n ${ns} | grep -w ${pod} | awk '{print $1}'` + if [ ! -z ${rcode} ]; then + echo "[$pod] already initialized .. " + return 0 + fi + + echo "The POD [${pod}] has not been initialized ..." 
+ while [ -z ${rcode} ]; do + [[ $count -gt $max ]] && break + echo "Pod[$pod] is being initialized ..." + sleep 5 + rcode=`kubectl get po -n ${ns} | grep $pod | awk '{print $1}'` + count=`expr $count + 1` + done + + if [ $count -gt $max ] ; then + echo "[ERROR] Could not find Pod [$pod] after 120s"; + exit 1 + fi +} + +# Checks if a service is available in a given namespace +function checkService(){ + svc=$1 + ns=$2 + startSecs=$SECONDS + maxWaitSecs=20 + while [ -z "`kubectl get service -n ${ns} | grep -w ${svc}`" ]; do + if [ $((SECONDS - startSecs)) -lt $maxWaitSecs ]; then + echo "Service [$svc] not found after $((SECONDS - startSecs)) seconds, retrying ..." + sleep 5 + else + echo "[Error] Could not find Service [$svc] after $((SECONDS - startSecs)) seconds" + exit 1 + fi + done + echo "Service [$svc] found" +} diff --git a/OracleIdentityGovernance/kubernetes/common/validate.sh b/OracleIdentityGovernance/kubernetes/common/validate.sh new file mode 100755 index 000000000..4bbbffb71 --- /dev/null +++ b/OracleIdentityGovernance/kubernetes/common/validate.sh @@ -0,0 +1,481 @@ +#!/usr/bin/env bash +# Copyright (c) 2018, 2021, Oracle and/or its affiliates. +# Licensed under the Universal Permissive License v 1.0 as shown at https://oss.oracle.com/licenses/upl. +# +# Description +# Common validation functions shared by all other scripts that process inputs properties. 
+# + +# +# Function to note that a validate error has occurred +# +function validationError { + printError $* + validateErrors=true +} + +# +# Function to cause the script to fail if there were any validation errors +# +function failIfValidationErrors { + if [ "$validateErrors" = true ]; then + fail 'The errors listed above must be resolved before the script can continue' + fi +} + +# +# Function to validate that a list of required input parameters were specified +# +function validateInputParamsSpecified { + for p in $*; do + local name=$p + local val=${!name} + if [ -z "$val" ]; then + validationError "The ${name} parameter in ${valuesInputFile} is missing, null or empty" + fi + done +} + +# +# Function to validate that a list of input parameters have boolean values. +# It assumes that validateInputParamsSpecified will also be called for these params. +# +function validateBooleanInputParamsSpecified { + validateInputParamsSpecified $* + for p in $*; do + local name=$p + local val=${!name} + if ! [ -z $val ]; then + if [ "true" != "$val" ] && [ "false" != "$val" ]; then + validationError "The value of $name must be true or false: $val" + fi + fi + done +} + +# +# Function to validate that a list of input parameters have integer values. +# +function validateIntegerInputParamsSpecified { + validateInputParamsSpecified $* + for p in $*; do + local name=$p + local val=${!name} + if ! [ -z $val ]; then + local intVal="" + printf -v intVal '%d' "$val" 2>/dev/null + if ! 
[ "${val}" == "${intVal}" ]; then + validationError "The value of $name must be an integer: $val" + fi + fi + done +} + +# +# Function to check if a value is lowercase +# $1 - name of object being checked +# $2 - value to check +function validateLowerCase { + local lcVal=$(toLower $2) + if [ "$lcVal" != "$2" ]; then + validationError "The value of $1 must be lowercase: $2" + fi +} + +# +# Function to check if a value is lowercase and legal DNS name +# $1 - name of object being checked +# $2 - value to check +function validateDNS1123LegalName { + local val=$(toDNS1123Legal $2) + if [ "$val" != "$2" ]; then + validationError "The value of $1 contains invalid charaters: $2" + fi +} + +# +# Function to validate the namespace +# +function validateNamespace { + validateLowerCase "namespace" ${namespace} +} + +# +# Function to validate the version of the inputs file +# +function validateVersion { + local requiredVersion=${requiredInputsVersion} + if [ "${version}" != "${requiredVersion}" ]; then + validationError "Invalid version: \"${version}\". Must be ${requiredVersion}." + fi +} + +# +# Function to ensure the domain uid is a legal DNS name +# +function validateDomainUid { + validateLowerCase "domainUID" ${domainUID} + validateDNS1123LegalName domainUID ${domainUID} +} + +# +# Function to ensure the namespace is lowercase +# +function validateNamespace { + validateLowerCase "namespace" ${namespace} +} + +# +# Create an instance of clusterName to be used in cases where a legal DNS name is required. +# +function validateClusterName { + clusterNameSVC=$(toDNS1123Legal $clusterName) +} + +# +# Create an instance of adminServerName to be used in cases where a legal DNS name is required. +# +function validateAdminServerName { + adminServerNameSVC=$(toDNS1123Legal $adminServerName) +} + +# +# Create an instance of adminServerName to be used in cases where a legal DNS name is required. 
+# +function validateManagedServerNameBase { + managedServerNameBaseSVC=$(toDNS1123Legal $managedServerNameBase) +} + +# +# Function to validate the secret name +# +function validateWeblogicCredentialsSecretName { + validateLowerCase "weblogicCredentialsSecretName" ${weblogicCredentialsSecretName} +} + +# +# Function to validate the weblogic image pull policy +# +function validateWeblogicImagePullPolicy { + if [ ! -z ${imagePullPolicy} ]; then + case ${imagePullPolicy} in + "IfNotPresent") + ;; + "Always") + ;; + "Never") + ;; + *) + validationError "Invalid value for imagePullPolicy: ${imagePullPolicy}. Valid values are IfNotPresent, Always, and Never." + ;; + esac + else + # Set the default + imagePullPolicy="IfNotPresent" + fi + failIfValidationErrors +} + +# +# Function to validate the fmwDomainType +# +function validateFmwDomainType { + if [ ! -z ${fmwDomainType} ]; then + case ${fmwDomainType} in + "JRF") + ;; + "RestrictedJRF") + ;; + *) + validationError "Invalid value for fmwDomainType: ${fmwDomainType}. Valid values are JRF or restrictedJRF." + ;; + esac + else + # Set the default + fmwDomainType="JRF" + fi + failIfValidationErrors +} + +# +# Function to validate the weblogic image pull secret name +# +function validateWeblogicImagePullSecretName { + if [ ! -z ${imagePullSecretName} ]; then + validateLowerCase imagePullSecretName ${imagePullSecretName} + imagePullSecretPrefix="" + if [ "${generateOnly}" = false ]; then + validateWeblogicImagePullSecret + fi + else + # Set name blank when not specified, and comment out the yaml + imagePullSecretName="" + imagePullSecretPrefix="#" + fi +} + +# +# Function to validate the weblogic image pull secret exists +# +function validateWeblogicImagePullSecret { + # The kubernetes secret for pulling images from a container registry is optional. + # If it was specified, make sure it exists. 
+ validateSecretExists ${imagePullSecretName} ${namespace} + failIfValidationErrors +} + +# try to execute kubectl to see whether kubectl is available +function validateKubectlAvailable { + if ! [ -x "$(command -v kubectl)" ]; then + validationError "kubectl is not installed" + fi +} + +# Function to validate the server start policy value +# +function validateServerStartPolicy { + validateInputParamsSpecified serverStartPolicy + if [ ! -z "${serverStartPolicy}" ]; then + case ${serverStartPolicy} in + "NEVER") + ;; + "ALWAYS") + ;; + "IF_NEEDED") + ;; + "ADMIN_ONLY") + ;; + *) + validationError "Invalid value for serverStartPolicy: ${serverStartPolicy}. Valid values are 'NEVER', 'ALWAYS', 'IF_NEEDED', and 'ADMIN_ONLY'." + ;; + esac + fi +} + +# +# Function to validate the weblogic domain storage reclaim policy +# +function validateWeblogicDomainStorageReclaimPolicy { + validateInputParamsSpecified weblogicDomainStorageReclaimPolicy + if [ ! -z "${weblogicDomainStorageReclaimPolicy}" ]; then + case ${weblogicDomainStorageReclaimPolicy} in + "Retain") + ;; + "Delete") + if [ "${weblogicDomainStoragePath:0:5}" != "/tmp/" ]; then + validationError "ERROR - Invalid value for weblogicDomainStorageReclaimPolicy ${weblogicDomainStorageReclaimPolicy} with weblogicDomainStoragePath ${weblogicDomainStoragePath} that is not /tmp/" + fi + ;; + "Recycle") + ;; + *) + validationError "Invalid value for weblogicDomainStorageReclaimPolicy: ${weblogicDomainStorageReclaimPolicy}. Valid values are Retain, Delete and Recycle." + ;; + esac + fi +} + +# +# Function to validate the weblogic domain storage type +# +function validateWeblogicDomainStorageType { + validateInputParamsSpecified weblogicDomainStorageType + if [ ! 
-z "${weblogicDomainStorageType}" ]; then + case ${weblogicDomainStorageType} in + "HOST_PATH") + ;; + "NFS") + validateInputParamsSpecified weblogicDomainStorageNFSServer + ;; + *) + validationError "Invalid value for weblogicDomainStorageType: ${weblogicDomainStorageType}. Valid values are HOST_PATH and NFS." + ;; + esac + fi +} + +# +# Function to validate the load balancer value +# +function validateLoadBalancer { + validateInputParamsSpecified loadBalancer + if [ ! -z "${loadBalancer}" ]; then + case ${loadBalancer} in + "TRAEFIK") + ;; + "APACHE") + ;; + "VOYAGER") + ;; + "NONE") + ;; + *) + validationError "Invalid value for loadBalancer: ${loadBalancer}. Valid values are APACHE, TRAEFIK, VOYAGER and NONE." + ;; + esac + fi +} + +# +# Function to validate a kubernetes secret exists +# $1 - the name of the secret +# $2 - namespace +function validateSecretExists { + echo "Checking to see if the secret ${1} exists in namespace ${2}" + local SECRET=`kubectl get secret ${1} -n ${2} | grep ${1} | wc | awk ' { print $1; }'` + if [ "${SECRET}" != "1" ]; then + validationError "The secret ${1} was not found in namespace ${2}" + fi +} + +# +# Function to validate the domain secret +# +function validateDomainSecret { + # Verify the secret exists + validateSecretExists ${weblogicCredentialsSecretName} ${namespace} + failIfValidationErrors + + # Verify the secret contains a username + SECRET=`kubectl get secret ${weblogicCredentialsSecretName} -n ${namespace} -o jsonpath='{.data}' | tr -d '"' | grep username: | wc | awk ' { print $1; }'` + if [ "${SECRET}" != "1" ]; then + validationError "The domain secret ${weblogicCredentialsSecretName} in namespace ${namespace} does not contain a username" + fi + + # Verify the secret contains a password + SECRET=`kubectl get secret ${weblogicCredentialsSecretName} -n ${namespace} -o jsonpath='{.data}' | tr -d '"'| grep password: | wc | awk ' { print $1; }'` + if [ "${SECRET}" != "1" ]; then + validationError "The domain secret 
${weblogicCredentialsSecretName} in namespace ${namespace} does not contain a password" + fi + failIfValidationErrors +} + +# +# function to validate if we will be using wdt or wlst to create the domain +# +function validateDomainFilesDir { + useWdt=true + if [ -z "${createDomainFilesDir}" ] || [ "${createDomainFilesDir}" == "wlst" ]; then + useWdt=false + fi +} + +# +# Function to validate the common input parameters +# +function validateCommonInputs { + sample_name=${1:-"other"} + + # Parse the common inputs file + parseCommonInputs + + validateInputParamsSpecified \ + adminServerName \ + domainUID \ + clusterName \ + managedServerNameBase \ + namespace \ + includeServerOutInPodLog \ + version + + validateIntegerInputParamsSpecified \ + adminPort \ + initialManagedServerReplicas \ + managedServerPort \ + t3ChannelPort \ + adminNodePort + + if [ ! "${sample_name}" == "fmw-domain-home-in-image" ]; then + validateIntegerInputParamsSpecified configuredManagedServerCount + fi + + validateBooleanInputParamsSpecified \ + productionModeEnabled \ + exposeAdminT3Channel \ + exposeAdminNodePort \ + includeServerOutInPodLog + + export requiredInputsVersion="create-weblogic-sample-domain-inputs-v1" + validateVersion + + validateDomainUid + validateNamespace + validateAdminServerName + validateManagedServerNameBase + validateClusterName + validateWeblogicCredentialsSecretName + validateServerStartPolicy + validateWeblogicImagePullPolicy + validateWeblogicImagePullSecretName + validateFmwDomainType + validateDomainFilesDir + # Below three validate methods are used for MII integration testing + validateWdtDomainType + validateWdtModelFile + validateWdtModelPropertiesFile + + failIfValidationErrors +} + +# +# Function to validate the domain's persistent volume claim has been created +# +function validateDomainPVC { + # Check if the persistent volume claim is already available + checkPvcExists ${persistentVolumeClaimName} ${namespace} + if [ "${PVC_EXISTS}" = "false" ]; then + 
validationError "The domain persistent volume claim ${persistentVolumeClaimName} does not exist in namespace ${namespace}" + fi + failIfValidationErrors +} + +# +# Function to validate the WDT model file exists +# used for MII integration testing +# +function validateWdtModelFile { + # Check if the model file exists + if [ ! -z $wdtModelFile ]; then + if [ ! -f $wdtModelFile ]; then + validationError "The WDT model file ${wdtModelFile} does not exist" + fi + fi + failIfValidationErrors +} + +# +# Function to validate the WDT model property file exists +# used for MII integration testing +# +function validateWdtModelPropertiesFile { + # Check if the model property file exists + if [ ! -z $wdtModelPropertiesFile ]; then + if [ ! -f $wdtModelPropertiesFile ]; then + validationError "The WDT model property file ${wdtModelPropertiesFile} does not exist" + fi + fi + failIfValidationErrors +} + +# Function to validate the wdtDomainType +# used for MII integration testing +function validateWdtDomainType { + if [ ! -z ${wdtDomainType} ]; then + case ${wdtDomainType} in + "WLS") + ;; + "JRF") + ;; + "RestrictedJRF") + ;; + *) + validationError "Invalid value for wdtDomainType: ${wdtDomainType}. Valid values are WLS or JRF or restrictedJRF." + ;; + esac + else + # Set the default + wdtDomainType="WLS" + fi + failIfValidationErrors +} + diff --git a/OracleIdentityGovernance/kubernetes/common/wdt-and-wit-utility.sh b/OracleIdentityGovernance/kubernetes/common/wdt-and-wit-utility.sh new file mode 100755 index 000000000..aa9cc691c --- /dev/null +++ b/OracleIdentityGovernance/kubernetes/common/wdt-and-wit-utility.sh @@ -0,0 +1,439 @@ +#!/bin/bash +# Copyright (c) 2021, Oracle and/or its affiliates. +# Licensed under the Universal Permissive License v 1.0 as shown at https://oss.oracle.com/licenses/upl. +# +# Description: +# +# This script contains functions for installing WebLogic Deploy Tool (WDT) and +# WebLogic Image Tool (WIT), and for running WDT. 
+# +# +# Usage: +# +# Export customized values for the input shell environment variables as needed +# before calling this script. +# +# Outputs: +# +# WDT install: WDT_DIR/weblogic-deploy/... +# +# Copy of wdt model: WDT_DIR/$(basename WDT_MODEL_FILE) +# Copy of wdt vars: WDT_DIR/$(basename WDT_VAR_FILE) +# +# WDT logs: WDT_DIR/weblogic-deploy/logs/... +# WDT stdout: WDT_DIR/createDomain.sh.out +# +# WebLogic domain home: DOMAIN_HOME_DIR +# default: /shared/domains/ +# +# Input environment variables: +# +# ORACLE_HOME Oracle home with a WebLogic install. +# default: /u01/oracle +# +# DOMAIN_HOME_DIR Target location for generated domain. +# +# WDT_MODEL_FILE Full path to WDT model file. +# default: the directory that contains this script +# plus "/wdt_model.yaml" +# +# WDT_VAR_FILE Full path to WDT variable file (java properties format). +# default: the directory that contains this script +# plus "/create-domain-inputs.yaml" +# +# WDT_DIR Target location to install and run WDT, and to keep a copy of +# $WDT_MODEL_FILE and $WDT_MODEL_VARS. Also the location +# of WDT log files. +# default: /shared/wdt +# +# WDT_VERSION WDT version to download. +# default: LATEST +# +# WDT_INSTALL_ZIP_FILE Filename of WDT install zip. +# default: weblogic-deploy.zip +# +# WDT_INSTALL_ZIP_URL URL for downloading WDT install zip +# default: https://github.com/oracle/weblogic-deploy-tooling/releases/latest/download/$WDT_INSTALL_ZIP_FILE +# +# WIT_DIR Target location to install WIT +# default: /shared/imagetool +# +# WIT_VERSION WIT version to download. +# default: LATEST +# +# WIT_INSTALL_ZIP_FILE Filename of WIT install zip. 
+# default: imagetool.zip +# +# WIT_INSTALL_ZIP_URL URL for downloading WIT install zip +# default: https://github.com/oracle/weblogic-image-tool/releases/latest/download/$WIT_INSTALL_ZIP_FILE +# + + +# Initialize globals + +export ORACLE_HOME=${ORACLE_HOME:-/u01/oracle} + +SCRIPTPATH="$( cd "$(dirname "$0")" > /dev/null 2>&1 ; pwd -P )" +WDT_MODEL_FILE=${WDT_MODEL_FILE:-"$SCRIPTPATH/wdt_model.yaml"} +WDT_VAR_FILE=${WDT_VAR_FILE:-"$SCRIPTPATH/create-domain-inputs.yaml"} + +WDT_DIR=${WDT_DIR:-/shared/wdt} +WDT_VERSION=${WDT_VERSION:-LATEST} + +WIT_DIR=${WIT_DIR:-/shared/imagetool} +WIT_VERSION=${WIT_VERSION:-LATEST} + +DOMAIN_TYPE="${DOMAIN_TYPE:-WLS}" + +function download { + local fileUrl="${1}" + + local curl_res=1 + max=20 + count=0 + while [ $curl_res -ne 0 -a $count -lt $max ] ; do + sleep 1 + count=`expr $count + 1` + for proxy in "${https_proxy}" "${https_proxy2}"; do + echo @@ "Info: Downloading $fileUrl with https_proxy=\"$proxy\"" + https_proxy="${proxy}" \ + curl --silent --show-error --connect-timeout 10 -O -L $fileUrl + curl_res=$? + [ $curl_res -eq 0 ] && break + done + done + if [ $curl_res -ne 0 ]; then + echo @@ "Error: Download failed." + return 1 + fi +} + +function run_wdt { + # + # Run WDT using WDT_VAR_FILE, WDT_MODEL_FILE, and ORACLE_HOME. + # Output: + # - result domain will be in DOMAIN_HOME_DIR + # - logging output is in $WDT_DIR/createDomain.sh.out and $WDT_DIR/weblogic-deploy/logs + # - WDT_VAR_FILE & WDT_MODEL_FILE will be copied to WDT_DIR. + # + + local action="${1}" + + # Input files and directories. 
+ + local inputs_orig="$WDT_VAR_FILE" + local model_orig="$WDT_MODEL_FILE" + local oracle_home="$ORACLE_HOME" + local domain_type="$DOMAIN_TYPE" + local wdt_bin_dir="$WDT_DIR/weblogic-deploy/bin" + local wdt_createDomain_script="$wdt_bin_dir/createDomain.sh" + + if [ ${action} = "create" ]; then + local wdt_domain_script="$wdt_bin_dir/createDomain.sh" + else + local wdt_domain_script="$wdt_bin_dir/updateDomain.sh" + fi + + local domain_home_dir="$DOMAIN_HOME_DIR" + if [ -z "${domain_home_dir}" ]; then + local domain_dir="/shared/domains" + local domain_uid=`egrep 'domainUID' $inputs_orig | awk '{print $2}'` + local domain_home_dir=$domain_dir/$domain_uid + fi + + mkdir -p $domain_home_dir + + # Output files and directories. + + local inputs_final=$WDT_DIR/$(basename "$inputs_orig") + local model_final=$WDT_DIR/$(basename "$model_orig") + if [ ${action} = "create" ]; then + local out_file=$WDT_DIR/createDomain.sh.out + else + local out_file=$WDT_DIR/updateDomain.sh.out + fi + local wdt_log_dir="$WDT_DIR/weblogic-deploy/logs" + + echo @@ "Info: About to run WDT ${wdt_domain_script}" + + for directory in wdt_bin_dir SCRIPTPATH WDT_DIR oracle_home; do + if [ ! -d "${!directory}" ]; then + echo @@ "Error: Could not find ${directory} directory ${!directory}." + return 1 + fi + done + + for fil in inputs_orig model_orig wdt_createDomain_script; do + if [ ! -f "${!fil}" ]; then + echo @@ "Error: Could not find ${fil} file ${!fil}." + return 1 + fi + done + + cp $model_orig $model_final || return 1 + cp $inputs_orig $inputs_final || return 1 + + local save_dir=`pwd` + cd $WDT_DIR || return 1 + + cmd=" + $wdt_domain_script + -oracle_home $oracle_home + -domain_type $domain_type + -domain_home $domain_home_dir + -model_file $model_final + -variable_file $inputs_final + " + + echo @@ "Info: About to run the following WDT command:" + echo "${cmd}" + echo @@ "Info: WDT output will be in $out_file and $wdt_log_dir" + eval $cmd > $out_file 2>&1 + local wdt_res=$? 
+ + cd $save_dir + + if [ $wdt_res -ne 0 ]; then + if [ ${action} = "create" ]; then + cat $WDT_DIR/createDomain.sh.out + echo @@ "Info: WDT createDomain.sh output is in $out_file and $wdt_log_dir" + echo @@ "Error: WDT createDomain.sh failed." + return 1 + else + cat $WDT_DIR/updateDomain.sh.out + echo @@ "Info: WDT updateDomain.sh output is in $out_file and $wdt_log_dir" + echo @@ "Error: WDT updateDomain.sh failed." + return 1 + fi + fi + + cd $WDT_DIR || return 1 + + cmd=" + $wdt_bin_dir/extractDomainResource.sh + -oracle_home $oracle_home + -domain_resource_file domain${action}.yaml + -domain_home $domain_home_dir + -model_file $model_final + -variable_file $inputs_final + " + echo @@ "Info: About to run the following WDT command:" + echo "${cmd}" + echo @@ "Info: WDT output will be in extract${action}.out and $wdt_log_dir" + eval $cmd > extract${action}.out 2>&1 + local wdt_res=$? + + cd $save_dir + + if [ $wdt_res -ne 0 ]; then + cat $WDT_DIR/extract${action}.out + echo @@ "Info: WDT extractDomainResource output is in extract${action}.out and $wdt_log_dir" + echo @@ "Error: WDT createDomain.sh failed." + return 1 + fi + + if [ ${action} = "create" ]; then + # chmod -R g+w $domain_home_dir || return 1 + echo @@ "Info: WDT createDomain.sh succeeded." + else + echo @@ "Info: WDT updateDomain.sh succeeded." 
+ fi + + return 0 +} + +function setup_wdt_shared_dir { + mkdir -p $WDT_DIR || return 1 +} + +# +# Install Weblogic Server Deploy Tooling to ${WDT_DIR} +# +function install_wdt { + + WDT_INSTALL_ZIP_FILE="${WDT_INSTALL_ZIP_FILE:-weblogic-deploy.zip}" + + if [ "$WDT_VERSION" == "LATEST" ]; then + WDT_INSTALL_ZIP_URL=${WDT_INSTALL_ZIP_URL:-"https://github.com/oracle/weblogic-deploy-tooling/releases/latest/download/$WDT_INSTALL_ZIP_FILE"} + else + WDT_INSTALL_ZIP_URL=${WDT_INSTALL_ZIP_URL:-"https://github.com/oracle/weblogic-deploy-tooling/releases/download/release-$WDT_VERSION/$WDT_INSTALL_ZIP_FILE"} + fi + + local save_dir=`pwd` + cd $WDT_DIR || return 1 + + echo @@ "Info: Downloading $WDT_INSTALL_ZIP_URL " + download $WDT_INSTALL_ZIP_URL || return 1 + + if [ ! -f $WDT_INSTALL_ZIP_FILE ]; then + cd $save_dir + echo @@ "Error: Download failed or $WDT_INSTALL_ZIP_FILE not found." + return 1 + fi + + echo @@ "Info: Archive downloaded to $WDT_DIR/$WDT_INSTALL_ZIP_FILE, about to unzip via 'jar xf'." + + jar xf $WDT_INSTALL_ZIP_FILE + local jar_res=$? + + cd $save_dir + + if [ $jar_res -ne 0 ]; then + echo @@ "Error: Install failed while unzipping $WDT_DIR/$WDT_INSTALL_ZIP_FILE" + return $jar_res + fi + + if [ ! -d "$WDT_DIR/weblogic-deploy/bin" ]; then + echo @@ "Error: Install failed: directory '$WDT_DIR/weblogic-deploy/bin' not found." + return 1 + fi + + chmod 775 $WDT_DIR/weblogic-deploy/bin/* || return 1 + + echo @@ "Info: Install succeeded, wdt install is in the $WDT_DIR/weblogic-deploy directory." + return 0 +} + +# +# Install WebLogic Image Tool to ${WIT_DIR}. Used by install_wit_if_needed. +# Do not call this function directory. 
+# +function install_wit { + + WIT_INSTALL_ZIP_FILE="${WIT_INSTALL_ZIP_FILE:-imagetool.zip}" + + if [ "$WIT_VERSION" == "LATEST" ]; then + WIT_INSTALL_ZIP_URL=${WDT_INSTALL_ZIP_URL:-"https://github.com/oracle/weblogic-image-tool/releases/latest/download/$WIT_INSTALL_ZIP_FILE"} + else + WIT_INSTALL_ZIP_URL=${WIT_INSTALL_ZIP_URL:-"https://github.com/oracle/weblogic-image-tool/releases/download/release-$WIT_VERSION/$WIT_INSTALL_ZIP_FILE"} + fi + + + + local save_dir=`pwd` + + echo @@ "imagetool.sh not found in ${imagetoolBinDir}. Installing imagetool..." + + echo @@ "Info: Downloading $WIT_INSTALL_ZIP_URL " + download $WIT_INSTALL_ZIP_URL || return 1 + + if [ ! -f $WIT_INSTALL_ZIP_FILE ]; then + cd $save_dir + echo @@ "Error: Download failed or $WIT_INSTALL_ZIP_FILE not found." + return 1 + fi + echo @@ "Info: Archive downloaded to $WIT_DIR/$WIT_INSTALL_ZIP_FILE, about to unzip via 'jar xf'." + + jar xf $WIT_INSTALL_ZIP_FILE + local jar_res=$? + + cd $save_dir + + if [ $jar_res -ne 0 ]; then + echo @@ "Error: Install failed while unzipping $WIT_DIR/$WIT_INSTALL_ZIP_FILE" + return $jar_res + fi + + if [ ! -d "$WIT_DIR/imagetool/bin" ]; then + echo @@ "Error: Install failed: directory '$WIT_DIR/imagetool/bin' not found." + return 1 + fi + + chmod 775 $WIT_DIR/imagetool/bin/* || return 1 +} + +# +# Checks whether WebLogic Image Tool is already installed under ${WIT_DIR}, and install +# it if not. +# +function install_wit_if_needed { + + local save_dir=`pwd` + + mkdir -p $WIT_DIR || return 1 + cd $WIT_DIR || return 1 + + imagetoolBinDir=$WIT_DIR/imagetool/bin + if [ -f $imagetoolBinDir/imagetool.sh ]; then + echo @@ "Info: imagetool.sh already exist in ${imagetoolBinDir}. Skipping WIT installation." 
+ else + install_wit + fi + + export WLSIMG_CACHEDIR="$WIT_DIR/imagetool-cache" + + # Check existing imageTool cache entry for WDT: + # - if there is already an entry, and the WDT installer file specified in the cache entry exists, skip WDT installation + # - if file in cache entry doesn't exist, delete cache entry, install WDT, and add WDT installer to cache + # - if entry does not exist, install WDT, and add WDT installer to cache + if [ "$WDT_VERSION" == "LATEST" ]; then + wdtCacheVersion="latest" + else + wdtCacheVersion=$WDT_VERSION + fi + + local listItems=$( ${imagetoolBinDir}/imagetool.sh cache listItems | grep "wdt_${wdtCacheVersion}" ) + + if [ ! -z "$listItems" ]; then + local wdt_file_path_in_cache=$(echo $listItems | sed 's/.*=\(.*\)/\1/') + if [ -f "$wdt_file_path_in_cache" ]; then + skip_wdt_install=true + else + echo @@ "Info: imageTool cache contains an entry for WDT zip at $wdt_file_path_in_cache which does not exist. Removing from cache entry." + ${imagetoolBinDir}/imagetool.sh cache deleteEntry \ + --key wdt_${wdtCacheVersion} + fi + fi + + if [ -z "$skip_wdt_install" ]; then + echo @@ "Info: imageTool cache does not contain a valid entry for wdt_${wdtCacheVersion}. Installing WDT" + setup_wdt_shared_dir || return 1 + install_wdt || return 1 + ${imagetoolBinDir}/imagetool.sh cache addInstaller \ + --type wdt \ + --version $WDT_VERSION \ + --path $WDT_DIR/$WDT_INSTALL_ZIP_FILE || return 1 + else + echo @@ "Info: imageTool cache already contains entry ${listItems}. Skipping WDT installation." + fi + + cd $save_dir + + echo @@ "Info: Install succeeded, imagetool install is in the $WIT_DIR/imagetool directory." 
+ return 0 +} + +function encrypt_model { + # + # run encryptModel.sh from WDT to encrypt model and properties files + # + local domainOutputDirFullPath=${1} # full path to directory where the model, encrypt file, and domain properties files are + local model_file=${2} # path to file containing encryption key relative to ${domainOutputDirFullPath} + local encrypt_key_file=${3} # path to file containing encryption key relative to ${domainOutputDirFullPath} + local domain_properties_file=${4} # path to domain properties file relative to ${domainOutputDirFullPath} + local oracle_home="$ORACLE_HOME" + + echo @@ "Info: encrypt passwords in the variables file at ${domainOutputDirFullPath}/${domain_properties_file} using encryption key from create-domain.sh argument written to file: ${encrypt_key_file}" + + cmd=" + cat /shared/${encrypt_key_file} /shared/${encrypt_key_file} | + /wdt/bin/encryptModel.sh \ + -oracle_home ${oracle_home} \ + -model_file /shared/${model_file} \ + -variable_file /shared/${domain_properties_file} + " + echo $cmd > ${domainOutputDirFullPath}/cmd.sh + chmod 755 ${domainOutputDirFullPath}/cmd.sh + echo @@ "Info: Encrypt Model: About to run the following command in container with image ${domainHomeImageBase}:" + cat ${domainOutputDirFullPath}/cmd.sh + + chmod 766 ${domainOutputDirFullPath}/${domain_properties_file} + docker run -it --rm -v ${domainOutputDirFullPath}:/shared -v ${WDT_DIR}/weblogic-deploy:/wdt ${domainHomeImageBase} /bin/bash -c /shared/cmd.sh || return 1 + + # clean up the generated files + rm ${domainOutputDirFullPath}/cmd.sh + + echo @@ "Info: encrypt_model Completed" +} + + diff --git a/OracleIdentityGovernance/kubernetes/create-kubernetes-secrets/create-azure-storage-credentials-secret.sh b/OracleIdentityGovernance/kubernetes/create-kubernetes-secrets/create-azure-storage-credentials-secret.sh new file mode 100755 index 000000000..8e6d3d947 --- /dev/null +++ 
b/OracleIdentityGovernance/kubernetes/create-kubernetes-secrets/create-azure-storage-credentials-secret.sh @@ -0,0 +1,92 @@ +#!/usr/bin/env bash +# Copyright (c) 2018, 2021, Oracle and/or its affiliates. +# Licensed under the Universal Permissive License v 1.0 as shown at https://oss.oracle.com/licenses/upl. +# +# Description +# This sample script creates a Kubernetes secret for Azure Storage to use Azure file share on AKS. +# +# The following pre-requisites must be handled prior to running this script: +# * The kubernetes namespace must already be created +# + +script="${BASH_SOURCE[0]}" + +# +# Function to exit and print an error message +# $1 - text of message +function fail { + echo [ERROR] $* + exit 1 +} + +# Try to execute kubectl to see whether kubectl is available +function validateKubectlAvailable { + if ! [ -x "$(command -v kubectl)" ]; then + fail "kubectl is not installed" + fi +} + +function usage { + echo usage: ${script} -c storageAccountName -k storageAccountKey [-s secretName] [-n namespace] [-h] + echo " -a storage account name, must be specified." + echo " -k storage account key, must be specified." + echo " -s secret name, optional. Use azure-secret if not specified." + echo " -n namespace, optional. Use the default namespace if not specified." + echo " -h Help" + exit $1 +} + +# +# Parse the command line options +# +secretName=azure-secret +namespace=default +while getopts "ha:k:s:n:" opt; do + case $opt in + a) storageAccountName="${OPTARG}" + ;; + k) storageAccountKey="${OPTARG}" + ;; + s) secretName="${OPTARG}" + ;; + n) namespace="${OPTARG}" + ;; + h) usage 0 + ;; + *) usage 1 + ;; + esac +done + +if [ -z ${storageAccountName} ]; then + echo "${script}: -e must be specified." + missingRequiredOption="true" +fi + +if [ -z ${storageAccountKey} ]; then + echo "${script}: -p must be specified." 
+ missingRequiredOption="true" +fi + +if [ "${missingRequiredOption}" == "true" ]; then + usage 1 +fi + +# check and see if the secret already exists +result=`kubectl get secret ${secretName} -n ${namespace} --ignore-not-found=true | grep ${secretName} | wc | awk ' { print $1; }'` +if [ "${result:=Error}" != "0" ]; then + fail "The secret ${secretName} already exists in namespace ${namespace}." +fi + +# create the secret +kubectl -n $namespace create secret generic $secretName \ + --from-literal=azurestorageaccountname=$storageAccountName \ + --from-literal=azurestorageaccountkey=$storageAccountKey + +# Verify the secret exists +SECRET=`kubectl get secret ${secretName} -n ${namespace} | grep ${secretName} | wc | awk ' { print $1; }'` +if [ "${SECRET}" != "1" ]; then + fail "The secret ${secretName} was not found in namespace ${namespace}" +fi + +echo "The secret ${secretName} has been successfully created in the ${namespace} namespace." diff --git a/OracleIdentityGovernance/kubernetes/create-kubernetes-secrets/create-docker-credentials-secret.sh b/OracleIdentityGovernance/kubernetes/create-kubernetes-secrets/create-docker-credentials-secret.sh new file mode 100755 index 000000000..48f113b93 --- /dev/null +++ b/OracleIdentityGovernance/kubernetes/create-kubernetes-secrets/create-docker-credentials-secret.sh @@ -0,0 +1,106 @@ +#!/usr/bin/env bash +# Copyright (c) 2018, 2021, Oracle and/or its affiliates. +# Licensed under the Universal Permissive License v 1.0 as shown at https://oss.oracle.com/licenses/upl. +# +# Description +# This sample script creates a Kubernetes secret for container registry credentials for use with the WLS Operator on AKS. 
+# +# The following pre-requisites must be handled prior to running this script: +# * The kubernetes namespace must already be created +# + +script="${BASH_SOURCE[0]}" + +# +# Function to exit and print an error message +# $1 - text of message +function fail { + echo [ERROR] $* + exit 1 +} + +# Try to execute kubectl to see whether kubectl is available +function validateKubectlAvailable { + if ! [ -x "$(command -v kubectl)" ]; then + fail "kubectl is not installed" + fi +} + +function usage { + echo usage: ${script} -e email -p password -u username [-s secretName] [-d dockerServer] [-n namespace] [-h] + echo " -e email, must be specified." + echo " -p password, must be specified." + echo " -u username, must be specified." + echo " -s secret name, optional, Use regcred if not specified." + echo " -d docker server, optional, Use docker.io if not specified." + echo " -n namespace, optional. Use the default namespace if not specified" + echo " -h Help" + exit $1 +} + +# +# Parse the command line options +# +secretName=regcred +namespace=default +dockerServer=container-registry.oracle.com +while getopts "he:p:u:n:d:s:d:" opt; do + case $opt in + e) email="${OPTARG}" + ;; + p) password="${OPTARG}" + ;; + u) username="${OPTARG}" + ;; + s) secretName="${OPTARG}" + ;; + d) dockerServer="${OPTARG}" + ;; + n) namespace="${OPTARG}" + ;; + h) usage 0 + ;; + *) usage 1 + ;; + esac +done + +if [ -z ${email} ]; then + echo "${script}: -e must be specified." + missingRequiredOption="true" +fi + +if [ -z ${password} ]; then + echo "${script}: -p must be specified." + missingRequiredOption="true" +fi + +if [ -z ${username} ]; then + echo "${script}: -u must be specified." 
+ missingRequiredOption="true" +fi + +if [ "${missingRequiredOption}" == "true" ]; then + usage 1 +fi + +# check and see if the secret already exists +result=`kubectl get secret ${secretName} -n ${namespace} --ignore-not-found=true | grep ${secretName} | wc | awk ' { print $1; }'` +if [ "${result:=Error}" != "0" ]; then + fail "The secret ${secretName} already exists in namespace ${namespace}." +fi + +# create the secret +kubectl -n $namespace create secret docker-registry $secretName \ + --docker-email=$email \ + --docker-password=$password \ + --docker-server=$dockerServer \ + --docker-username=$username + +# Verify the secret exists +SECRET=`kubectl get secret ${secretName} -n ${namespace} | grep ${secretName} | wc | awk ' { print $1; }'` +if [ "${SECRET}" != "1" ]; then + fail "The secret ${secretName} was not found in namespace ${namespace}" +fi + +echo "The secret ${secretName} has been successfully created in the ${namespace} namespace." diff --git a/OracleIdentityGovernance/kubernetes/3.0.1/create-oim-domain/domain-home-on-pv/common/create-domain-job.sh b/OracleIdentityGovernance/kubernetes/create-oim-domain/domain-home-on-pv/common/create-domain-job.sh similarity index 100% rename from OracleIdentityGovernance/kubernetes/3.0.1/create-oim-domain/domain-home-on-pv/common/create-domain-job.sh rename to OracleIdentityGovernance/kubernetes/create-oim-domain/domain-home-on-pv/common/create-domain-job.sh diff --git a/OracleIdentityGovernance/kubernetes/3.0.1/create-oim-domain/domain-home-on-pv/common/createFMWDomain.py b/OracleIdentityGovernance/kubernetes/create-oim-domain/domain-home-on-pv/common/createFMWDomain.py similarity index 100% rename from OracleIdentityGovernance/kubernetes/3.0.1/create-oim-domain/domain-home-on-pv/common/createFMWDomain.py rename to OracleIdentityGovernance/kubernetes/create-oim-domain/domain-home-on-pv/common/createFMWDomain.py diff --git 
a/OracleIdentityGovernance/kubernetes/3.0.1/create-oim-domain/domain-home-on-pv/common/utility.sh b/OracleIdentityGovernance/kubernetes/create-oim-domain/domain-home-on-pv/common/utility.sh similarity index 100% rename from OracleIdentityGovernance/kubernetes/3.0.1/create-oim-domain/domain-home-on-pv/common/utility.sh rename to OracleIdentityGovernance/kubernetes/create-oim-domain/domain-home-on-pv/common/utility.sh diff --git a/OracleIdentityGovernance/kubernetes/3.0.1/create-oim-domain/domain-home-on-pv/create-domain-inputs.yaml b/OracleIdentityGovernance/kubernetes/create-oim-domain/domain-home-on-pv/create-domain-inputs.yaml similarity index 100% rename from OracleIdentityGovernance/kubernetes/3.0.1/create-oim-domain/domain-home-on-pv/create-domain-inputs.yaml rename to OracleIdentityGovernance/kubernetes/create-oim-domain/domain-home-on-pv/create-domain-inputs.yaml diff --git a/OracleIdentityGovernance/kubernetes/3.0.1/create-oim-domain/domain-home-on-pv/create-domain-job-template.yaml b/OracleIdentityGovernance/kubernetes/create-oim-domain/domain-home-on-pv/create-domain-job-template.yaml similarity index 100% rename from OracleIdentityGovernance/kubernetes/3.0.1/create-oim-domain/domain-home-on-pv/create-domain-job-template.yaml rename to OracleIdentityGovernance/kubernetes/create-oim-domain/domain-home-on-pv/create-domain-job-template.yaml diff --git a/OracleIdentityGovernance/kubernetes/3.0.1/create-oim-domain/domain-home-on-pv/create-domain.sh b/OracleIdentityGovernance/kubernetes/create-oim-domain/domain-home-on-pv/create-domain.sh similarity index 97% rename from OracleIdentityGovernance/kubernetes/3.0.1/create-oim-domain/domain-home-on-pv/create-domain.sh rename to OracleIdentityGovernance/kubernetes/create-oim-domain/domain-home-on-pv/create-domain.sh index e531d7a5b..a5864ca10 100755 --- a/OracleIdentityGovernance/kubernetes/3.0.1/create-oim-domain/domain-home-on-pv/create-domain.sh +++ 
b/OracleIdentityGovernance/kubernetes/create-oim-domain/domain-home-on-pv/create-domain.sh @@ -30,6 +30,7 @@ function usage { echo " -o Output directory for the generated yaml files, must be specified." echo " -e Also create the resources in the generated yaml files, optional." echo " -v Validate the existence of persistentVolumeClaim, optional." + echo " -t Timeout (in seconds) for create domain job execution, optional." echo " -h Help" exit $1 } @@ -38,8 +39,9 @@ function usage { # Parse the command line options # doValidation=false +timeout=1200 executeIt=false -while getopts "evhi:o:" opt; do +while getopts "evhi:o:t:" opt; do case $opt in i) valuesInputFile="${OPTARG}" ;; @@ -49,6 +51,8 @@ while getopts "evhi:o:" opt; do ;; e) executeIt=true ;; + t) timeout="${OPTARG}" + ;; h) usage 0 ;; *) usage 1 @@ -70,6 +74,10 @@ if [ "${missingRequiredOption}" == "true" ]; then usage 1 fi +if [ -z ${timeout} ]; then + timeout=1200 +fi + # # Function to initialize and validate the output directory # for the generated yaml files for this domain. @@ -195,7 +203,7 @@ function createDomainHome { echo "Waiting for the job to complete..." 
JOB_STATUS="0" - max=40 + max=`expr ${timeout} / 30` count=0 while [ "$JOB_STATUS" != "Completed" -a $count -lt $max ] ; do sleep 30 diff --git a/OracleIdentityGovernance/kubernetes/3.0.1/create-oim-domain/domain-home-on-pv/delete-domain-job-template.yaml b/OracleIdentityGovernance/kubernetes/create-oim-domain/domain-home-on-pv/delete-domain-job-template.yaml similarity index 100% rename from OracleIdentityGovernance/kubernetes/3.0.1/create-oim-domain/domain-home-on-pv/delete-domain-job-template.yaml rename to OracleIdentityGovernance/kubernetes/create-oim-domain/domain-home-on-pv/delete-domain-job-template.yaml diff --git a/OracleIdentityGovernance/kubernetes/3.0.1/create-oim-domain/domain-home-on-pv/get-ingress-ip.sh b/OracleIdentityGovernance/kubernetes/create-oim-domain/domain-home-on-pv/get-ingress-ip.sh similarity index 100% rename from OracleIdentityGovernance/kubernetes/3.0.1/create-oim-domain/domain-home-on-pv/get-ingress-ip.sh rename to OracleIdentityGovernance/kubernetes/create-oim-domain/domain-home-on-pv/get-ingress-ip.sh diff --git a/OracleIdentityGovernance/kubernetes/3.0.1/create-oim-domain/domain-home-on-pv/wait_for_soa.sh b/OracleIdentityGovernance/kubernetes/create-oim-domain/domain-home-on-pv/wait_for_soa.sh similarity index 100% rename from OracleIdentityGovernance/kubernetes/3.0.1/create-oim-domain/domain-home-on-pv/wait_for_soa.sh rename to OracleIdentityGovernance/kubernetes/create-oim-domain/domain-home-on-pv/wait_for_soa.sh diff --git a/OracleIdentityGovernance/kubernetes/3.0.1/create-oim-domain/domain-home-on-pv/wlst/create-domain-script.sh b/OracleIdentityGovernance/kubernetes/create-oim-domain/domain-home-on-pv/wlst/create-domain-script.sh old mode 100644 new mode 100755 similarity index 99% rename from OracleIdentityGovernance/kubernetes/3.0.1/create-oim-domain/domain-home-on-pv/wlst/create-domain-script.sh rename to OracleIdentityGovernance/kubernetes/create-oim-domain/domain-home-on-pv/wlst/create-domain-script.sh index 
ac913a760..27e2d8aa8 --- a/OracleIdentityGovernance/kubernetes/3.0.1/create-oim-domain/domain-home-on-pv/wlst/create-domain-script.sh +++ b/OracleIdentityGovernance/kubernetes/create-oim-domain/domain-home-on-pv/wlst/create-domain-script.sh @@ -74,19 +74,15 @@ sed -i 's/<\/ssl>/<\/ssl-->/g' $DOMAIN_HOME/config/config.xml sed -i "s/oimk8namespace/$domainName/g" $DOMAIN_HOME/config/config.xml sed -i "s/applications\/$domainName\/em.ear/domains\/applications\/$domainName\/em.ear/g" $DOMAIN_HOME/config/config.xml - if [ ! -f /u01/oracle/idm/server/ConnectorDefaultDirectory/ConnectorConfigTemplate.xml ] && [ -d /u01/oracle/idm/server/ConnectorDefaultDirectory_orig ]; then cp /u01/oracle/idm/server/ConnectorDefaultDirectory_orig/ConnectorConfigTemplate.xml /u01/oracle/idm/server/ConnectorDefaultDirectory fi - if [ ! -f /u01/oracle/idm/server/ConnectorDefaultDirectory/ConnectorSchema.xsd ] && [ -d /u01/oracle/idm/server/ConnectorDefaultDirectory_orig ]; then cp /u01/oracle/idm/server/ConnectorDefaultDirectory_orig/ConnectorSchema.xsd /u01/oracle/idm/server/ConnectorDefaultDirectory fi - if [ ! -f /u01/oracle/idm/server/ConnectorDefaultDirectory/readme.txt ] && [ -d /u01/oracle/idm/server/ConnectorDefaultDirectory_orig ]; then cp /u01/oracle/idm/server/ConnectorDefaultDirectory_orig/readme.txt /u01/oracle/idm/server/ConnectorDefaultDirectory fi - if [ ! 
-d /u01/oracle/idm/server/ConnectorDefaultDirectory/targetsystems-lib ] && [ -d /u01/oracle/idm/server/ConnectorDefaultDirectory_orig ]; then cp -rf /u01/oracle/idm/server/ConnectorDefaultDirectory_orig/targetsystems-lib /u01/oracle/idm/server/ConnectorDefaultDirectory fi diff --git a/OracleIdentityGovernance/kubernetes/create-oracle-db-service/README.md b/OracleIdentityGovernance/kubernetes/create-oracle-db-service/README.md new file mode 100755 index 000000000..a89dad0c9 --- /dev/null +++ b/OracleIdentityGovernance/kubernetes/create-oracle-db-service/README.md @@ -0,0 +1,76 @@ +# Managing Oracle Database Service for OracleIdentityGovernance + +The sample scripts in this directory demonstrate how to: +* Start an Oracle Database (DB) service in a Kubernetes cluster. +* Stop an Oracle DB service in a Kubernetes cluster. + +## Start an Oracle Database service in a Kubernetes cluster + +Use this script to create an Oracle Database service in a Kubernetes Namespace with the default credentials, in the Oracle Database Slim image. + +The script assumes that either the image, `container-registry.oracle.com/database/enterprise:12.2.0.1-slim`, is available in the Docker repository, or an `ImagePullSecret` is created for `container-registry.oracle.com`. To create a secret for accessing `container-registry.oracle.com`, see the script `create-image-pull-secret.sh`. 
+ +``` + +$ ./start-db-service.sh -h +usage: ./start-db-service.sh -p -i -s -n [-h] + -i Oracle DB Image (optional) + (default: container-registry.oracle.com/database/enterprise:12.2.0.1-slim) + -p DB Service NodePort (optional) + (default: 30011, set to 'none' to deploy service without a NodePort) + -s DB Image PullSecret (optional) + (default: docker-store) + -n Configurable Kubernetes NameSpace for Oracle DB Service (optional)" + (default: default) + -h Help + +$ ./start-db-service.sh +NodePort[30011] ImagePullSecret[docker-store] Image[container-registry.oracle.com/database/enterprise:12.2.0.1-slim] +deployment.extensions/oracle-db created +service/oracle-db created +[oracle-db-54667dfd5f-76sxf] already initialized .. +Checking Pod READY column for State [1/1] +Pod [oracle-db-54667dfd5f-76sxf] Status is Ready Iter [1/60] +NAME READY STATUS RESTARTS AGE +oracle-db-54667dfd5f-76sxf 1/1 Running 0 8s +NAME READY STATUS RESTARTS AGE +oracle-db-54667dfd5f-76sxf 1/1 Running 0 8s +NAME TYPE CLUSTER-IP EXTERNAL-IP PORT(S) AGE +kubernetes ClusterIP 10.96.0.1 443/TCP 27d +oracle-db NodePort 10.99.58.137 1521:30011/TCP 9s +Oracle DB service is RUNNING with NodePort [30011] + +``` + +For creating a OracleIdentityGovernance domain, you can use the database connection string, `oracle-db.default.svc.cluster.local:1521/devpdb.k8s`, as the `rcuDatabaseURL` parameter in the `domain.input.yaml` file. + +Note: oracle-db.default.svc.cluster.local:1521/devpdb.k8s can be used as rcuDatabaseURL if the Oracle DB Service is started in the `default` NameSpace. For a custom NameSpace the URL needs to be modified accordingly e.g. oracle-db.[namespace].svc.cluster.local:1521/devpdb.k8s + +You can access the database through the NodePort outside of the Kubernetes cluster, using the URL `:30011/devpdb.k8s`. + +**Note**: To create a OracleIdentityGovernance domain image, the domain-in-image model needs a public database URL as an `rcuDatabaseURL` parameter. 
+ +## Stop an Oracle Database service in a Kubernetes cluster + +Use this script to stop the Oracle Database service you created using the `start-db-service.sh` script. + +``` +$ ./stop-db-service.sh -h +usage: stop-db-service.sh -n namespace [-h] + -n Kubernetes NameSpace for Oracle DB Service to be Stopped (optional) + (default: default) + -h Help + +Note: Here the NameSpace refers to the NameSpace used in start-db-service.sh + +$ ./stop-db-service.sh +deployment.extensions "oracle-db" deleted +service "oracle-db" deleted +Checking Status for Pod [oracle-db-756f9b99fd-gvv46] in namesapce [default] +Pod [oracle-db-756f9b99fd-gvv46] Status [Terminating] +Pod [oracle-db-756f9b99fd-gvv46] Status [Terminating] +Pod [oracle-db-756f9b99fd-gvv46] Status [Terminating] +Error from server (NotFound): pods "oracle-db-756f9b99fd-gvv46" not found +Pod [oracle-db-756f9b99fd-gvv46] removed from nameSpace [default] +``` + diff --git a/OracleIdentityGovernance/kubernetes/create-oracle-db-service/common/checkDbState.sh b/OracleIdentityGovernance/kubernetes/create-oracle-db-service/common/checkDbState.sh new file mode 100755 index 000000000..9ce5aa3d3 --- /dev/null +++ b/OracleIdentityGovernance/kubernetes/create-oracle-db-service/common/checkDbState.sh @@ -0,0 +1,21 @@ +#!/bin/bash +# Copyright (c) 2020, 2021, Oracle and/or its affiliates. +# Licensed under the Universal Permissive License v 1.0 as shown at https://oss.oracle.com/licenses/upl. +# +logfile="/home/oracle/setup/log/setupDB.log" +max=30 +counter=0 +while [ $counter -le ${max} ] +do + grep "Done ! The database is ready for use ." $logfile + [[ $? == 0 ]] && break; + ((counter++)) + echo "[$counter/${max}] Retrying for Oracle Database Availability..." + sleep 10 +done + +if [ $counter -gt ${max} ]; then + echo "[ERRORR] Oracle DB Service is not ready after [${max}] iterations ..." 
+ exit -1 +fi + diff --git a/OracleIdentityGovernance/kubernetes/create-oracle-db-service/common/oracle.db.yaml b/OracleIdentityGovernance/kubernetes/create-oracle-db-service/common/oracle.db.yaml new file mode 100755 index 000000000..4185471f3 --- /dev/null +++ b/OracleIdentityGovernance/kubernetes/create-oracle-db-service/common/oracle.db.yaml @@ -0,0 +1,78 @@ +# Copyright (c) 2020, 2021, Oracle and/or its affiliates. +# Licensed under the Universal Permissive License v 1.0 as shown at https://oss.oracle.com/licenses/upl. +# +apiVersion: v1 +kind: Service +metadata: + name: oracle-db + namespace: default +spec: + ports: + - name: tns + port: 1521 + protocol: TCP + targetPort: 1521 + nodePort: 30011 + selector: + app.kubernetes.io/instance: dev + app.kubernetes.io/name: oracle-db + sessionAffinity: None + type: NodePort +--- +apiVersion: apps/v1 +kind: Deployment +metadata: + name: oracle-db + namespace: default +spec: + replicas: 1 + selector: + matchLabels: + app.kubernetes.io/instance: dev + app.kubernetes.io/name: oracle-db + strategy: + rollingUpdate: + maxSurge: 1 + maxUnavailable: 1 + type: RollingUpdate + template: + metadata: + labels: + app.kubernetes.io/instance: dev + app.kubernetes.io/name: oracle-db + spec: + containers: + - env: + - name: DB_SID + value: devcdb + - name: DB_PDB + value: devpdb + - name: DB_DOMAIN + value: k8s + - name: DB_BUNDLE + value: basic + image: container-registry.oracle.com/database/enterprise:12.2.0.1-slim + imagePullPolicy: IfNotPresent + name: oracle-db + ports: + - containerPort: 1521 + name: tns + protocol: TCP + resources: + limits: + cpu: "2" + memory: "6Gi" + ephemeral-storage: "8Gi" + requests: + cpu: 500m + ephemeral-storage: "6Gi" + terminationMessagePath: /dev/termination-log + terminationMessagePolicy: File + dnsPolicy: ClusterFirst + restartPolicy: Always + schedulerName: default-scheduler + securityContext: {} + terminationGracePeriodSeconds: 30 + imagePullSecrets: + - name: docker-store + diff --git 
a/OracleIdentityGovernance/kubernetes/create-oracle-db-service/create-image-pull-secret.sh b/OracleIdentityGovernance/kubernetes/create-oracle-db-service/create-image-pull-secret.sh new file mode 100755 index 000000000..b6b2757f3 --- /dev/null +++ b/OracleIdentityGovernance/kubernetes/create-oracle-db-service/create-image-pull-secret.sh @@ -0,0 +1,60 @@ +#!/bin/bash +# Copyright (c) 2020, 2021, Oracle and/or its affiliates. +# Licensed under the Universal Permissive License v 1.0 as shown at https://oss.oracle.com/licenses/upl. +# +# Create ImagePullSecret to pull Oracle DB and OracleIdentityGovernance Image + +script="${BASH_SOURCE[0]}" +scriptDir="$( cd "$( dirname "${script}" )" && pwd )" + +function usage { + echo "usage: ${script} -u -p -e -s [-h]" + echo " -u Oracle Container Registry User Name (needed)" + echo " -p Oracle Container Registry Password (needed)" + echo " -e email (needed)" + echo " -s Generated Secret (optional) " + echo " (default: docker-store) " + echo " -h Help" + exit $1 +} + +while getopts ":hu:p:s:e:" opt; do + case $opt in + u) username="${OPTARG}" + ;; + p) password="${OPTARG}" + ;; + e) email="${OPTARG}" + ;; + s) secret="${OPTARG}" + ;; + h) usage 0 + ;; + *) usage 1 + ;; + esac +done + +if [ -z ${username} ]; then + echo "${script}: -u must be specified." + usage 1 +fi + +if [ -z ${password} ]; then + echo "${script}: -p must be specified." + usage 1 +fi + +if [ -z ${email} ]; then + echo "${script}: -e must be specified." 
+ usage 1 +fi + +if [ -z ${secret} ]; then + secret="docker-store" +fi + +kubectl delete secret/${secret} --ignore-not-found +echo "Creating ImagePullSecret on container-registry.oracle.com" +kubectl create secret docker-registry ${secret} --docker-server=container-registry.oracle.com --docker-username=${username} --docker-password=${password} --docker-email=${email} + diff --git a/OracleIdentityGovernance/kubernetes/create-oracle-db-service/start-db-service.sh b/OracleIdentityGovernance/kubernetes/create-oracle-db-service/start-db-service.sh new file mode 100755 index 000000000..9a522d4eb --- /dev/null +++ b/OracleIdentityGovernance/kubernetes/create-oracle-db-service/start-db-service.sh @@ -0,0 +1,113 @@ +#!/bin/bash +# Copyright (c) 2020, 2021, Oracle and/or its affiliates. +# Licensed under the Universal Permissive License v 1.0 as shown at https://oss.oracle.com/licenses/upl. +# +# Bring up Oracle DB Instance in [default] NameSpace with a NodePort Service + +script="${BASH_SOURCE[0]}" +scriptDir="$( cd "$( dirname "${script}" )" && pwd )" +source ${scriptDir}/../common/utility.sh + +function usage { + echo "usage: ${script} -p -i -s -n [-h]" + echo " -i Oracle DB Image (optional)" + echo " (default: container-registry.oracle.com/database/enterprise:12.2.0.1-slim)" + echo " -p DB Service NodePort (optional)" + echo " (default: 30011, set to 'none' to deploy service without a NodePort)" + echo " -s DB Image PullSecret (optional)" + echo " (default: docker-store) " + echo " -n Configurable Kubernetes NameSpace for Oracle DB Service (optional)" + echo " (default: default) " + echo " -h Help" + exit $1 +} + +while getopts ":h:p:s:i:n:" opt; do + case $opt in + p) nodeport="${OPTARG}" + ;; + s) pullsecret="${OPTARG}" + ;; + i) dbimage="${OPTARG}" + ;; + n) namespace="${OPTARG}" + ;; + h) usage 0 + ;; + *) usage 1 + ;; + esac +done + +if [ -z ${nodeport} ]; then + nodeport=30011 +fi + +if [ -z ${pullsecret} ]; then + pullsecret="docker-store" +fi + +if [ -z 
${namespace} ]; then + namespace="default" +fi + +echo "Checking Status for NameSpace [$namespace]" +domns=`kubectl get ns ${namespace} | grep ${namespace} | awk '{print $1}'` +if [ -z ${domns} ]; then + echo "Adding NameSpace[$namespace] to Kubernetes Cluster" + kubectl create namespace ${namespace} + sleep 5 +else + echo "Skipping the NameSpace[$namespace] Creation ..." +fi + +if [ -z ${dbimage} ]; then + dbimage="container-registry.oracle.com/database/enterprise:12.2.0.1-slim" +fi + +echo "NodePort[$nodeport] ImagePullSecret[$pullsecret] Image[${dbimage}] NameSpace[${namespace}]" + +# Modify ImagePullSecret and DatabaseImage based on input +sed -i -e '$d' ${scriptDir}/common/oracle.db.yaml +echo ' - name: docker-store' >> ${scriptDir}/common/oracle.db.yaml +sed -i -e "s?name: docker-store?name: ${pullsecret}?g" ${scriptDir}/common/oracle.db.yaml +sed -i -e "s?image:.*?image: ${dbimage}?g" ${scriptDir}/common/oracle.db.yaml +sed -i -e "s?namespace:.*?namespace: ${namespace}?g" ${scriptDir}/common/oracle.db.yaml + +# Modify the NodePort based on input +if [ "${nodeport}" = "none" ]; then + sed -i -e "s? nodePort:? #nodePort:?g" ${scriptDir}/common/oracle.db.yaml + sed -i -e "s? type:.*NodePort? 
#type: NodePort?g" ${scriptDir}/common/oracle.db.yaml +else + sed -i -e "s?[#]*nodePort:.*?nodePort: ${nodeport}?g" ${scriptDir}/common/oracle.db.yaml + sed -i -e "s?[#]*type:.*NodePort?type: NodePort?g" ${scriptDir}/common/oracle.db.yaml # default type is ClusterIP +fi + +kubectl delete service oracle-db -n ${namespace} --ignore-not-found +kubectl apply -f ${scriptDir}/common/oracle.db.yaml + +dbpod=`kubectl get po -n ${namespace} | grep oracle-db | cut -f1 -d " " ` + +checkPod ${dbpod} ${namespace} +checkPodState ${dbpod} ${namespace} "1/1" +checkService oracle-db ${namespace} + +kubectl get po -n ${namespace} +kubectl get service -n ${namespace} + +kubectl cp ${scriptDir}/common/checkDbState.sh -n ${namespace} ${dbpod}:/home/oracle/ +kubectl exec -it ${dbpod} -n ${namespace} /bin/bash /home/oracle/checkDbState.sh +if [ $? != 0 ]; then + echo "######################"; + echo "[ERROR] Could not create Oracle DB Service, check the pod log for pod ${dbpod} in namespace ${namespace}"; + echo "######################"; + exit -3; +fi + +if [ ! "${nodeport}" = "none" ]; then + echo "Oracle DB Service is RUNNING with NodePort [${nodeport}]" +else + echo "Oracle DB Service is RUNNING and does not specify a public NodePort" +fi +echo "Oracle DB Service URL [oracle-db.${namespace}.svc.cluster.local:1521/devpdb.k8s]" + diff --git a/OracleIdentityGovernance/kubernetes/create-oracle-db-service/stop-db-service.sh b/OracleIdentityGovernance/kubernetes/create-oracle-db-service/stop-db-service.sh new file mode 100755 index 000000000..7ab14928c --- /dev/null +++ b/OracleIdentityGovernance/kubernetes/create-oracle-db-service/stop-db-service.sh @@ -0,0 +1,45 @@ +#!/bin/bash +# Copyright (c) 2020, 2021, Oracle and/or its affiliates. +# Licensed under the Universal Permissive License v 1.0 as shown at https://oss.oracle.com/licenses/upl. 
+# +# Drop the DB Service created by start-db-service.sh + +script="${BASH_SOURCE[0]}" +scriptDir="$( cd "$( dirname "${script}" )" && pwd )" +source ${scriptDir}/../common/utility.sh + +function usage { + echo "usage: ${script} -n namespace [-h]" + echo " -n Kubernetes NameSpace for Oracle DB Service to be Stopped (optional)" + echo " (default: default) " + echo " -h Help" + exit $1 +} + +while getopts ":h:n:" opt; do + case $opt in + n) namespace="${OPTARG}" + ;; + h) usage 0 + ;; + *) usage 1 + ;; + esac +done + + +if [ -z ${namespace} ]; then + namespace=default +fi + + +dbpod=`kubectl get po -n ${namespace} | grep oracle-db | cut -f1 -d " " ` +kubectl delete -f ${scriptDir}/common/oracle.db.yaml --ignore-not-found + +if [ -z ${dbpod} ]; then + echo "Couldn't find oracle-db pod in [${namespace}] namesapce" +else + checkPodDelete ${dbpod} ${namespace} + kubectl delete svc/oracle-db -n ${namespace} --ignore-not-found +fi + diff --git a/OracleIdentityGovernance/kubernetes/create-rcu-credentials/README.md b/OracleIdentityGovernance/kubernetes/create-rcu-credentials/README.md new file mode 100755 index 000000000..ed5cd2666 --- /dev/null +++ b/OracleIdentityGovernance/kubernetes/create-rcu-credentials/README.md @@ -0,0 +1,56 @@ +# Creating RCU credentials for a OracleIdentityGovernance domain + +This sample demonstrates how to create a Kubernetes secret containing the +RCU credentials for a OracleIdentityGovernance domain. The operator expects this secret to be +named following the pattern `domainUID-rcu-credentials`, where `domainUID` +is the unique identifier of the domain. It must be in the same namespace +that the domain will run in. + +To use the sample, run the command: + +``` +$ ./create-rcu-credentials.sh \ + -u username \ + -p password \ + -a sys_username \ + -q sys_password \ + -d domainUID \ + -n namespace \ + -s secretName +``` + +The parameters are as follows: + +``` + -u username for schema owner (regular user), must be specified. 
+ -p password for schema owner (regular user), must be specified. + -a username for SYSDBA user, must be specified. + -q password for SYSDBA user, must be specified. + -d domainUID, optional. The default value is oimcluster. If specified, the secret will be labeled with the domainUID unless the given value is an empty string. + -n namespace, optional. Use the oimcluster namespace if not specified. + -s secretName, optional. If not specified, the secret name will be determined based on the domainUID value. +``` + +This creates a `generic` secret containing the user name and password as literal values. + +You can check the secret with the `kubectl describe secret` command. An example is shown below, +including the output: + +``` +$ kubectl -n oimcluster describe secret oimcluster-rcu-credentials -o yaml +Name: oimcluster-rcu-credentials +Namespace: oimcluster +Labels: weblogic.domainName=oimcluster + weblogic.domainUID=oimcluster +Annotations: + +Type: Opaque + +Data +==== +password: 12 bytes +sys_password: 12 bytes +sys_username: 3 bytes +username: 4 bytes +``` + diff --git a/OracleIdentityGovernance/kubernetes/create-rcu-credentials/create-rcu-credentials.sh b/OracleIdentityGovernance/kubernetes/create-rcu-credentials/create-rcu-credentials.sh new file mode 100755 index 000000000..0adcbc8cd --- /dev/null +++ b/OracleIdentityGovernance/kubernetes/create-rcu-credentials/create-rcu-credentials.sh @@ -0,0 +1,139 @@ +#!/usr/bin/env bash +# Copyright (c) 2020, 2021, Oracle and/or its affiliates. +# Licensed under the Universal Permissive License v 1.0 as shown at https://oss.oracle.com/licenses/upl. +# +# Description +# This sample script creates a Kubernetes secret for RCU credentials. +# +# The following pre-requisites must be handled prior to running this script: +# * The kubernetes namespace must already be created +# +# Secret name determination +# 1) secretName - if specified +# 2) oimcluster-rcu-credentials - if secretName and domainUID are both not specified. 
This is the default out-of-the-box. +# 3) -rcu-credentials - if secretName is not specified, and domainUID is specified. +# 4) rcu-credentials - if secretName is not specified, and domainUID is specified as "". +# +# The generated secret will be labeled with +# weblogic.domainUID=$domainUID +# and +# weblogic.domainName=$domainUID +# Where the $domainUID is the value of the -d command line option, unless the value supplied is an empty String "" +# + +script="${BASH_SOURCE[0]}" + +# +# Function to exit and print an error message +# $1 - text of message +function fail { + echo [ERROR] $* + exit 1 +} + +# Try to execute kubectl to see whether kubectl is available +function validateKubectlAvailable { + if ! [ -x "$(command -v kubectl)" ]; then + fail "kubectl is not installed" + fi +} + +function usage { + echo usage: ${script} -u username -p password -a sysuser -q syspassword [-d domainUID] [-n namespace] [-s secretName] [-h] + echo " -u username for schema owner (regular user), must be specified." + echo " -p password for schema owner (regular user), must be specified." + echo " -a username for SYSDBA user, must be specified." + echo " -q password for SYSDBA user, must be specified." + echo " -d domainUID, optional. The default value is oimcluster. If specified, the secret will be labeled with the domainUID unless the given value is an empty string." + echo " -n namespace, optional. Use the oimcluster namespace if not specified" + echo " -s secretName, optional. 
If not specified, the secret name will be determined based on the domainUID value" + echo " -h Help" + exit $1 +} + +# +# Parse the command line options +# +domainUID=oimcluster +namespace=oimcluster +while getopts "hu:p:n:d:s:q:a:" opt; do + case $opt in + u) username="${OPTARG}" + ;; + p) password="${OPTARG}" + ;; + a) sys_username="${OPTARG}" + ;; + q) sys_password="${OPTARG}" + ;; + n) namespace="${OPTARG}" + ;; + d) domainUID="${OPTARG}" + ;; + s) secretName="${OPTARG}" + ;; + h) usage 0 + ;; + *) usage 1 + ;; + esac +done + +if [ -z $secretName ]; then + if [ -z $domainUID ]; then + secretName=rcu-credentials + else + secretName=$domainUID-rcu-credentials + fi +fi + +if [ -z ${username} ]; then + echo "${script}: -u must be specified." + missingRequiredOption="true" +fi + +if [ -z ${password} ]; then + echo "${script}: -p must be specified." + missingRequiredOption="true" +fi + +if [ -z ${sys_username} ]; then + echo "${script}: -a must be specified." + missingRequiredOption="true" +fi + +if [ -z ${sys_password} ]; then + echo "${script}: -q must be specified." + missingRequiredOption="true" +fi + +if [ "${missingRequiredOption}" == "true" ]; then + usage 1 +fi + +# check and see if the secret already exists +result=`kubectl get secret ${secretName} -n ${namespace} --ignore-not-found=true | grep ${secretName} | wc | awk ' { print $1; }'` +if [ "${result:=Error}" != "0" ]; then + fail "The secret ${secretName} already exists in namespace ${namespace}." +fi + +# create the secret +kubectl -n $namespace create secret generic $secretName \ + --from-literal=username=$username \ + --from-literal=password=$password \ + --from-literal=sys_username=$sys_username \ + --from-literal=sys_password=$sys_password + +# label the secret with domainUID if needed +if [ ! 
-z $domainUID ]; then + kubectl label secret ${secretName} -n $namespace weblogic.domainUID=$domainUID weblogic.domainName=$domainUID +fi + +# Verify the secret exists +SECRET=`kubectl get secret ${secretName} -n ${namespace} | grep ${secretName} | wc | awk ' { print $1; }'` +if [ "${SECRET}" != "1" ]; then + fail "The secret ${secretName} was not found in namespace ${namespace}" +fi + +echo "The secret ${secretName} has been successfully created in the ${namespace} namespace." + diff --git a/OracleIdentityGovernance/kubernetes/create-rcu-schema/README.md b/OracleIdentityGovernance/kubernetes/create-rcu-schema/README.md new file mode 100755 index 000000000..0d5eee26e --- /dev/null +++ b/OracleIdentityGovernance/kubernetes/create-rcu-schema/README.md @@ -0,0 +1,219 @@ +# Managing RCU schema for an OracleIdentityGovernance domain + +The sample scripts in this directory demonstrate how to: +* Create an RCU schema in the Oracle DB that will be used by an OracleIdentityGovernance domain. +* Delete the RCU schema in the Oracle DB used by an OracleIdentityGovernance domain. + +## Start an Oracle Database service in a Kubernetes cluster + +Use the script ``samples/scripts/create-oracle-db-service/start-db-service.sh`` + +For creating an OracleIdentityGovernance domain, you can use the Database connection string, `oracle-db.default.svc.cluster.local:1521/devpdb.k8s`, as an `rcuDatabaseURL` parameter in the `domain.input.yaml` file. + +You can access the Database through the NodePort outside of the Kubernetes cluster, using the URL `:30011/devpdb.k8s`. + +**Note**: To create an OracleIdentityGovernance domain image, the domain-in-image model needs a public Database URL as an `rcuDatabaseURL` parameter. + + +## Create the RCU schema in the Oracle Database + +This script generates the RCU schema based on `schemaPrefix` and `dburl`. + +The script assumes that either the image, `oracle/oig:12.2.1.4.0`, is available in the nodes or an `ImagePullSecret` is created to pull the image. 
To create a secret, see the script `create-image-pull-secret.sh`. + +``` +$ ./create-rcu-schema.sh -h +usage: ./create-rcu-schema.sh -s -t -d -i -u -p -n -q -r -o -c [-h] + -s RCU Schema Prefix (required) + -t RCU Schema Type (optional) + (supported values: oim) + -d RCU Oracle Database URL (optional) + (default: oracle-db.default.svc.cluster.local:1521/devpdb.k8s) + -p OracleIdentityGovernance ImagePullSecret (optional) + (default: none) + -i OracleIdentityGovernance Image (optional) + (default: oracle/oig:12.2.1.4.0) + -u OracleIdentityGovernance ImagePullPolicy (optional) + (default: IfNotPresent) + -n Namespace for RCU pod (optional) + (default: default) + -q password for database SYSDBA user. (optional) + (default: Oradoc_db1) + -r password for all schema owner (regular user). (optional) + (default: Oradoc_db1) + -o Output directory for the generated YAML file. (optional) + (default: rcuoutput) + -c Comma-separated variables in the format variablename=value. (optional). + (default: none) + -h Help + +$ ./create-rcu-schema.sh -s domain1 +ImagePullSecret[none] Image[oracle/oig:12.2.1.4.0] dburl[oracle-db.default.svc.cluster.local:1521/devpdb.k8s] rcuType[fmw] customVariables[none] +pod/rcu created +[rcu] already initialized .. +Checking Pod READY column for State [1/1] +Pod [rcu] Status is Ready Iter [1/60] +NAME READY STATUS RESTARTS AGE +rcu 1/1 Running 0 6s +NAME READY STATUS RESTARTS AGE +rcu 1/1 Running 0 11s +CLASSPATH=/u01/jdk/lib/tools.jar:/u01/oracle/wlserver/modules/features/wlst.wls.classpath.jar: + +PATH=/u01/oracle/wlserver/server/bin:/u01/oracle/wlserver/../oracle_common/modules/thirdparty/org.apache.ant/1.10.5.0.0/apache-ant-1.10.5/bin:/u01/jdk/jre/bin:/u01/jdk/bin:/usr/local/sbin:/usr/local/bin:/usr/sbin:/usr/bin:/sbin:/bin:/u01/jdk/bin:/u01/oracle/oracle_common/common/bin:/u01/oracle/wlserver/common/bin:/u01/oracle:/u01/oracle/wlserver/../oracle_common/modules/org.apache.maven_3.2.5/bin + +Your environment has been set. 
+Check if the DB Service is ready to accept request +DB Connection String [oracle-db.default.svc.cluster.local:1521/devpdb.k8s], schemaPrefix [oimcluster] rcuType [fmw] + +**** Success!!! **** + +You can connect to the database in your app using: + + java.util.Properties props = new java.util.Properties(); + props.put("user", "sys as sysdba"); + props.put("password", "Oradoc_db1"); + java.sql.Driver d = + Class.forName("oracle.jdbc.OracleDriver").newInstance(); + java.sql.Connection conn = + Driver.connect("sys as sysdba", props); +Creating RCU Schema for OracleIdentityGovernance Domain ... +Extra RCU Schema Component Choosen[] + +Processing command line .... + +Repository Creation Utility - Checking Prerequisites +Checking Component Prerequisites +Repository Creation Utility - Creating Tablespaces +Validating and Creating Tablespaces +Create tablespaces in the repository database +Repository Creation Utility - Create +Repository Create in progress. +Executing pre create operations + Percent Complete: 20 + Percent Complete: 20 + ..... + Percent Complete: 96 + Percent Complete: 100 + ..... 
+Executing post create operations + +Repository Creation Utility: Create - Completion Summary + +Database details: +----------------------------- +Host Name : oracle-db.default.svc.cluster.local +Port : 1521 +Service Name : DEVPDB.K8S +Connected As : sys +Prefix for (prefixable) Schema Owners : DOMAIN1 +RCU Logfile : /tmp/RCU2020-05-01_14-35_1160633335/logs/rcu.log + +Component schemas created: +----------------------------- +Component Status Logfile + +Common Infrastructure Services Success /tmp/RCU2020-05-01_14-35_1160633335/logs/stb.log +Oracle Platform Security Services Success /tmp/RCU2020-05-01_14-35_1160633335/logs/opss.log +Audit Services Success /tmp/RCU2020-05-01_14-35_1160633335/logs/iau.log +Audit Services Append Success /tmp/RCU2020-05-01_14-35_1160633335/logs/iau_append.log +Audit Services Viewer Success /tmp/RCU2020-05-01_14-35_1160633335/logs/iau_viewer.log +Metadata Services Success /tmp/RCU2020-05-01_14-35_1160633335/logs/mds.log +WebLogic Services Success /tmp/RCU2020-05-01_14-35_1160633335/logs/wls.log + +Repository Creation Utility - Create : Operation Completed +[INFO] Modify the domain.input.yaml to use [oracle-db.default.svc.cluster.local:1521/devpdb.k8s] as rcuDatabaseURL and [domain1] as rcuSchemaPrefix +``` + +## Drop the RCU schema from the Oracle Database + +Use this script to drop the RCU schema based `schemaPrefix` and `dburl`. + +``` +$ ./drop-rcu-schema.sh -h +usage: ./drop-rcu-schema.sh -s -d -n -q -r [-h] + -s RCU Schema Prefix (required) + -t RCU Schema Type (optional) + (supported values: oim) + -d Oracle Database URL (optional) + (default: oracle-db.default.svc.cluster.local:1521/devpdb.k8s) + -n Namespace where RCU pod is deployed (optional) + (default: default) + -q password for database SYSDBA user. (optional) + (default: Oradoc_db1) + -r password for all schema owner (regular user). (optional) + (default: Oradoc_db1) + -c Comma-separated variables in the format variablename=value. (optional). 
+ (default: none) + -h Help + +$ ./drop-rcu-schema.sh -s domain1 +CLASSPATH=/u01/jdk/lib/tools.jar:/u01/oracle/wlserver/modules/features/wlst.wls.classpath.jar: + +PATH=/u01/oracle/wlserver/server/bin:/u01/oracle/wlserver/../oracle_common/modules/thirdparty/org.apache.ant/1.10.5.0.0/apache-ant-1.10.5/bin:/u01/jdk/jre/bin:/u01/jdk/bin:/usr/local/sbin:/usr/local/bin:/usr/sbin:/usr/bin:/sbin:/bin:/u01/jdk/bin:/u01/oracle/oracle_common/common/bin:/u01/oracle/wlserver/common/bin:/u01/oracle:/u01/oracle/wlserver/../oracle_common/modules/org.apache.maven_3.2.5/bin + +Your environment has been set. +Check if the DB Service is ready to accept request +DB Connection String [oracle-db.default.svc.cluster.local:1521/devpdb.k8s] schemaPrefix [domain1] rcuType[fmw] + +**** Success!!! **** + +You can connect to the database in your app using: + + java.util.Properties props = new java.util.Properties(); + props.put("user", "sys as sysdba"); + props.put("password", "Oradoc_db1"); + java.sql.Driver d = + Class.forName("oracle.jdbc.OracleDriver").newInstance(); + java.sql.Connection conn = + Driver.connect("sys as sysdba", props); +Dropping RCU Schema for OracleIdentityGovernance Domain ... +Extra RCU Schema Component(s) Choosen[] + +Processing command line .... +Repository Creation Utility - Checking Prerequisites +Checking Global Prerequisites +Repository Creation Utility - Checking Prerequisites +Checking Component Prerequisites +Repository Creation Utility - Drop +Repository Drop in progress. + Percent Complete: 2 + Percent Complete: 14 + ..... + Percent Complete: 99 + Percent Complete: 100 + ..... 
+ +Repository Creation Utility: Drop - Completion Summary + +Database details: +----------------------------- +Host Name : oracle-db.default.svc.cluster.local +Port : 1521 +Service Name : DEVPDB.K8S +Connected As : sys +Prefix for (prefixable) Schema Owners : DOMAIN1 +RCU Logfile : /tmp/RCU2020-05-01_14-42_651700358/logs/rcu.log + +Component schemas dropped: +----------------------------- +Component Status Logfile + +Common Infrastructure Services Success /tmp/RCU2020-05-01_14-42_651700358/logs/stb.log +Oracle Platform Security Services Success /tmp/RCU2020-05-01_14-42_651700358/logs/opss.log +Audit Services Success /tmp/RCU2020-05-01_14-42_651700358/logs/iau.log +Audit Services Append Success /tmp/RCU2020-05-01_14-42_651700358/logs/iau_append.log +Audit Services Viewer Success /tmp/RCU2020-05-01_14-42_651700358/logs/iau_viewer.log +Metadata Services Success /tmp/RCU2020-05-01_14-42_651700358/logs/mds.log +WebLogic Services Success /tmp/RCU2020-05-01_14-42_651700358/logs/wls.log + +Repository Creation Utility - Drop : Operation Completed +pod "rcu" deleted +Checking Status for Pod [rcu] in namesapce [default] +Error from server (NotFound): pods "rcu" not found +Pod [rcu] removed from nameSpace [default] +``` + +## Stop an Oracle Database service in a Kubernetes cluster + +Use the script ``samples/scripts/create-oracle-db-service/stop-db-service.sh`` + diff --git a/OracleIdentityGovernance/kubernetes/create-rcu-schema/common/createRepository.sh b/OracleIdentityGovernance/kubernetes/create-rcu-schema/common/createRepository.sh new file mode 100755 index 000000000..6612847e8 --- /dev/null +++ b/OracleIdentityGovernance/kubernetes/create-rcu-schema/common/createRepository.sh @@ -0,0 +1,68 @@ +#!/bin/bash +# Copyright (c) 2020, 2021, Oracle and/or its affiliates. +# Licensed under the Universal Permissive License v 1.0 as shown at https://oss.oracle.com/licenses/upl. +# +. 
/u01/oracle/wlserver/server/bin/setWLSEnv.sh + +echo "Check if the DB Service is ready to accept request " +connectString=${1:-oracle-db.default.svc.cluster.local:1521/devpdb.k8s} +schemaPrefix=${2:-oimcluster} +rcuType=${3:-fmw} +sysPassword=${4:-Oradoc_db1} +customVariables=${5:-none} + +echo "DB Connection String [$connectString], schemaPrefix [${schemaPrefix}] rcuType [${rcuType}] customVariables [${customVariables}]" + +max=100 +counter=0 +while [ $counter -le ${max} ] +do + java utils.dbping ORACLE_THIN "sys as sysdba" ${sysPassword} ${connectString} > dbping.err 2>&1 + [[ $? == 0 ]] && break; + ((counter++)) + echo "[$counter/${max}] Retrying the DB Connection ..." + sleep 10 +done + +if [ $counter -gt ${max} ]; then + echo "Error output from 'java utils.dbping ORACLE_THIN \"sys as sysdba\" SYSPASSWORD ${connectString}' from '$(pwd)/dbping.err':" + cat dbping.err + echo "[ERROR] Oracle DB Service is not ready after [${max}] iterations ..." + exit -1 +else + java utils.dbping ORACLE_THIN "sys as sysdba" ${sysPassword} ${connectString} +fi + +if [ $customVariables != "none" ]; then + extVariables="-variables $customVariables" +else + extVariables="" +fi +case $rcuType in + +oim) + extComponents="-component OIM -component SOAINFRA" + echo "Creating RCU Schema for OracleIdentityGovernance Domain ..." 
+ ;; + * ) + echo "[ERROR] Unknown RCU Schema Type [$rcuType]" + echo "Supported values: oim" + exit -1 + ;; +esac + +echo "Extra RCU Schema Component Choosen[${extComponents}]" +echo "Extra RCU Schema Variable Choosen[${extVariables}]" + +#Debug +#export DISPLAY=0.0 +#/u01/oracle/oracle_common/bin/rcu -listComponents + +/u01/oracle/oracle_common/bin/rcu -silent -createRepository \ + -databaseType ORACLE -connectString ${connectString} \ + -dbUser sys -dbRole sysdba -useSamePasswordForAllSchemaUsers true \ + -selectDependentsForComponents true \ + -schemaPrefix ${schemaPrefix} ${extComponents} ${extVariables} \ + -component MDS -component IAU -component IAU_APPEND -component IAU_VIEWER \ + -component OPSS -component WLS -component STB < /u01/oracle/pwd.txt + diff --git a/OracleIdentityGovernance/kubernetes/create-rcu-schema/common/dropRepository.sh b/OracleIdentityGovernance/kubernetes/create-rcu-schema/common/dropRepository.sh new file mode 100755 index 000000000..3b353e1df --- /dev/null +++ b/OracleIdentityGovernance/kubernetes/create-rcu-schema/common/dropRepository.sh @@ -0,0 +1,62 @@ +#!/bin/bash +# Copyright (c) 2020, 2021, Oracle and/or its affiliates. +# Licensed under the Universal Permissive License v 1.0 as shown at https://oss.oracle.com/licenses/upl. +# +. /u01/oracle/wlserver/server/bin/setWLSEnv.sh + +echo "Check if the DB Service is ready to accept request " +connectString=${1:-oracle-db.default.svc.cluster.local:1521/devpdb.k8s} +schemaPrefix=${2:-oimcluster} +rcuType=${3:-fmw} +sysPassword=${4:-Oradoc_db1} +customVariables=${5:-none} + +echo "DB Connection String [$connectString] schemaPrefix [${schemaPrefix}] rcuType[${rcuType}] customVariables[${customVariables}]" + +max=20 +counter=0 +while [ $counter -le ${max} ] +do + java utils.dbping ORACLE_THIN "sys as sysdba" ${sysPassword} ${connectString} > dbping.err 2>&1 + [[ $? == 0 ]] && break; + ((counter++)) + echo "[$counter/${max}] Retrying the DB Connection ..." 
+ sleep 10 +done + +if [ $counter -gt ${max} ]; then + echo "[ERROR] Oracle DB Service is not ready after [${max}] iterations ..." + exit -1 +else + java utils.dbping ORACLE_THIN "sys as sysdba" ${sysPassword} ${connectString} +fi + +if [ $customVariables != "none" ]; then + extVariables="-variables $customVariables" +else + extVariables="" +fi + +case $rcuType in +oim) + extComponents="-component OIM -component SOAINFRA" + echo "Dropping RCU Schema for OracleIdentityGovernance Domain ..." + ;; + * ) + echo "[ERROR] Unknown RCU Schema Type [$rcuType]" + echo "Supported values: oim" + exit -1 + ;; +esac + +echo "Extra RCU Schema Component(s) Choosen[${extComponents}]" +echo "Extra RCU Schema Variable(s) Choosen[${extVariables}]" + +/u01/oracle/oracle_common/bin/rcu -silent -dropRepository \ + -databaseType ORACLE -connectString ${connectString} \ + -dbUser sys -dbRole sysdba \ + -selectDependentsForComponents true \ + -schemaPrefix ${schemaPrefix} ${extComponents} ${extVariables} \ + -component MDS -component IAU -component IAU_APPEND -component IAU_VIEWER \ + -component OPSS -component WLS -component STB < /u01/oracle/pwd.txt + diff --git a/OracleIdentityGovernance/kubernetes/create-rcu-schema/common/rcu.yaml b/OracleIdentityGovernance/kubernetes/create-rcu-schema/common/rcu.yaml new file mode 100755 index 000000000..16f91ca07 --- /dev/null +++ b/OracleIdentityGovernance/kubernetes/create-rcu-schema/common/rcu.yaml @@ -0,0 +1,20 @@ +# Copyright (c) 2020, 2021, Oracle and/or its affiliates. +# Licensed under the Universal Permissive License v 1.0 as shown at https://oss.oracle.com/licenses/upl. 
+# +apiVersion: v1 +kind: Pod +metadata: + labels: + run: rcu + name: rcu + namespace: default +spec: + containers: + - args: + - sleep + - infinity + image: oracle/oig:12.2.1.4.0 + imagePullPolicy: IfNotPresent + name: rcu + imagePullSecrets: + - name: docker-store diff --git a/OracleIdentityGovernance/kubernetes/create-rcu-schema/common/template/rcu.yaml.template b/OracleIdentityGovernance/kubernetes/create-rcu-schema/common/template/rcu.yaml.template new file mode 100755 index 000000000..0e9f3f038 --- /dev/null +++ b/OracleIdentityGovernance/kubernetes/create-rcu-schema/common/template/rcu.yaml.template @@ -0,0 +1,22 @@ +# Copyright (c) 2020, 2021, Oracle and/or its affiliates. +# Licensed under the Universal Permissive License v 1.0 as shown at https://oss.oracle.com/licenses/upl. +# +# This is a template for RCU Pod +# +apiVersion: v1 +kind: Pod +metadata: + labels: + run: rcu + name: rcu + namespace: %NAMESPACE% +spec: + containers: + - args: + - sleep + - infinity + image: oracle/oig:12.2.1.4.0 + imagePullPolicy: %WEBLOGIC_IMAGE_PULL_POLICY% + name: rcu + %WEBLOGIC_IMAGE_PULL_SECRET_PREFIX%imagePullSecrets: + %WEBLOGIC_IMAGE_PULL_SECRET_PREFIX%- name: %WEBLOGIC_IMAGE_PULL_SECRET_NAME% diff --git a/OracleIdentityGovernance/kubernetes/create-rcu-schema/create-image-pull-secret.sh b/OracleIdentityGovernance/kubernetes/create-rcu-schema/create-image-pull-secret.sh new file mode 100755 index 000000000..307591ff1 --- /dev/null +++ b/OracleIdentityGovernance/kubernetes/create-rcu-schema/create-image-pull-secret.sh @@ -0,0 +1,59 @@ +#!/bin/bash +# Copyright (c) 2020, 2021, Oracle and/or its affiliates. +# Licensed under the Universal Permissive License v 1.0 as shown at https://oss.oracle.com/licenses/upl. 
+# +# Create ImagePullSecret to pull Oracle DB and OracleIdentityGovernance Image + +script="${BASH_SOURCE[0]}" +scriptDir="$( cd "$( dirname "${script}" )" && pwd )" + +function usage { + echo "usage: ${script} -u -p -e -s [-h]" + echo " -u Oracle Container Registry User Name (needed)" + echo " -p Oracle Container Registry Password (needed)" + echo " -e email (needed)" + echo " -s Generated Secret (optional) " + echo " (default: docker-store) " + echo " -h Help" + exit $1 +} + +while getopts ":hu:p:s:e:" opt; do + case $opt in + u) username="${OPTARG}" + ;; + p) password="${OPTARG}" + ;; + e) email="${OPTARG}" + ;; + s) secret="${OPTARG}" + ;; + h) usage 0 + ;; + *) usage 1 + ;; + esac +done + +if [ -z ${username} ]; then + echo "${script}: -u must be specified." + usage 1 +fi + +if [ -z ${password} ]; then + echo "${script}: -p must be specified." + usage 1 +fi + +if [ -z ${email} ]; then + echo "${script}: -e must be specified." + usage 1 +fi + +if [ -z ${secret} ]; then + secret="docker-store" +fi + +kubectl delete secret/${secret} --ignore-not-found +echo "Creating ImagePullSecret on container-registry.oracle.com" +kubectl create secret docker-registry ${secret} --docker-server=container-registry.oracle.com --docker-username=${username} --docker-password=${password} --docker-email=${email} diff --git a/OracleIdentityGovernance/kubernetes/create-rcu-schema/create-rcu-schema.sh b/OracleIdentityGovernance/kubernetes/create-rcu-schema/create-rcu-schema.sh new file mode 100755 index 000000000..1880efd8a --- /dev/null +++ b/OracleIdentityGovernance/kubernetes/create-rcu-schema/create-rcu-schema.sh @@ -0,0 +1,204 @@ +#!/bin/bash +# Copyright (c) 2020, 2021, Oracle and/or its affiliates. +# Licensed under the Universal Permissive License v 1.0 as shown at https://oss.oracle.com/licenses/upl. 
+# +# Configure RCU schema based on schemaPrefix and rcuDatabaseURL + +script="${BASH_SOURCE[0]}" +scriptDir="$( cd "$( dirname "${script}" )" && pwd )" +source ${scriptDir}/../common/utility.sh + +function usage { + echo "usage: ${script} -s -t -d -i -u -p -n -q -r -o -c [-l] [-h] " + echo " -s RCU Schema Prefix (required)" + echo " -t RCU Schema Type (optional)" + echo " (supported values: oim)" + echo " -d RCU Oracle Database URL (optional) " + echo " (default: oracle-db.default.svc.cluster.local:1521/devpdb.k8s) " + echo " -p OracleIdentityGovernance ImagePullSecret (optional) " + echo " (default: none) " + echo " -i OracleIdentityGovernance Image (optional) " + echo " (default: oracle/oig:12.2.1.4.0) " + echo " -u OracleIdentityGovernance ImagePullPolicy (optional) " + echo " (default: IfNotPresent) " + echo " -n Namespace for RCU pod (optional)" + echo " (default: default)" + echo " -q password for database SYSDBA user. (optional)" + echo " (default: Oradoc_db1)" + echo " -r password for all schema owner (regular user). (optional)" + echo " (default: Oradoc_db1)" + echo " -o Output directory for the generated YAML file. (optional)" + echo " (default: rcuoutput)" + echo " -c Comma-separated custom variables in the format variablename=value. (optional)." + echo " (default: none)" + echo " -l Timeout limit in seconds. (optional)." 
+ echo " (default: 300)" + echo " -h Help" + exit $1 +} + +# Checks if all container(s) in a pod are running state based on READY column using given timeout limit +# NAME READY STATUS RESTARTS AGE +# domain1-adminserver 1/1 Running 0 4m +function checkPodStateUsingCustomTimeout(){ + + status="NotReady" + count=1 + + pod=$1 + ns=$2 + state=${3:-1/1} + timeoutLimit=${4:-300} + max=`expr ${timeoutLimit} / 5` + + echo "Checking Pod READY column for State [$state]" + pname=`kubectl get po -n ${ns} | grep -w ${pod} | awk '{print $1}'` + if [ -z ${pname} ]; then + echo "No such pod [$pod] exists in NameSpace [$ns] " + exit -1 + fi + + rcode=`kubectl get po ${pname} -n ${ns} | grep -w ${pod} | awk '{print $2}'` + [[ ${rcode} -eq "${state}" ]] && status="Ready" + + while [ ${status} != "Ready" -a $count -le $max ] ; do + sleep 5 + rcode=`kubectl get po/$pod -n ${ns} | grep -v NAME | awk '{print $2}'` + [[ ${rcode} -eq "1/1" ]] && status="Ready" + echo "Pod [$1] Status is ${status} Iter [$count/$max]" + count=`expr $count + 1` + done + if [ $count -gt $max ] ; then + echo "[ERROR] Unable to start the Pod [$pod] after ${timeoutLimit}s "; + exit 1 + fi + + pname=`kubectl get po -n ${ns} | grep -w ${pod} | awk '{print $1}'` + kubectl -n ${ns} get po ${pname} +} + +timeout=300 + +while getopts ":hs:d:p:i:t:n:q:r:o:u:c:l:" opt; do + case $opt in + s) schemaPrefix="${OPTARG}" + ;; + t) rcuType="${OPTARG}" + ;; + d) dburl="${OPTARG}" + ;; + p) pullsecret="${OPTARG}" + ;; + i) fmwimage="${OPTARG}" + ;; + n) namespace="${OPTARG}" + ;; + q) sysPassword="${OPTARG}" + ;; + r) schemaPassword="${OPTARG}" + ;; + o) rcuOutputDir="${OPTARG}" + ;; + u) imagePullPolicy="${OPTARG}" + ;; + c) customVariables="${OPTARG}" + ;; + l) timeout="${OPTARG}" + ;; + h) usage 0 + ;; + *) usage 1 + ;; + esac +done + +if [ -z ${schemaPrefix} ]; then + echo "${script}: -s must be specified." 
+ usage 1 +fi + +if [ -z ${dburl} ]; then + dburl="oracle-db.default.svc.cluster.local:1521/devpdb.k8s" +fi + +if [ -z ${rcuType} ]; then + rcuType="fmw" +fi + +if [ -z ${pullsecret} ]; then + pullsecret="none" + pullsecretPrefix="#" +fi + +if [ -z ${fmwimage} ]; then + fmwimage="oracle/oig:12.2.1.4.0" +fi + +if [ -z ${imagePullPolicy} ]; then + imagePullPolicy="IfNotPresent" +fi + +if [ -z ${namespace} ]; then + namespace="default" +fi + +if [ -z ${sysPassword} ]; then + sysPassword="Oradoc_db1" +fi + +if [ -z ${schemaPassword} ]; then + schemaPassword="Oradoc_db1" +fi + +if [ -z ${rcuOutputDir} ]; then + rcuOutputDir="rcuoutput" +fi + +if [ -z ${customVariables} ]; then + customVariables="none" +fi + +if [ -z ${timeout} ]; then + timeout=300 +fi + +echo "ImagePullSecret[$pullsecret] Image[${fmwimage}] dburl[${dburl}] rcuType[${rcuType}] customVariables[${customVariables}]" + +mkdir -p ${rcuOutputDir} +rcuYaml=${rcuOutputDir}/rcu.yaml +rm -f ${rcuYaml} +rcuYamlTemp=${scriptDir}/common/template/rcu.yaml.template +cp $rcuYamlTemp $rcuYaml + +# Modify the ImagePullSecret based on input +sed -i -e "s:%NAMESPACE%:${namespace}:g" $rcuYaml +sed -i -e "s:%WEBLOGIC_IMAGE_PULL_POLICY%:${imagePullPolicy}:g" $rcuYaml +sed -i -e "s:%WEBLOGIC_IMAGE_PULL_SECRET_NAME%:${pullsecret}:g" $rcuYaml +sed -i -e "s:%WEBLOGIC_IMAGE_PULL_SECRET_PREFIX%:${pullsecretPrefix}:g" $rcuYaml +sed -i -e "s?image:.*?image: ${fmwimage}?g" $rcuYaml +kubectl apply -f $rcuYaml + +# Make sure the rcu deployment Pod is RUNNING +checkPod rcu $namespace +checkPodStateUsingCustomTimeout rcu $namespace "1/1" ${timeout} +sleep 5 +kubectl get po/rcu -n $namespace + +# Generate the default password files for rcu command +echo "$sysPassword" > pwd.txt +echo "$schemaPassword" >> pwd.txt + +kubectl exec -n $namespace -i rcu -- bash -c 'cat > /u01/oracle/createRepository.sh' < ${scriptDir}/common/createRepository.sh +kubectl exec -n $namespace -i rcu -- bash -c 'cat > /u01/oracle/pwd.txt' < pwd.txt +rm -rf 
createRepository.sh pwd.txt + +kubectl exec -n $namespace -i rcu /bin/bash /u01/oracle/createRepository.sh ${dburl} ${schemaPrefix} ${rcuType} ${sysPassword} ${customVariables} +if [ $? != 0 ]; then + echo "######################"; + echo "[ERROR] Could not create the RCU Repository"; + echo "######################"; + exit -3; +fi + +echo "[INFO] Modify the domain.input.yaml to use [$dburl] as rcuDatabaseURL and [${schemaPrefix}] as rcuSchemaPrefix " + diff --git a/OracleIdentityGovernance/kubernetes/create-rcu-schema/drop-rcu-schema.sh b/OracleIdentityGovernance/kubernetes/create-rcu-schema/drop-rcu-schema.sh new file mode 100755 index 000000000..71623bcd8 --- /dev/null +++ b/OracleIdentityGovernance/kubernetes/create-rcu-schema/drop-rcu-schema.sh @@ -0,0 +1,108 @@ +#!/bin/bash +# Copyright (c) 2020, 2021, Oracle and/or its affiliates. +# Licensed under the Universal Permissive License v 1.0 as shown at https://oss.oracle.com/licenses/upl. +# +# Drop the RCU schema based on schemaPrefix and Database URL + +script="${BASH_SOURCE[0]}" +scriptDir="$( cd "$( dirname "${script}" )" && pwd )" +source ${scriptDir}/../common/utility.sh + +function usage { + echo "usage: ${script} -s -d -n -q -r -c [-h]" + echo " -s RCU Schema Prefix (required)" + echo " -t RCU Schema Type (optional)" + echo " (supported values: oim) " + echo " -d Oracle Database URL (optional)" + echo " (default: oracle-db.default.svc.cluster.local:1521/devpdb.k8s) " + echo " -n Namespace where RCU pod is deployed (optional)" + echo " (default: default) " + echo " -q password for database SYSDBA user. (optional)" + echo " (default: Oradoc_db1)" + echo " -r password for all schema owner (regular user). (optional)" + echo " (default: Oradoc_db1)" + echo " -c Comma-separated custom variables in the format variablename=value. (optional)." 
+ echo " (default: none)" + echo " -h Help" + exit $1 +} + +while getopts ":hs:d:t:n:q:r:c:" opt; do + case $opt in + s) schemaPrefix="${OPTARG}" + ;; + t) rcuType="${OPTARG}" + ;; + d) dburl="${OPTARG}" + ;; + n) namespace="${OPTARG}" + ;; + q) sysPassword="${OPTARG}" + ;; + r) schemaPassword="${OPTARG}" + ;; + c) customVariables="${OPTARG}" + ;; + h) usage 0 + ;; + *) usage 1 + ;; + esac +done + +if [ -z ${schemaPrefix} ]; then + echo "${script}: -s must be specified." + usage 1 +fi + +if [ -z ${dburl} ]; then + dburl="oracle-db.default.svc.cluster.local:1521/devpdb.k8s" +fi + +if [ -z ${rcuType} ]; then + rcuType="fmw" +fi + +if [ -z ${namespace} ]; then + namespace="default" +fi + +if [ -z ${sysPassword} ]; then + sysPassword="Oradoc_db1" +fi + +if [ -z ${schemaPassword} ]; then + schemaPassword="Oradoc_db1" +fi + +if [ -z ${customVariables} ]; then + customVariables="none" +fi + +rcupod=`kubectl get po -n ${namespace} | grep rcu | cut -f1 -d " " ` +if [ -z ${rcupod} ]; then + echo "RCU deployment pod not found in [$namespace] Namespace" + exit -2 +fi + +#fmwimage=`kubectl get pod/rcu -o jsonpath="{..image}"` +echo "DB Connection String [$dburl], schemaPrefix [${schemaPrefix}] rcuType [${rcuType}] schemaProfileType [${customVariables}]" + +echo "${sysPassword}" > pwd.txt +echo "${schemaPassword}" >> pwd.txt + +kubectl exec -n $namespace -i rcu -- bash -c 'cat > /u01/oracle/dropRepository.sh' < ${scriptDir}/common/dropRepository.sh +kubectl exec -n $namespace -i rcu -- bash -c 'cat > /u01/oracle/pwd.txt' < pwd.txt +rm -rf dropRepository.sh pwd.txt + +kubectl exec -n $namespace -i rcu /bin/bash /u01/oracle/dropRepository.sh ${dburl} ${schemaPrefix} ${rcuType} ${sysPassword} ${customVariables} +if [ $? 
!= 0 ]; then + echo "######################"; + echo "[ERROR] Could not drop the RCU Repository based on dburl[${dburl}] schemaPrefix[${schemaPrefix}] "; + echo "######################"; + exit -3; +fi + +kubectl delete pod rcu -n ${namespace} +checkPodDelete rcu ${namespace} + diff --git a/OracleIdentityGovernance/kubernetes/create-weblogic-domain-credentials/README.md b/OracleIdentityGovernance/kubernetes/create-weblogic-domain-credentials/README.md new file mode 100755 index 000000000..caaceb130 --- /dev/null +++ b/OracleIdentityGovernance/kubernetes/create-weblogic-domain-credentials/README.md @@ -0,0 +1,50 @@ +# Creating credentials for a WebLogic domain + +This sample demonstrates how to create a Kubernetes secret containing the +credentials for a WebLogic domain. The operator expects this secret to be +named following the pattern `domainUID-weblogic-credentials`, where `domainUID` +is the unique identifier of the domain. It must be in the same namespace +that the domain will run in. + +To use the sample, run the command: + +``` +$ ./create-weblogic-credentials.sh -u username -p password -d domainUID -n namespace -s secretName +``` + +The parameters are as follows: + +``` + -u user name, must be specified. + -p password, must be specified. + -d domainUID, optional. The default value is oimcluster. If specified, the secret will be labeled with the domainUID unless the given value is an empty string. + -n namespace, optional. Use the oimcluster namespace if not specified. + -s secretName, optional. If not specified, the secret name will be determined based on the domainUID value. +``` + +This creates a `generic` secret containing the user name and password as literal values. + +You can check the secret with the `kubectl get secret` command. 
An example is shown below, +including the output: + +``` +$ kubectl -n oimcluster get secret oimcluster-weblogic-credentials -o yaml +apiVersion: v1 +data: + password: d2VsY29tZTE= + username: d2VibG9naWM= +kind: Secret +metadata: + creationTimestamp: 2018-12-12T20:25:20Z + labels: + weblogic.domainName: oimcluster + weblogic.domainUID: oimcluster + name: oimcluster-weblogic-credentials + namespace: oimcluster + resourceVersion: "5680" + selfLink: /api/v1/namespaces/oimcluster/secrets/oimcluster-weblogic-credentials + uid: 0c2b3510-fe4c-11e8-994d-00001700101d +type: Opaque + +``` + diff --git a/OracleIdentityGovernance/kubernetes/create-weblogic-domain-credentials/create-weblogic-credentials.sh b/OracleIdentityGovernance/kubernetes/create-weblogic-domain-credentials/create-weblogic-credentials.sh new file mode 100755 index 000000000..3b835dc36 --- /dev/null +++ b/OracleIdentityGovernance/kubernetes/create-weblogic-domain-credentials/create-weblogic-credentials.sh @@ -0,0 +1,121 @@ +#!/usr/bin/env bash +# Copyright (c) 2020, 2021, Oracle and/or its affiliates. +# Licensed under the Universal Permissive License v 1.0 as shown at https://oss.oracle.com/licenses/upl. +# +# Description +# This sample script creates a Kubernetes secret for WebLogic domain admin credentials. +# +# The following pre-requisites must be handled prior to running this script: +# * The kubernetes namespace must already be created +# +# Secret name determination +# 1) secretName - if specified +# 2) oimcluster-weblogic-credentials - if secretName and domainUID are both not specified. This is the default out-of-the-box. +# 3) -weblogic-credentials - if secretName is not specified, and domainUID is specified. +# 4) weblogic-credentials - if secretName is not specified, and domainUID is specified as "". 
+# +# The generated secret will be labeled with +# weblogic.domainUID=$domainUID +# and +# weblogic.domainName=$domainUID +# Where the $domainUID is the value of the -d command line option, unless the value supplied is an empty String "" +# + +script="${BASH_SOURCE[0]}" + +# +# Function to exit and print an error message +# $1 - text of message +function fail { + echo [ERROR] $* + exit 1 +} + +# Try to execute kubectl to see whether kubectl is available +function validateKubectlAvailable { + if ! [ -x "$(command -v kubectl)" ]; then + fail "kubectl is not installed" + fi +} + +function usage { + echo usage: ${script} -u username -p password [-d domainUID] [-n namespace] [-s secretName] [-h] + echo " -u username, must be specified." + echo " -p password, must be specified." + echo " -d domainUID, optional. The default value is oimcluster. If specified, the secret will be labeled with the domainUID unless the given value is an empty string." + echo " -n namespace, optional. Use the oimcluster namespace if not specified" + echo " -s secretName, optional. If not specified, the secret name will be determined based on the domainUID value" + echo " -h Help" + exit $1 +} + +# +# Parse the command line options +# +domainUID=oimcluster +namespace=oimcluster +while getopts "hu:p:n:d:s:" opt; do + case $opt in + u) username="${OPTARG}" + ;; + p) password="${OPTARG}" + ;; + n) namespace="${OPTARG}" + ;; + d) domainUID="${OPTARG}" + ;; + s) secretName="${OPTARG}" + ;; + h) usage 0 + ;; + *) usage 1 + ;; + esac +done + +if [ -z $secretName ]; then + if [ -z $domainUID ]; then + secretName=weblogic-credentials + else + secretName=$domainUID-weblogic-credentials + fi +fi + +if [ -z ${username} ]; then + echo "${script}: -u must be specified." + missingRequiredOption="true" +fi + +if [ -z ${password} ]; then + echo "${script}: -p must be specified." 
+ missingRequiredOption="true" +fi + +if [ "${missingRequiredOption}" == "true" ]; then + usage 1 +fi + +# check and see if the secret already exists +result=`kubectl get secret ${secretName} -n ${namespace} --ignore-not-found=true | grep ${secretName} | wc | awk ' { print $1; }'` +if [ "${result:=Error}" != "0" ]; then + fail "The secret ${secretName} already exists in namespace ${namespace}." +fi + +# create the secret +kubectl -n $namespace create secret generic $secretName \ + --from-literal=username=$username \ + --from-literal=password=$password + +# label the secret with domainUID if needed +if [ ! -z $domainUID ]; then + kubectl label secret ${secretName} -n $namespace weblogic.domainUID=$domainUID weblogic.domainName=$domainUID +fi + +# Verify the secret exists +SECRET=`kubectl get secret ${secretName} -n ${namespace} | grep ${secretName} | wc | awk ' { print $1; }'` +if [ "${SECRET}" != "1" ]; then + fail "The secret ${secretName} was not found in namespace ${namespace}" +fi + +echo "The secret ${secretName} has been successfully created in the ${namespace} namespace." + diff --git a/OracleIdentityGovernance/kubernetes/create-weblogic-domain-pv-pvc/README.md b/OracleIdentityGovernance/kubernetes/create-weblogic-domain-pv-pvc/README.md new file mode 100755 index 000000000..96b81e668 --- /dev/null +++ b/OracleIdentityGovernance/kubernetes/create-weblogic-domain-pv-pvc/README.md @@ -0,0 +1,222 @@ +# Sample persistent volume and persistent volume claim + +The sample scripts demonstrate the creation of a Kubernetes persistent volume (PV) and persistent volume claim (PVC), which can then be used in a domain resource as a persistent storage for the WebLogic domain home or log files. + +A PV and PVC can be shared by multiple WebLogic domains or dedicated to a particular domain. + +## Prerequisites + +Please read the [Persistent Volumes](../../../../site/persistent-volumes.md) guide before proceeding. 
+ +## Using the scripts to create a PV and PVC + +Prior to running the `create-pv-pvc.sh` script, make a copy of the `create-pv-pvc-inputs.yaml` file, and uncomment and explicitly configure the `weblogicDomainStoragePath` property in the inputs file. + +Run the create script, pointing it at your inputs file and an output directory: + +``` +$ ./create-pv-pvc.sh \ + -i create-pv-pvc-inputs.yaml \ + -o /path/to/output-directory +``` + +The `create-pv-pvc.sh` script will create a subdirectory `pv-pvcs` under the given `/path/to/output-directory` directory. By default, the script generates two YAML files, namely `oimcluster-domain-pv.yaml` and `oimcluster-domain-pvc.yaml`, in the `/path/to/output-directory/pv-pvcs` directory. These two YAML files can be used to create the Kubernetes resources using the `kubectl create -f` command. + +``` +$ kubectl create -f oimcluster-domain-pv.yaml +$ kubectl create -f oimcluster-domain-pvc.yaml + +``` + +As a convenience, the script can optionally create the PV and PVC resources using the `-e` option. + +The usage of the create script is as follows: + +``` +$ sh create-pv-pvc.sh -h +usage: create-pv-pvc.sh -i file -o dir [-e] [-h] + -i Parameter inputs file, must be specified. + -o Output directory for the generated yaml files, must be specified. + -e Also create the Kubernetes objects using the generated yaml files + -h Help +``` + +If you copy the sample scripts to a different location, make sure that you copy everything in the `/kubernetes/samples/scripts` directory together into the target directory, maintaining the original directory hierarchy. + +## Configuration parameters + +The PV and PVC creation inputs can be customized by editing the `create-pv-pvc-inputs.yaml` file. + +| Parameter | Definition | Default | +| --- | --- | --- | +| `domainUID` | ID of the domain resource to which the generated PV and PVC will be dedicated. Leave it empty if the PV and PVC are going to be shared by multiple domains. 
| no default | +| `namespace` | Kubernetes namespace to create the PVC. | `default` | +| `baseName` | Base name of the PV and PVC. The generated PV and PVC will be `-pv` and `-pvc` respectively. | `weblogic-sample` | +| `weblogicDomainStoragePath` | Physical path of the storage for the PV. When `weblogicDomainStorageType` is set to `HOST_PATH`, this value should be set to the path to the domain storage on the Kubernetes host. When `weblogicDomainStorageType` is set to NFS, then `weblogicDomainStorageNFSServer` should be set to the IP address or name of the NFS server, and this value should be set to the exported path on that server. Note that the path where the domain is mounted in the WebLogic containers is not affected by this setting, that is determined when you create your domain. | no default | +| `weblogicDomainStorageReclaimPolicy` | Kubernetes PVC policy for the persistent storage. The valid values are: `Retain`, `Delete`, and `Recycle`. | `Retain` | +| `weblogicDomainStorageSize` | Total storage allocated for the PVC. | `10Gi` | +| `weblogicDomainStorageType` | Type of storage. Legal values are `NFS` and `HOST_PATH`. If using `NFS`, `weblogicDomainStorageNFSServer` must be specified. | `HOST_PATH` | +| `weblogicDomainStorageNFSServer`| Name or IP address of the NFS server. This setting only applies if `weblogicDomainStorageType` is `NFS`. | no default | + +## Shared versus dedicated PVC + +By default, the `domainUID` is left empty in the inputs file, which means the generated PV and PVC will not be associated with a particular domain, but can be shared by multiple domain resources in the same Kubernetes namespace as the PV and PVC. + +For the use cases where dedicated PV and PVC are desired for a particular domain, the `domainUID` needs to be set in the `create-pv-pvc-inputs.yaml` file. The presence of a non-empty `domainUID` in the inputs file will cause the generated PV and PVC to be associated with the specified `domainUID`. 
The association includes that the names of the generated YAML files and the Kubernetes PV and PVC objects are decorated with the `domainUID`, and the PV and PVC objects are also labeled with the `domainUID`. + +## Verify the results + +The create script will verify that the PV and PVC were created, and will report a failure if there was any error. However, it may be desirable to manually verify the PV and PVC, even if just to gain familiarity with the various Kubernetes objects that were created by the script. + +### Generated YAML files with the default inputs + +The content of the generated `oimcluster-domain-pvc.yaml`: + +``` +# Copyright (c) 2020, 2021, Oracle and/or its affiliates. +# Licensed under the Universal Permissive License v 1.0 as shown at https://oss.oracle.com/licenses/upl. +# +kind: PersistentVolumeClaim +apiVersion: v1 +metadata: + name: oimcluster-domain-pvc + namespace: default + + storageClassName: oimcluster-domain-storage-class + accessModes: + - ReadWriteMany + resources: + requests: + storage: 10Gi +``` + +The content of the generated `oimcluster-domain-pv.yaml`: +``` +# Copyright (c) 2020, 2021, Oracle and/or its affiliates. +# Licensed under the Universal Permissive License v 1.0 as shown at https://oss.oracle.com/licenses/upl. +# +apiVersion: v1 +kind: PersistentVolume +metadata: + name: oimcluster-domain-pv + # labels: + # weblogic.domainUID: +spec: + storageClassName: oimcluster-domain-storage-class + capacity: + storage: 10Gi + accessModes: + - ReadWriteMany + # Valid values are Retain, Delete or Recycle + persistentVolumeReclaimPolicy: Retain + hostPath: + # nfs: + # server: %SAMPLE_STORAGE_NFS_SERVER% + path: "/scratch/k8s_dir" + +``` + +### Generated YAML files for dedicated PV and PVC + +The content of the generated `oimcluster-domain-pvc.yaml` when `domainUID` is set to `domain1`: + +``` +# Copyright (c) 2020, 2021, Oracle and/or its affiliates. 
+# Licensed under the Universal Permissive License v 1.0 as shown at https://oss.oracle.com/licenses/upl. +# +kind: PersistentVolumeClaim +apiVersion: v1 +metadata: + name: oimcluster-domain-pvc + namespace: default + labels: + weblogic.domainUID: oimcluster +spec: + storageClassName: oimcluster-domain-storage-class + accessModes: + - ReadWriteMany + resources: + requests: + storage: 10Gi +``` + +The content of the generated `oimcluster-domain-pv.yaml` when `domainUID` is set to `domain1`: +``` +# Copyright (c) 2020, 2021, Oracle and/or its affiliates. +# Licensed under the Universal Permissive License v 1.0 as shown at https://oss.oracle.com/licenses/upl. +# +apiVersion: v1 +kind: PersistentVolume +metadata: + name: oimcluster-domain-pv + labels: + weblogic.domainUID: oimcluster +spec: + storageClassName: oimcluster-domain-storage-class + capacity: + storage: 10Gi + accessModes: + - ReadWriteMany + # Valid values are Retain, Delete or Recycle + persistentVolumeReclaimPolicy: Retain + hostPath: + # nfs: + # server: %SAMPLE_STORAGE_NFS_SERVER% + path: "/scratch/k8s_dir" +``` + +### Verify the PV and PVC objects + +You can use this command to verify the persistent volume was created, note that the `Status` field +should have the value `Bound`, indicating the that persistent volume has been claimed: + +``` +$ kubectl describe pv oimcluster-domain-pv +Name: oimcluster-domain-pv +Annotations: pv.kubernetes.io/bound-by-controller=yes +StorageClass: oimcluster-domain-storage-class +Status: Bound +Claim: default/oimcluster-domain-pvc +Reclaim Policy: Retain +Access Modes: RWX +Capacity: 10Gi +Message: +Source: + Type: HostPath (bare host directory volume) + Path: /scratch/k8s_dir + HostPathType: +Events: + +``` + +You can use this command to verify the persistent volume claim was created: + +``` +$ kubectl describe pvc oimcluster-domain-pvc +Name: oimcluster-domain-pvc +Namespace: default +StorageClass: oimcluster-domain-storage-class +Status: Bound +Volume: 
oimcluster-domain-pv +Annotations:    pv.kubernetes.io/bind-completed=yes + pv.kubernetes.io/bound-by-controller=yes +Finalizers: [] +Capacity: 10Gi +Access Modes: RWX +Events: + +``` + +## Troubleshooting + +* Message: `[ERROR] The weblogicDomainStoragePath parameter in kubernetes/samples/scripts/create-weblogic-domain-pv-pvc/create-pv-pvc-inputs.yaml is missing, null or empty` +Edit the file and set the value of the field. This value must be a directory that is world writable. +Optionally, follow these steps to tighten permissions on the named directory after you run the sample the first time: + + * Become the root user. + * `ls -nd $value-of-weblogicDomainStoragePath` + * Note the values of the third and fourth field of the output. + * `chown $third-field:$fourth-field $value-of-weblogicDomainStoragePath` + * `chmod 755 $value-of-weblogicDomainStoragePath` + * Return to your normal user ID. + diff --git a/OracleIdentityGovernance/kubernetes/create-weblogic-domain-pv-pvc/create-pv-pvc-inputs.yaml b/OracleIdentityGovernance/kubernetes/create-weblogic-domain-pv-pvc/create-pv-pvc-inputs.yaml new file mode 100755 index 000000000..a69c333e9 --- /dev/null +++ b/OracleIdentityGovernance/kubernetes/create-weblogic-domain-pv-pvc/create-pv-pvc-inputs.yaml @@ -0,0 +1,44 @@ +# Copyright (c) 2020, 2021, Oracle and/or its affiliates. +# Licensed under the Universal Permissive License v 1.0 as shown at https://oss.oracle.com/licenses/upl. +# +# The version of this inputs file. Do not modify. +version: create-oimcluster-pv-pvc-inputs-v1 + +# The base name of the pv and pvc +baseName: domain + +# Unique ID identifying a domain. +# If left empty, the generated pv can be shared by multiple domains +# This ID must not contain an underscore ("_"), and must be lowercase and unique across all domains in a Kubernetes cluster. +domainUID: oimcluster + +# Name of the namespace for the persistent volume claim +namespace: oimcluster + +# Persistent volume type for the persistent storage. 
+# The value must be 'HOST_PATH' or 'NFS'. +# If using 'NFS', weblogicDomainStorageNFSServer must be specified. +weblogicDomainStorageType: HOST_PATH + +# The server name or IP address of the NFS server to use for the persistent storage. +# The following line must be uncommented and customized if weblogicDomainStorageType is NFS: +#weblogicDomainStorageNFSServer: nfsServer + +# Physical path of the persistent storage. +# When weblogicDomainStorageType is set to HOST_PATH, this value should be set to the path to the +# domain storage on the Kubernetes host. +# When weblogicDomainStorageType is set to NFS, then weblogicDomainStorageNFSServer should be set +# to the IP address or name of the NFS server, and this value should be set to the exported path +# on that server. +# Note that the path where the domain is mounted in the WebLogic containers is not affected by this +# setting, that is determined when you create your domain. +# The following line must be uncommented and customized: +weblogicDomainStoragePath: /scratch/k8s_dir + +# Reclaim policy of the persistent storage +# The valid values are: 'Retain', 'Delete', and 'Recycle' +weblogicDomainStorageReclaimPolicy: Retain + +# Total storage allocated to the persistent storage. +weblogicDomainStorageSize: 10Gi + diff --git a/OracleIdentityGovernance/kubernetes/create-weblogic-domain-pv-pvc/create-pv-pvc.sh b/OracleIdentityGovernance/kubernetes/create-weblogic-domain-pv-pvc/create-pv-pvc.sh new file mode 100755 index 000000000..ba3de3459 --- /dev/null +++ b/OracleIdentityGovernance/kubernetes/create-weblogic-domain-pv-pvc/create-pv-pvc.sh @@ -0,0 +1,267 @@ +#!/usr/bin/env bash +# Copyright (c) 2020, 2021, Oracle and/or its affiliates. +# Licensed under the Universal Permissive License v 1.0 as shown at https://oss.oracle.com/licenses/upl. +# +# Description +# This sample script generates the Kubernetes yaml files for a persistent volume and persistent volume claim +# that can be used by a domain custom resource. 
+# +# The creation inputs can be customized by editing create-pv-pvc-inputs.yaml +# +# The following pre-requisites must be handled prior to running this script: +# * The Kubernetes namespace must already be created +# + +# Initialize +script="${BASH_SOURCE[0]}" +scriptDir="$( cd "$( dirname "${script}" )" && pwd )" +source ${scriptDir}/../common/utility.sh +source ${scriptDir}/../common/validate.sh + +function usage { + echo usage: ${script} -i file -o dir [-e] [-h] + echo " -i Parameter inputs file, must be specified." + echo " -o Output directory for the generated yaml files, must be specified." + echo " -e Also create the Kubernetes objects using the generated yaml files" + echo " -h Help" + exit $1 +} + +# +# Parse the command line options +# +executeIt=false +while getopts "ehi:o:" opt; do + case $opt in + i) valuesInputFile="${OPTARG}" + ;; + o) outputDir="${OPTARG}" + ;; + e) executeIt=true + ;; + h) usage 0 + ;; + *) usage 1 + ;; + esac +done + +if [ -z ${valuesInputFile} ]; then + echo "${script}: -i must be specified." + missingRequiredOption="true" +fi + +if [ -z ${outputDir} ]; then + echo "${script}: -o must be specified." + missingRequiredOption="true" +fi + +if [ "${missingRequiredOption}" == "true" ]; then + usage 1 +fi + +# +# Function to initialize and validate the output directory +# for the generated yaml files for this domain. 
+# +function initOutputDir { + pvOutputDir="$outputDir/pv-pvcs" + + if [ -z ${domainUID} ]; then + pvOutput="${pvOutputDir}/${baseName}-pv.yaml" + pvcOutput="${pvOutputDir}/${baseName}-pvc.yaml" + persistentVolumeName=${baseName}-pv + persistentVolumeClaimName=${baseName}-pvc + else + pvOutput="${pvOutputDir}/${domainUID}-${baseName}-pv.yaml" + pvcOutput="${pvOutputDir}/${domainUID}-${baseName}-pvc.yaml" + persistentVolumeName=${domainUID}-${baseName}-pv + persistentVolumeClaimName=${domainUID}-${baseName}-pvc + fi + removeFileIfExists ${pvOutputDir}/{valuesInputFile} + removeFileIfExists ${pvOutputDir}/{pvOutput} + removeFileIfExists ${pvOutputDir}/{pvcOutput} + removeFileIfExists ${pvOutputDir}/create-pv-pvc-inputs.yaml +} + +# +# Function to setup the environment to run the create domain job +# +function initialize { + + # Validate the required files exist + validateErrors=false + + if [ -z "${valuesInputFile}" ]; then + validationError "You must use the -i option to specify the name of the inputs parameter file (a modified copy of kubernetes/samples/scripts/create-weblogic-domain-pv-pvc/create-pv-pvc-inputs.yaml)." + else + if [ ! -f ${valuesInputFile} ]; then + validationError "Unable to locate the input parameters file ${valuesInputFile}" + fi + fi + + if [ -z "${outputDir}" ]; then + validationError "You must use the -o option to specify the name of an existing directory to store the generated yaml files in." + fi + + domainPVInput="${scriptDir}/pv-template.yaml" + if [ ! -f ${domainPVInput} ]; then + validationError "The template file ${domainPVInput} for generating a persistent volume was not found" + fi + + domainPVCInput="${scriptDir}/pvc-template.yaml" + if [ ! 
-f ${domainPVCInput} ]; then + validationError "The template file ${domainPVCInput} for generating a persistent volume claim was not found" + fi + + failIfValidationErrors + + # Parse the common inputs file + parseCommonInputs + validateInputParamsSpecified \ + weblogicDomainStoragePath \ + weblogicDomainStorageSize \ + baseName \ + namespace \ + version + + export requiredInputsVersion="create-weblogic-sample-domain-pv-pvc-inputs-v1" + validateDomainUid + validateNamespace + validateWeblogicDomainStorageType + validateWeblogicDomainStorageReclaimPolicy + initOutputDir + failIfValidationErrors +} + + +# +# Function to generate the yaml files for creating a domain +# +function createYamlFiles { + + # Create a directory for this domain's output files + mkdir -p ${pvOutputDir} + + # Make sure the output directory has a copy of the inputs file. + # The user can either pre-create the output directory, put the inputs + # file there, and create the domain from it, or the user can put the + # inputs file some place else and let this script create the output directory + # (if needed) and copy the inputs file there. 
+ copyInputsFileToOutputDirectory ${valuesInputFile} "${pvOutputDir}/create-pv-pvc-inputs.yaml" + + enabledPrefix="" # uncomment the feature + disabledPrefix="# " # comment out the feature + + echo Generating ${pvOutput} + + cp ${domainPVInput} ${pvOutput} + if [ "${weblogicDomainStorageType}" == "NFS" ]; then + hostPathPrefix="${disabledPrefix}" + nfsPrefix="${enabledPrefix}" + sed -i -e "s:%SAMPLE_STORAGE_NFS_SERVER%:${weblogicDomainStorageNFSServer}:g" ${pvOutput} + else + hostPathPrefix="${enabledPrefix}" + nfsPrefix="${disabledPrefix}" + fi + + sed -i -e "s:%NAMESPACE%:$namespace:g" ${pvOutput} + if [ -z ${domainUID} ]; then + domainUIDLabelPrefix="${disabledPrefix}" + separator="" + else + domainUIDLabelPrefix="${enabledPrefix}" + separator="-" + fi + sed -i -e "s:%DOMAIN_UID%:$domainUID:g" ${pvOutput} + sed -i -e "s:%SEPARATOR%:$separator:g" ${pvOutput} + sed -i -e "s:%DOMAIN_UID_LABEL_PREFIX%:${domainUIDLabelPrefix}:g" ${pvOutput} + + sed -i -e "s:%BASE_NAME%:$baseName:g" ${pvOutput} + sed -i -e "s:%SAMPLE_STORAGE_PATH%:${weblogicDomainStoragePath}:g" ${pvOutput} + sed -i -e "s:%SAMPLE_STORAGE_RECLAIM_POLICY%:${weblogicDomainStorageReclaimPolicy}:g" ${pvOutput} + sed -i -e "s:%SAMPLE_STORAGE_SIZE%:${weblogicDomainStorageSize}:g" ${pvOutput} + sed -i -e "s:%HOST_PATH_PREFIX%:${hostPathPrefix}:g" ${pvOutput} + sed -i -e "s:%NFS_PREFIX%:${nfsPrefix}:g" ${pvOutput} + + # Generate the yaml to create the persistent volume claim + echo Generating ${pvcOutput} + + cp ${domainPVCInput} ${pvcOutput} + sed -i -e "s:%NAMESPACE%:$namespace:g" ${pvcOutput} + sed -i -e "s:%BASE_NAME%:${baseName}:g" ${pvcOutput} + + sed -i -e "s:%DOMAIN_UID%:$domainUID:g" ${pvcOutput} + sed -i -e "s:%SEPARATOR%:$separator:g" ${pvcOutput} + sed -i -e "s:%DOMAIN_UID_LABEL_PREFIX%:${domainUIDLabelPrefix}:g" ${pvcOutput} + + sed -i -e "s:%SAMPLE_STORAGE_SIZE%:${weblogicDomainStorageSize}:g" ${pvcOutput} + + # Remove any "...yaml-e" files left over from running sed + rm -f 
${pvOutputDir}/*.yaml-e +} + +# +# Function to create the domain's persistent volume +# +function createDomainPV { + # Check if the persistent volume is already available + checkPvExists ${persistentVolumeName} + if [ "${PV_EXISTS}" = "false" ]; then + echo Creating the persistent volume ${persistentVolumeName} + kubectl create -f ${pvOutput} + checkPvState ${persistentVolumeName} Available + fi +} + +# +# Function to create the domain's persistent volume claim +# Must be called after createDomainPV since it relies on +# createDomainPV defining persistentVolumeName +# +function createDomainPVC { + # Check if the persistent volume claim is already available + checkPvcExists ${persistentVolumeClaimName} ${namespace} + if [ "${PVC_EXISTS}" = "false" ]; then + echo Creating the persistent volume claim ${persistentVolumeClaimName} + kubectl create -f ${pvcOutput} + checkPvState ${persistentVolumeName} Bound + fi +} + +# +# Function to output to the console a summary of the work completed +# +function printSummary { + echo "The following files were generated:" + echo " ${pvOutput}" + echo " ${pvcOutput}" +} + +# +# Perform the following sequence of steps to create a domain +# + +# Setup the environment for running this script and perform initial validation checks +initialize + +# Generate the yaml files for creating the domain +createYamlFiles + +# All done if the generate only option is true +if [ "${executeIt}" = true ]; then + + # Create the domain's persistent volume + createDomainPV + + # Create the domain's persistent volume claim + createDomainPVC +fi + +# Output a job summary +printSummary + +echo +echo Completed + + diff --git a/OracleIdentityGovernance/kubernetes/create-weblogic-domain-pv-pvc/pv-template.yaml b/OracleIdentityGovernance/kubernetes/create-weblogic-domain-pv-pvc/pv-template.yaml new file mode 100755 index 000000000..49e33a22f --- /dev/null +++ b/OracleIdentityGovernance/kubernetes/create-weblogic-domain-pv-pvc/pv-template.yaml @@ -0,0 +1,21 @@ +# 
Copyright (c) 2020, 2021, Oracle and/or its affiliates. +# Licensed under the Universal Permissive License v 1.0 as shown at https://oss.oracle.com/licenses/upl. +# +apiVersion: v1 +kind: PersistentVolume +metadata: + name: %DOMAIN_UID%%SEPARATOR%%BASE_NAME%-pv + labels: + %DOMAIN_UID_LABEL_PREFIX%weblogic.domainUID: %DOMAIN_UID% +spec: + storageClassName: %DOMAIN_UID%%SEPARATOR%%BASE_NAME%-storage-class + capacity: + storage: %SAMPLE_STORAGE_SIZE% + accessModes: + - ReadWriteMany + # Valid values are Retain, Delete or Recycle + persistentVolumeReclaimPolicy: %SAMPLE_STORAGE_RECLAIM_POLICY% + %HOST_PATH_PREFIX%hostPath: + %NFS_PREFIX%nfs: + %NFS_PREFIX%server: %SAMPLE_STORAGE_NFS_SERVER% + path: "%SAMPLE_STORAGE_PATH%" diff --git a/OracleIdentityGovernance/kubernetes/create-weblogic-domain-pv-pvc/pvc-template.yaml b/OracleIdentityGovernance/kubernetes/create-weblogic-domain-pv-pvc/pvc-template.yaml new file mode 100755 index 000000000..49e8d5afb --- /dev/null +++ b/OracleIdentityGovernance/kubernetes/create-weblogic-domain-pv-pvc/pvc-template.yaml @@ -0,0 +1,17 @@ +# Copyright (c) 2020, 2021, Oracle and/or its affiliates. +# Licensed under the Universal Permissive License v 1.0 as shown at https://oss.oracle.com/licenses/upl. 
+# +kind: PersistentVolumeClaim +apiVersion: v1 +metadata: + name: %DOMAIN_UID%%SEPARATOR%%BASE_NAME%-pvc + namespace: %NAMESPACE% + labels: + %DOMAIN_UID_LABEL_PREFIX%weblogic.domainUID: %DOMAIN_UID% +spec: + storageClassName: %DOMAIN_UID%%SEPARATOR%%BASE_NAME%-storage-class + accessModes: + - ReadWriteMany + resources: + requests: + storage: %SAMPLE_STORAGE_SIZE% diff --git a/OracleIdentityGovernance/kubernetes/delete-domain/README.md b/OracleIdentityGovernance/kubernetes/delete-domain/README.md new file mode 100755 index 000000000..800c7094a --- /dev/null +++ b/OracleIdentityGovernance/kubernetes/delete-domain/README.md @@ -0,0 +1,27 @@ +# Deleting domain resources created while executing the samples + +After running the sample, you will need to release domain resources that +can then be used for other purposes. The script in this sample demonstrates one approach to releasing +domain resources. + +## Using the script to delete domain resources + +```shell +$ ./delete-weblogic-domain-resources.sh \ + -d domain-uid[,domain-uid...] \ + [-s max-seconds] \ + [-t] +``` +The required option `-d` takes `domain-uid` values (separated + by commas and no spaces) to identify the domain resources that should be deleted. + +To limit the amount of time spent on attempting to delete domain resources, use `-s`. +The option must be followed by an integer that represents the total number of seconds +that will be spent attempting to delete resources. The default number of seconds is 120. + +The optional option `-t` shows what the script will delete without executing the deletion. 
+ +To see the help associated with the script: +```shell +$ ./delete-weblogic-domain-resources.sh -h +``` diff --git a/OracleIdentityGovernance/kubernetes/delete-domain/delete-weblogic-domain-resources.sh b/OracleIdentityGovernance/kubernetes/delete-domain/delete-weblogic-domain-resources.sh new file mode 100755 index 000000000..fd54d1ea0 --- /dev/null +++ b/OracleIdentityGovernance/kubernetes/delete-domain/delete-weblogic-domain-resources.sh @@ -0,0 +1,283 @@ +#!/bin/bash +# Copyright (c) 2019, 2021, Oracle and/or its affiliates. +# Licensed under the Universal Permissive License v 1.0 as shown at https://oss.oracle.com/licenses/upl. +# +# Description: +# Use this script to delete all kubernetes resources associated +# with a set of given domains. Alternatively, run the script +# in a test mode to show what would be deleted without actually +# performing the deletes. +# +# Usage: +# See "function usage" below or call this script with no parameters. +# + +script="${BASH_SOURCE[0]}" + +function usage { +cat << EOF + Usage: + + $(basename $0) -d domain-uid,domain-uid,... [-s max-seconds] [-t] + $(basename $0) -h + + Perform a best-effort delete of the kubernetes resources for + the given domain(s), and retry until either max-seconds is reached + or all resources were deleted (default $default_maxwaitsecs seconds). + + The domains can be specified as a comma-separated list of + domain-uids (no spaces). The domains can be located in any + kubernetes namespace. + + Specify '-t' to run the script in a test mode which will + show kubernetes commands but not actually perform them. + + The script runs in phases: + + Phase 1: Set the serverStartPolicy of each domain to NEVER if + it's not already NEVER. This should cause each + domain's operator to initiate a controlled shutdown + of the domain. Immediately proceed to phase 2. + + Phase 2: Wait up to half of max-seconds for WebLogic + Server pods to exit normally, and then proceed + to phase 3. 
+ + Phase 3: Periodically delete any remaining kubernetes resources + for the specified domains, including any pods + leftover from previous phases. Exit and fail if + max-seconds is exceeded and there are any leftover + kubernetes resources. + + This script exits with a zero status on success, and a + non-zero status on failure. +EOF +} + +# +# getDomainResources domain(s) outfilename +# +# Usage: +# getDomainResources domainA,domainB,... outfilename +# +# Internal helper function +# +# File output is all domain related resources for the given domain uids, one per line, +# in the form: 'kind name [-n namespace]'. For example: +# PersistentVolumeClaim domain1-pv-claim -n default +# PersistentVolume domain1-pv +# +function getDomainResources { + local domain_regex='' + LABEL_SELECTOR="weblogic.domainUID in ($1)" + IFS=',' read -ra UIDS <<< "$1" + for i in "${!UIDS[@]}"; do + if [ $i -gt 0 ]; then + domain_regex="$domain_regex|" + fi + domain_regex="$domain_regex^Domain ${UIDS[$i]} " + done + + # clean the output file + if [ -e $2 ]; then + rm $2 + fi + + # first, let's get all namespaced types with -l $LABEL_SELECTOR + NAMESPACED_TYPES="pod,job,deploy,rs,service,pvc,ingress,cm,serviceaccount,role,rolebinding,secret" + + kubectl get $NAMESPACED_TYPES \ + -l "$LABEL_SELECTOR" \ + -o=jsonpath='{range .items[*]}{.kind}{" "}{.metadata.name}{" -n "}{.metadata.namespace}{"\n"}{end}' \ + --all-namespaces=true >> $2 + + # if domain crd exists, look for domains too: + kubectl get crd domains.weblogic.oracle > /dev/null 2>&1 + if [ $? 
-eq 0 ]; then + kubectl get domain \ + -o=jsonpath='{range .items[*]}{.kind}{" "}{.metadata.name}{" -n "}{.metadata.namespace}{"\n"}{end}' \ + --all-namespaces=true | egrep "$domain_regex" >> $2 + fi + + # now, get all non-namespaced types with -l $LABEL_SELECTOR + + NOT_NAMESPACED_TYPES="pv,clusterroles,clusterrolebindings" + + kubectl get $NOT_NAMESPACED_TYPES \ + -l "$LABEL_SELECTOR" \ + -o=jsonpath='{range .items[*]}{.kind}{" "}{.metadata.name}{"\n"}{end}' \ + --all-namespaces=true >> $2 +} + +# +# deleteDomains domain(s) maxwaitsecs +# +# Usage: +# deleteDomains domainA,domainB,... maxwaitsecs +# +# Internal helper function +# This function first sets the serverStartPolicy of each Domain to NEVER +# and waits up to half of $2 for pods to 'self delete'. It then performs +# a helm delete on $1, and finally it directly deletes +# any remaining k8s resources for domain $1 (including any remaining pods) +# and retries these direct deletes up to $2 seconds. +# +# If global $test_mode is true, it shows candidate actions but doesn't +# actually perform them +# +function deleteDomains { + + if [ "$test_mode" = "true" ]; then + echo @@ Test mode! Displaying commands for deleting kubernetes resources with label weblogic.domainUID \'$1\' without actually deleting them. + else + echo @@ Deleting kubernetes resources with label weblogic.domainUID \'$1\'. 
+ fi + + local maxwaitsecs=${2:-$default_maxwaitsecs} + local tempfile="/tmp/$(basename $0).tmp.$$" # == /tmp/[script-file-name].tmp.[pid] + local mstart=`date +%s` + local phase=1 + + while : ; do + # get all k8s resources with matching domain-uid labels and put them in $tempfile + getDomainResources $1 $tempfile + + # get a count of all k8s resources with matching domain-uid labels + local allcount=`wc -l $tempfile | awk '{ print $1 }'` + + # get a count of all WLS pods (any pod with a matching domain-uid label that doesn't have 'traefik' or 'apache' embedded in its name) + local podcount=`grep "^Pod" $tempfile | grep -v traefik | grep -v apache | wc -l | awk '{ print $1 }'` + + local mnow=`date +%s` + + echo @@ $allcount resources remaining after $((mnow - mstart)) seconds, including $podcount WebLogic Server pods. Max wait is $maxwaitsecs seconds. + + # Exit if all k8s resources deleted or max wait seconds exceeded. + + if [ $allcount -eq 0 ]; then + echo @@ Success. + rm -f $tempfile + exit 0 + elif [ $((mnow - mstart)) -gt $maxwaitsecs ]; then + echo @@ Error! Max wait of $maxwaitsecs seconds exceeded with $allcount resources remaining, including $podcount WebLogic Server pods. Giving up. Remaining resources: + cat $tempfile + rm -f $tempfile + exit $allcount + fi + + # In phase 1, set the serverStartPolicy of each domain to NEVER and then immediately + # proceed to phase 2. If there are no domains or WLS pods, we also immediately go to phase 2. 
+ + if [ $phase -eq 1 ]; then + phase=2 + if [ $podcount -gt 0 ]; then + echo @@ "Setting serverStartPolicy to NEVER on each domain (this should cause operator(s) to initiate a controlled shutdown of the domain's pods.)" + cat $tempfile | grep "^Domain" | while read line; do + local name="`echo $line | awk '{ print $2 }'`" + local namespace="`echo $line | awk '{ print $4 }'`" + if [ "$test_mode" = "true" ]; then + echo "kubectl patch domain $name -n $namespace -p '{\"spec\":{\"serverStartPolicy\":\"NEVER\"}}' --type merge" + else + kubectl patch domain $name -n $namespace -p '{"spec":{"serverStartPolicy":"NEVER"}}' --type merge + fi + done + fi + fi + + # In phase 2, wait for the WLS pod count to go down to 0 for at most half + # of 'maxwaitsecs'. Otherwise proceed immediately to phase 3. + + if [ $phase -eq 2 ]; then + if [ $podcount -eq 0 ]; then + echo @@ All pods shutdown, about to directly delete remaining resources. + phase=3 + elif [ $((mnow - mstart)) -gt $((maxwaitsecs / 2)) ]; then + echo @@ Warning! $podcount WebLogic Server pods remaining but wait time exceeds half of max wait seconds. About to directly delete all remaining resources, including the leftover pods. + phase=3 + else + echo @@ "Waiting for operator to shutdown pods (will wait for no more than half of max wait seconds before directly deleting them)." + sleep 3 + continue + fi + fi + + # In phase 3, directly delete remaining k8s resources for the given domainUids + # (including any leftover WLS pods from previous phases). + + # for each namespace with leftover resources, try delete them + cat $tempfile | awk '{ print $4 }' | grep -v "^$" | sort -u | while read line; do + if [ "$test_mode" = "true" ]; then + echo kubectl -n $line delete $NAMESPACED_TYPES -l "$LABEL_SELECTOR" + else + kubectl -n $line delete $NAMESPACED_TYPES -l "$LABEL_SELECTOR" + fi + done + + # if there are any non-namespaced types left, try delete them + local no_namespace_count=`grep -c -v " -n " $tempfile` + if [ ! 
"$no_namespace_count" = "0" ]; then + if [ "$test_mode" = "true" ]; then + echo kubectl delete $NOT_NAMESPACED_TYPES -l "$LABEL_SELECTOR" + else + kubectl delete $NOT_NAMESPACED_TYPES -l "$LABEL_SELECTOR" + fi + fi + + # Delete domains, if any + cat $tempfile | grep "^Domain " | while read line; do + if [ "$test_mode" = "true" ]; then + echo kubectl delete $line + else + kubectl delete $line + fi + done + + sleep 3 + done +} + +# main entry point + +# default when to stop retrying (override via command line) +default_maxwaitsecs=120 + +# optional test mode that lists what would be deleted without +# actually deleting (override via command line) +test_mode=false + +domains="" + +# parse command line options +while getopts ":d:s:th" opt; do + case $opt in + d) domains="${OPTARG}" + ;; + + s) maxwaitsecs="${OPTARG}" + ;; + + t) test_mode="true" + ;; + + h) usage + exit 0 + ;; + + *) usage + exit 9999 + ;; + esac +done + +if [ "$domains" = "" ]; then + usage + exit 9999 +fi + +if [ ! -x "$(command -v kubectl)" ]; then + echo "@@ Error! kubectl is not installed." 
+ exit 9999 +fi + +deleteDomains "${domains}" "${maxwaitsecs:-$default_maxwaitsecs}" + diff --git a/OracleIdentityGovernance/kubernetes/3.0.1/design-console-ingress/Chart.yaml b/OracleIdentityGovernance/kubernetes/design-console-ingress/Chart.yaml similarity index 100% rename from OracleIdentityGovernance/kubernetes/3.0.1/design-console-ingress/Chart.yaml rename to OracleIdentityGovernance/kubernetes/design-console-ingress/Chart.yaml diff --git a/OracleIdentityGovernance/kubernetes/3.0.1/design-console-ingress/README.md b/OracleIdentityGovernance/kubernetes/design-console-ingress/README.md similarity index 100% rename from OracleIdentityGovernance/kubernetes/3.0.1/design-console-ingress/README.md rename to OracleIdentityGovernance/kubernetes/design-console-ingress/README.md diff --git a/OracleIdentityGovernance/kubernetes/design-console-ingress/templates/nginx-ingress-k8s1.19.yaml b/OracleIdentityGovernance/kubernetes/design-console-ingress/templates/nginx-ingress-k8s1.19.yaml new file mode 100644 index 000000000..b1dfb123e --- /dev/null +++ b/OracleIdentityGovernance/kubernetes/design-console-ingress/templates/nginx-ingress-k8s1.19.yaml @@ -0,0 +1,38 @@ +# Copyright (c) 2020, 2021, Oracle and/or its affiliates. +# Licensed under the Universal Permissive License v 1.0 as shown at https://oss.oracle.com/licenses/upl. 
+ +{{- if semverCompare ">=1.19-0" .Capabilities.KubeVersion.GitVersion -}} +{{- if eq .Values.type "NGINX" }} +--- +apiVersion: networking.k8s.io/v1 +kind: Ingress +metadata: + name: {{ .Values.wlsDomain.domainUID }}-nginx-designconsole + namespace: {{ .Release.Namespace }} + labels: + weblogic.resourceVersion: domain-v2 + annotations: + nginx.ingress.kubernetes.io/affinity: 'cookie' + nginx.ingress.kubernetes.io/enable-access-log: 'false' + kubernetes.io/ingress.class: 'nginx' +{{- if eq .Values.tls "SSL" }} + nginx.ingress.kubernetes.io/proxy-buffer-size: '2000k' + nginx.ingress.kubernetes.io/configuration-snippet: | + more_set_input_headers "X-Forwarded-Proto: https"; + more_set_input_headers "WL-Proxy-SSL: true"; + nginx.ingress.kubernetes.io/ingress.allow-http: 'false' +{{- end }} +spec: + rules: + - http: + paths: + - path: + pathType: ImplementationSpecific + backend: + service: + name: '{{ .Values.wlsDomain.domainUID }}-cluster-{{ .Values.wlsDomain.oimClusterName | lower | replace "_" "-" }}' + port: + number: {{ .Values.wlsDomain.oimServerT3Port }} +{{- end }} +{{- end }} + diff --git a/OracleIdentityGovernance/kubernetes/3.0.1/design-console-ingress/templates/nginx-ingress.yaml b/OracleIdentityGovernance/kubernetes/design-console-ingress/templates/nginx-ingress.yaml similarity index 85% rename from OracleIdentityGovernance/kubernetes/3.0.1/design-console-ingress/templates/nginx-ingress.yaml rename to OracleIdentityGovernance/kubernetes/design-console-ingress/templates/nginx-ingress.yaml index 9fedc4352..9849974e5 100644 --- a/OracleIdentityGovernance/kubernetes/3.0.1/design-console-ingress/templates/nginx-ingress.yaml +++ b/OracleIdentityGovernance/kubernetes/design-console-ingress/templates/nginx-ingress.yaml @@ -1,6 +1,7 @@ -# Copyright (c) 2020, Oracle Corporation and/or its affiliates. +# Copyright (c) 2020, 2021, Oracle and/or its affiliates. # Licensed under the Universal Permissive License v 1.0 as shown at https://oss.oracle.com/licenses/upl. 
+{{- if semverCompare "<1.19-0" .Capabilities.KubeVersion.GitVersion -}} {{- if eq .Values.type "NGINX" }} --- apiVersion: extensions/v1beta1 @@ -13,7 +14,7 @@ metadata: annotations: nginx.ingress.kubernetes.io/affinity: "cookie" nginx.ingress.kubernetes.io/enable-access-log: "false" - kubernetes.io/ingress.class: nginx-designconsole + kubernetes.io/ingress.class: 'nginx' {{- if eq .Values.tls "SSL" }} nginx.ingress.kubernetes.io/proxy-buffer-size: "2000k" nginx.ingress.kubernetes.io/configuration-snippet: | @@ -30,3 +31,4 @@ spec: serviceName: '{{ .Values.wlsDomain.domainUID }}-cluster-{{ .Values.wlsDomain.oimClusterName | lower | replace "_" "-" }}' servicePort: {{ .Values.wlsDomain.oimServerT3Port }} {{- end }} +{{- end }} diff --git a/OracleIdentityGovernance/kubernetes/3.0.1/design-console-ingress/values.yaml b/OracleIdentityGovernance/kubernetes/design-console-ingress/values.yaml similarity index 66% rename from OracleIdentityGovernance/kubernetes/3.0.1/design-console-ingress/values.yaml rename to OracleIdentityGovernance/kubernetes/design-console-ingress/values.yaml index 04e42786b..8a81cd17d 100644 --- a/OracleIdentityGovernance/kubernetes/3.0.1/design-console-ingress/values.yaml +++ b/OracleIdentityGovernance/kubernetes/design-console-ingress/values.yaml @@ -1,14 +1,13 @@ -# Copyright (c) 2020, Oracle Corporation and/or its affiliates. +# Copyright (c) 2020, 2021, Oracle and/or its affiliates. # Licensed under the Universal Permissive License v 1.0 as shown at https://oss.oracle.com/licenses/upl. # Default values for design-console-ingress. # This is a YAML-formatted file. # Declare variables to be passed into your templates. -# Load balancer type. Supported values are: VOYAGER, NGINX -type: VOYAGER +# Load balancer type. 
Supported values are: NGINX +type: NGINX # Type of Configuration Supported Values are : NONSSL,SSL -# tls: NONSSL tls: NONSSL # TLS secret name if the mode is SSL secretName: dc-tls-cert @@ -20,9 +19,3 @@ wlsDomain: oimClusterName: oim_cluster oimServerT3Port: 14002 -# Voyager specific values -voyager: - # web port - webPort: 30320 - # stats port - statsPort: 30321 diff --git a/OracleIdentityGovernance/kubernetes/domain-lifecycle/README.md b/OracleIdentityGovernance/kubernetes/domain-lifecycle/README.md new file mode 100755 index 000000000..0eeab95d5 --- /dev/null +++ b/OracleIdentityGovernance/kubernetes/domain-lifecycle/README.md @@ -0,0 +1,186 @@ +### Domain life cycle sample scripts + +The operator provides sample scripts to start up or shut down a specific Managed Server or cluster in a deployed domain, or the entire deployed domain. + +**Note**: Prior to running these scripts, you must have previously created and deployed the domain. These scripts make use of [jq](https://stedolan.github.io/jq/) for processing JSON. You must have `jq 1.5 or higher` installed in order to run these scripts. See the installation options on the [jq downlod](https://stedolan.github.io/jq/download/) page. + +These scripts can be helpful when scripting the life cycle of a WebLogic Server domain. For information on how to start, stop, restart, and scale WebLogic Server instances in your domain, see [Domain Life Cycle](https://oracle.github.io/weblogic-kubernetes-operator/userguide/managing-domains/domain-lifecycle). + +#### Scripts to start and stop a WebLogic Server +The `startServer.sh` script starts a WebLogic Server in a domain. For clustered Managed Servers, either it increases the `spec.clusters[].replicas` value for the Managed Server's cluster by `1` or updates the `spec.managedServers[].serverStartPolicy` attribute of the domain resource or both as necessary. 
For the Administration Server, it updates the value of the `spec.adminServer.serverStartPolicy` attribute of the domain resource. For non-clustered Managed Servers, it updates the `spec.managedServers[].serverStartPolicy` attribute of the domain resource. The script provides an option to keep the `spec.clusters[].replicas` value constant for clustered servers. See the script `usage` information by using the `-h` option. + +Use the following command to start the server either by increasing the replica count or by updating the server start policy: +``` +$ startServer.sh -d domain1 -n weblogic-domain-1 -s managed-server1 +[INFO] Updating replica count for cluster 'cluster-1' to 1. +domain.weblogic.oracle/domain1 patched +[INFO] Successfully updated replica count for cluster 'cluster-1' to 1. +``` + +Use the following command to start the server without increasing the replica count: +``` +$ startServer.sh -d domain1 -n weblogic-domain-1 -s managed-server2 -k +[INFO] Patching start policy for 'managed-server2' to 'ALWAYS'. +domain.weblogic.oracle/domain1 patched +[INFO] Successfully patched server 'managed-server2' with 'ALWAYS' start policy. +``` + +The `stopServer.sh` script shuts down a running WebLogic Server in a domain. For clustered Managed Servers, either it decreases the `spec.clusters[].replicas` value for the Managed Server's cluster by `1` or updates the `spec.managedServers[].serverStartPolicy` attribute of the domain resource or both as necessary. For the Administration Server, it updates the value of the `spec.adminServer.serverStartPolicy` attribute of the domain resource. For non-clustered Managed Servers, it updates the `spec.managedServers[].serverStartPolicy` attribute of the domain resource. The script provides an option to keep the `spec.clusters[].replicas` value constant for clustered servers. See the script `usage` information by using the `-h` option. 
+ +Use the following command to stop the server either by decreasing the replica count or by updating the server start policy: +``` +$ stopServer.sh -d domain1 -n weblogic-domain-1 -s managed-server1 +[INFO] Updating replica count for cluster cluster-1 to 0. +domain.weblogic.oracle/domain1 patched +[INFO] Successfully updated replica count for cluster 'cluster-1' to 0. +``` + +Use the following command to stop the server without decreasing the replica count: +``` +$ stopServer.sh -d domain1 -n weblogic-domain-1 -s managed-server2 -k +[INFO] Unsetting the current start policy 'ALWAYS' for 'managed-server2'. +domain.weblogic.oracle/domain1 patched +[INFO] Successfully unset policy 'ALWAYS'. +``` + +### Scripts to start and stop a cluster + +The `startCluster.sh` script starts a cluster by patching the `spec.clusters[].serverStartPolicy` attribute of the domain resource to `IF_NEEDED`. The operator will start the WebLogic Server instance Pods that are part of the cluster after the `serverStartPolicy` attribute is updated to `IF_NEEDED`. See the script `usage` information by using the `-h` option. +``` +$ startCluster.sh -d domain1 -n weblogic-domain-1 -c cluster-1 +[INFO]Patching start policy of cluster 'cluster-1' from 'NEVER' to 'IF_NEEDED'. +domain.weblogic.oracle/domain1 patched +[INFO] Successfully patched cluster 'cluster-1' with 'IF_NEEDED' start policy!. +``` +The `stopCluster.sh` script shuts down a cluster by patching the `spec.clusters[].serverStartPolicy` attribute of the domain resource to `NEVER`. The operator will shut down the WebLogic Server instance Pods that are part of the cluster after the `serverStartPolicy` attribute is updated to `NEVER`. See the script `usage` information by using the `-h` option. +``` +$ stopCluster.sh -d domain1 -n weblogic-domain-1 -c cluster-1 +[INFO] Patching start policy of cluster 'cluster-1' from 'IF_NEEDED' to 'NEVER'. 
+domain.weblogic.oracle/domain1 patched +[INFO] Successfully patched cluster 'cluster-1' with 'NEVER' start policy! +``` +### Scripts to start and stop a domain +The `startDomain.sh` script starts a deployed domain by patching the `spec.serverStartPolicy` attribute of the domain resource to `IF_NEEDED`. The operator will start the WebLogic Server instance Pods that are part of the domain after the `spec.serverStartPolicy` attribute of the domain resource is updated to `IF_NEEDED`. See the script `usage` information by using the `-h` option. +``` +$ startDomain.sh -d domain1 -n weblogic-domain-1 +[INFO] Patching domain 'domain1' from serverStartPolicy='NEVER' to 'IF_NEEDED'. +domain.weblogic.oracle/domain1 patched +[INFO] Successfully patched domain 'domain1' in namespace 'weblogic-domain-1' with 'IF_NEEDED' start policy! +``` + +The `stopDomain.sh` script shuts down a domain by patching the `spec.serverStartPolicy` attribute of the domain resource to `NEVER`. The operator will shut down the WebLogic Server instance Pods that are part of the domain after the `spec.serverStartPolicy` attribute is updated to `NEVER`. See the script `usage` information by using the `-h` option. +``` +$ stopDomain.sh -d domain1 -n weblogic-domain-1 +[INFO] Patching domain 'domain1' in namespace 'weblogic-domain-1' from serverStartPolicy='IF_NEEDED' to 'NEVER'. +domain.weblogic.oracle/domain1 patched +[INFO] Successfully patched domain 'domain1' in namespace 'weblogic-domain-1' with 'NEVER' start policy! +``` + +### Script to scale a WebLogic cluster + +The `scaleCluster.sh` script scales a WebLogic cluster by patching the `spec.clusters[].replicas` attribute of the domain resource to the specified value. The operator will perform the scaling operation for the WebLogic cluster based on the specified value of the `replicas` attribute after its value is updated. See the script `usage` information by using the `-h` option. 
+``` +$ scaleCluster.sh -d domain1 -n weblogic-domain-1 -c cluster-1 -r 3 +[2021-02-26T19:04:14.335000Z][INFO] Patching replicas for cluster 'cluster-1' to '3'. +domain.weblogic.oracle/domain1 patched +[2021-02-26T19:04:14.466000Z][INFO] Successfully patched replicas for cluster 'cluster-1'! +``` + +### Script to view the status of a WebLogic cluster + +The `clusterStatus.sh` script can be used to view the status of a WebLogic cluster in the WebLogic domain managed by the operator. The WebLogic Cluster Status contains information about the minimum, maximum, goal, current, and ready replica count for a WebLogic cluster. This script displays a table containing the status for WebLogic clusters in one or more domains across one or more namespaces. See the script `usage` information by using the `-h` option. + +Use the following command to view the status of all WebLogic clusters in all domains across all namespaces. +```shell +$ clusterStatus.sh + +WebLogic Cluster Status -n "" -d "" -c "": + +namespace domain cluster min max goal current ready +--------- ------ ------- --- --- ---- ------- ----- +ns-kvmt mii-domain1 cluster-1 1 5 5 5 5 +weblogic-domain-1 domain1 cluster-1 0 4 2 2 2 +weblogic-domain-1 domain1 cluster-2 0 4 0 0 0 +``` + +Use the following command to view the status of all WebLogic clusters in 'domain1' in 'weblogic-domain-1' namespace. +``` +$ clusterStatus.sh -d domain1 -n weblogic-domain-1 + +WebLogic Cluster Status -n "weblogic-domain-1" -d "domain1" -c "": + +namespace domain cluster min max goal current ready +--------- ------ ------- --- --- ---- ------- ----- +weblogic-domain-1 domain1 cluster-1 0 4 2 2 2 +weblogic-domain-1 domain1 cluster-2 0 4 0 0 0 +``` + +### Scripts to initiate a rolling restart of a WebLogic domain or cluster + +The `rollDomain.sh` script can be used to initiate a rolling restart of the WebLogic Server Pods in a domain managed by the operator. 
Similarly, the `rollCluster.sh` script can be used to initiate a rolling restart of the WebLogic Server Pods belonging to a WebLogic cluster in a domain managed by the operator. + +The `rollDomain.sh` script updates the value of the `spec.restartVersion` attribute of the domain resource. Then, the operator will do a rolling restart of the Server Pods in the WebLogic domain after the value of the `spec.restartVersion` is updated. You can provide the new value for `spec.restartVersion` as a parameter to the script or the script will automatically generate a new value to trigger the rolling restart. See the script `usage` information by using the `-h` option. + +``` +$ rollDomain.sh -d domain1 -n weblogic-domain-1 +[2021-03-24T04:01:19.733000Z][INFO] Patching restartVersion for domain 'domain1' to '1'. +domain.weblogic.oracle/domain1 patched +[2021-03-24T04:01:19.850000Z][INFO] Successfully patched restartVersion for domain 'domain1'! +``` + +Use the following command to roll the Server Pods in a WebLogic domain with a specific `restartVersion`: +``` +$ rollDomain.sh -r v1 -d domain1 -n weblogic-domain-1 +[2021-03-24T13:43:47.586000Z][INFO] Patching restartVersion for domain 'domain1' to 'v1'. +domain.weblogic.oracle/domain1 patched +[2021-03-24T13:43:47.708000Z][INFO] Successfully patched restartVersion for domain 'domain1'! +``` + +The `rollCluster.sh` script updates the value of the `spec.clusters[].restartVersion` attribute of the domain resource. Then, the operator will do a rolling restart of the WebLogic cluster Server Pods after the value of the `spec.clusters[].restartVersion` is updated. You can provide the new value of the `restartVersion` as a parameter to the script or the script will automatically generate a new value to trigger the rolling restart. See the script `usage` information by using the `-h` option. 
+ +``` +$ rollCluster.sh -c cluster-1 -d domain1 -n weblogic-domain-1 +[2021-03-24T04:03:27.521000Z][INFO] Patching restartVersion for cluster 'cluster-1' to '2'. +domain.weblogic.oracle/domain1 patched +[2021-03-24T04:03:27.669000Z][INFO] Successfully patched restartVersion for cluster 'cluster-1'! +``` + +Use the following command to roll the WebLogic Cluster Servers with a specific `restartVersion`: +``` +$ rollCluster.sh -r v2 -c cluster-1 -d domain1 -n weblogic-domain-1 +[2021-03-24T13:46:16.833000Z][INFO] Patching restartVersion for cluster 'cluster-1' to 'v2'. +domain.weblogic.oracle/domain1 patched +[2021-03-24T13:46:16.975000Z][INFO] Successfully patched restartVersion for cluster 'cluster-1'! +``` + +### Scripts to restart a WebLogic Server in a domain +The `restartServer.sh` script can be used to restart a WebLogic Server in a domain. This script restarts the Server by deleting the Server Pod for the WebLogic Server instance. +``` +$ restartServer.sh -s managed-server1 -d domain1 -n weblogic-domain-1 +[2021-03-24T22:20:22.498000Z][INFO] Initiating restart of 'managed-server1' by deleting server pod 'domain1-managed-server1'. +[2021-03-24T22:20:37.614000Z][INFO] Server restart succeeded ! +``` + +### Scripts to explicitly initiate introspection of a WebLogic domain + +The `introspectDomain.sh` script can be used to rerun a WebLogic domain's introspect job by explicitly initiating the introspection. This script updates the value of the `spec.introspectVersion` attribute of the domain resource. The resulting behavior depends on your domain home source type and other factors, see [Initiating introspection](https://oracle.github.io/weblogic-kubernetes-operator/userguide/managing-domains/domain-lifecycle/introspection/#initiating-introspection) for details. You can provide the new value of the `introspectVersion` as a parameter to the script or the script will automatically generate a new value to trigger the introspection. 
See the script `usage` information by using the `-h` option. + +Use the following command to rerun a domain's introspect job with the `introspectVersion` value generated by the script. +``` +$ introspectDomain.sh -d domain1 -n weblogic-domain-1 +[2021-03-24T21:37:55.989000Z][INFO] Patching introspectVersion for domain 'domain1' to '1'. +domain.weblogic.oracle/domain1 patched +[2021-03-24T21:37:56.110000Z][INFO] Successfully patched introspectVersion for domain 'domain1'! +``` + +Use the following command to rerun a domain's introspect job with a specific `introspectVersion` value. +``` +$ introspectDomain.sh -i v1 -d domain1 -n weblogic-domain-1 +[2021-03-24T21:38:34.369000Z][INFO] Patching introspectVersion for domain 'domain1' to 'v1'. +domain.weblogic.oracle/domain1 patched +[2021-03-24T21:38:34.488000Z][INFO] Successfully patched introspectVersion for domain 'domain1'! +``` + +### Watching the Pods after executing life cycle scripts + +After executing the lifecycle scripts described above for a domain or a cluster or a Server, you can manually run the `kubectl -n MYNS get pods --watch=true --show-labels` command to watch the effect of running the scripts and monitor the status and labels of various Pods. You will need to do 'Ctrl-C' to stop watching the Pods and exit. diff --git a/OracleIdentityGovernance/kubernetes/domain-lifecycle/clusterStatus.sh b/OracleIdentityGovernance/kubernetes/domain-lifecycle/clusterStatus.sh new file mode 100755 index 000000000..8bfeb45f3 --- /dev/null +++ b/OracleIdentityGovernance/kubernetes/domain-lifecycle/clusterStatus.sh @@ -0,0 +1,130 @@ +# !/bin/sh +# Copyright (c) 2021, Oracle and/or its affiliates. +# Licensed under the Universal Permissive License v 1.0 as shown at https://oss.oracle.com/licenses/upl. 
+ +set -eu +set -o pipefail + +function usage() { +cat< ${kubernetesCli} patch domain ${domainUid} \ + -n ${domainNamespace} --type=merge --patch \"${patchJson}\"" + fi + ${kubernetesCli} patch domain ${domainUid} -n ${domainNamespace} --type=merge --patch "${patchJson}" +} + +# timestamp +# purpose: echo timestamp in the form yyyy-mm-ddThh:mm:ss.nnnnnnZ +# example: 2018-10-01T14:00:00.000001Z +function timestamp() { + local timestamp="`date --utc '+%Y-%m-%dT%H:%M:%S.%NZ' 2>&1`" + if [ ! "${timestamp/illegal/xyz}" = "${timestamp}" ]; then + # old shell versions don't support %N or --utc + timestamp="`date -u '+%Y-%m-%dT%H:%M:%S.000000Z' 2>&1`" + fi + echo "${timestamp}" +} + +# +# Function to note that a validate error has occurred +# +function validationError { + printError $* + validateErrors=true +} + +# +# Function to cause the script to fail if there were any validation errors +# +function failIfValidationErrors { + if [ "$validateErrors" = true ]; then + printError 'The errors listed above must be resolved before the script can continue. Please see usage information below.' + usage 1 + fi +} + +# +# Function to lowercase a value and make it a legal DNS1123 name +# $1 - value to convert to DNS legal name +# $2 - return value containing DNS legal name. +function toDNS1123Legal { + local name=$1 + local __result=$2 + local val=`echo "${name}" | tr "[:upper:]" "[:lower:]"` + val=${val//"_"/"-"} + eval $__result="'$val'" +} + diff --git a/OracleIdentityGovernance/kubernetes/domain-lifecycle/introspectDomain.sh b/OracleIdentityGovernance/kubernetes/domain-lifecycle/introspectDomain.sh new file mode 100755 index 000000000..120eccec7 --- /dev/null +++ b/OracleIdentityGovernance/kubernetes/domain-lifecycle/introspectDomain.sh @@ -0,0 +1,105 @@ +# !/bin/sh +# Copyright (c) 2021, Oracle and/or its affiliates. +# Licensed under the Universal Permissive License v 1.0 as shown at https://oss.oracle.com/licenses/upl. 
+# + +script="${BASH_SOURCE[0]}" +scriptDir="$( cd "$( dirname "${script}" )" && pwd )" +source ${scriptDir}/helper.sh +if [ "${debug}" == "true" ]; then set -x; fi; + +function usage() { + + cat << EOF + + This script initiates introspection of a WebLogic domain by updating + the value of 'spec.introspectVersion' attribute of the domain resource. + + Usage: + + $(basename $0) [-n mynamespace] [-d mydomainuid] [-i introspectVersion] [-m kubecli] + + -d : Domain unique-id. Default is 'sample-domain1'. + + -n : Domain namespace. Default is 'sample-domain1-ns'. + + -i : Introspect version. If this parameter is not provided, + then the script will generate the 'introspectVersion' by + incrementing the existing value. If the 'spec.introspectVersion' + doesn't exist or its value is non-numeric, then the script + will set the 'spec.introspectVersion' value to '1'. + + -m : Kubernetes command line interface. Default is 'kubectl' + if KUBERNETES_CLI env variable is not set. Otherwise + the default is the value of KUBERNETES_CLI env variable. + + -v : Enables verbose mode. Default is 'false'. + + -h : This help. 
+ +EOF +exit $1 +} + +kubernetesCli=${KUBERNETES_CLI:-kubectl} +clusterName="" +domainUid="sample-domain1" +domainNamespace="sample-domain1-ns" +verboseMode=false +patchJson="" +introspectVersion="" + +while getopts "vc:n:m:d:i:h" opt; do + case $opt in + n) domainNamespace="${OPTARG}" + ;; + d) domainUid="${OPTARG}" + ;; + i) introspectVersion="${OPTARG}" + ;; + m) kubernetesCli="${OPTARG}" + ;; + v) verboseMode=true; + ;; + h) usage 0 + ;; + *) usage 1 + ;; + esac +done + +set -eu + +# +# Function to perform validations, read files and initialize workspace +# +function initialize { + + validateErrors=false + + validateKubernetesCliAvailable + validateJqAvailable + + failIfValidationErrors +} + +initialize + +# Get the domain in json format +domainJson=$(${kubernetesCli} get domain ${domainUid} -n ${domainNamespace} -o json --ignore-not-found) +if [ -z "${domainJson}" ]; then + printError "Unable to get domain resource for domain '${domainUid}' in namespace '${domainNamespace}'. Please make sure the 'domain_uid' and 'namespace' specified by the '-d' and '-n' arguments are correct. Exiting." + exit 1 +fi + +# if the introspectVersion is not provided, generate the value of introspectVersion +if [ -z "${introspectVersion}" ]; then + generateDomainIntrospectVersion "${domainJson}" introspectVersion +fi + +printInfo "Patching introspectVersion for domain '${domainUid}' to '${introspectVersion}'." +createPatchJsonToUpdateDomainIntrospectVersion "${introspectVersion}" patchJson + +executePatchCommand "${kubernetesCli}" "${domainUid}" "${domainNamespace}" "${patchJson}" "${verboseMode}" + +printInfo "Successfully patched introspectVersion for domain '${domainUid}'!" 
diff --git a/OracleIdentityGovernance/kubernetes/domain-lifecycle/restartServer.sh b/OracleIdentityGovernance/kubernetes/domain-lifecycle/restartServer.sh new file mode 100755 index 000000000..0c7405aba --- /dev/null +++ b/OracleIdentityGovernance/kubernetes/domain-lifecycle/restartServer.sh @@ -0,0 +1,106 @@ +# !/bin/sh +# Copyright (c) 2020, 2021, Oracle and/or its affiliates. +# Licensed under the Universal Permissive License v 1.0 as shown at https://oss.oracle.com/licenses/upl. +# + +script="${BASH_SOURCE[0]}" +scriptDir="$( cd "$( dirname "${script}" )" && pwd )" +source ${scriptDir}/helper.sh +if [ "${debug}" == "true" ]; then set -x; fi; +set -eu + +function usage() { + + cat << EOF + + This script restarts a running WebLogic server in a domain by deleting the server pod. + + Usage: + + $(basename $0) -s myserver [-n mynamespace] [-d mydomainuid] [-m kubecli] + + -s : The WebLogic server name (not the pod name). + This parameter is required. + + -d : Domain unique-id. Default is 'sample-domain1'. + + -n : Domain namespace. Default is 'sample-domain1-ns'. + + -m : Kubernetes command line interface. Default is 'kubectl' + if KUBERNETES_CLI env variable is not set. Otherwise the + default is the value of KUBERNETES_CLI env variable. + + -h : This help. + +EOF +exit $1 +} + +kubernetesCli=${KUBERNETES_CLI:-kubectl} +serverName="" +clusterName="" +domainUid="sample-domain1" +domainNamespace="sample-domain1-ns" +podName="" +legalDNSPodName="" + +while getopts "s:m:n:d:h" opt; do + case $opt in + s) serverName="${OPTARG}" + ;; + n) domainNamespace="${OPTARG}" + ;; + m) kubernetesCli="${OPTARG}" + ;; + d) domainUid="${OPTARG}" + ;; + h) usage 0 + ;; + *) usage 1 + ;; + esac +done + +# +# Function to perform validations, read files and initialize workspace +# +function initialize { + + validateErrors=false + + validateKubernetesCliAvailable + validateJqAvailable + + # Validate that server name parameter is specified. 
+ if [ -z "${serverName}" ]; then + validationError "Please specify the server name using '-s' parameter e.g. '-s managed-server1'." + fi + + failIfValidationErrors +} + +initialize + +# Get the domain in json format +domainJson=$(${kubernetesCli} get domain ${domainUid} -n ${domainNamespace} -o json --ignore-not-found) +if [ -z "${domainJson}" ]; then + printError "Unable to get domain resource for domain '${domainUid}' in namespace '${domainNamespace}'. Please make sure the 'domain_uid' and 'namespace' specified by the '-d' and '-n' arguments are correct. Exiting." + exit 1 +fi + +# Validate that specified server is either part of a cluster or is an independent managed server +validateServerAndFindCluster "${domainUid}" "${domainNamespace}" "${serverName}" isValidServer clusterName isAdminServer +if [ "${isValidServer}" != 'true' ]; then + printError "Server ${serverName} is not part of any cluster and it's not an independent managed server. Please make sure that server name specified is correct." + exit 1 +fi + +podName=${domainUid}-${serverName} +toDNS1123Legal ${podName} legalDNSPodName +printInfo "Initiating restart of '${serverName}' by deleting server pod '${legalDNSPodName}'." +result=$(${kubernetesCli} -n ${domainNamespace} delete pod ${legalDNSPodName} --ignore-not-found) +if [ -z "${result}" ]; then + printError "Server '${serverName}' is not running." +else + printInfo "Server restart succeeded !" +fi diff --git a/OracleIdentityGovernance/kubernetes/domain-lifecycle/rollCluster.sh b/OracleIdentityGovernance/kubernetes/domain-lifecycle/rollCluster.sh new file mode 100755 index 000000000..858e41706 --- /dev/null +++ b/OracleIdentityGovernance/kubernetes/domain-lifecycle/rollCluster.sh @@ -0,0 +1,123 @@ +# !/bin/sh +# Copyright (c) 2021, Oracle and/or its affiliates. +# Licensed under the Universal Permissive License v 1.0 as shown at https://oss.oracle.com/licenses/upl. 
+# + +script="${BASH_SOURCE[0]}" +scriptDir="$( cd "$( dirname "${script}" )" && pwd )" +source ${scriptDir}/helper.sh +if [ "${debug}" == "true" ]; then set -x; fi; + +function usage() { + + cat << EOF + + This script initiates a rolling restart of the WebLogic cluster server pods in a domain by updating + the value of the 'spec.clusters[].restartVersion' attribute of the domain resource. + + Usage: + + $(basename $0) -c mycluster [-n mynamespace] [-d mydomainuid] [-r restartVersion] [-m kubecli] + + -c : Cluster name (required parameter). + + -d : Domain unique-id. Default is 'sample-domain1'. + + -n : Domain namespace. Default is 'sample-domain1-ns'. + + -r : Restart version. If this parameter is not provided, + then the script will generate the 'restartVersion' + value of the cluster by incrementing the existing + value. If the 'restartVersion' value doesn't exist + for the cluster then it will use the incremented value of + domain 'restartVersion'. If the domain 'restartVersion' also + doesn't exist or effective value is non-numeric, then + the script will set the 'restartVersion' value to '1'. + + -m : Kubernetes command line interface. Default is 'kubectl' + if KUBERNETES_CLI env variable is not set. Otherwise + the default is the value of the KUBERNETES_CLI env variable. + + -v : Enables verbose mode. Default is 'false'. + + -h : This help. 
+ +EOF +exit $1 +} + +kubernetesCli=${KUBERNETES_CLI:-kubectl} +clusterName="" +domainUid="sample-domain1" +domainNamespace="sample-domain1-ns" +verboseMode=false +patchJson="" +restartVersion="" + +while getopts "vc:n:m:d:r:h" opt; do + case $opt in + c) clusterName="${OPTARG}" + ;; + n) domainNamespace="${OPTARG}" + ;; + d) domainUid="${OPTARG}" + ;; + r) restartVersion="${OPTARG}" + ;; + m) kubernetesCli="${OPTARG}" + ;; + v) verboseMode=true; + ;; + h) usage 0 + ;; + *) usage 1 + ;; + esac +done + +set -eu + +# +# Function to perform validations, read files and initialize workspace +# +function initialize { + + validateErrors=false + + validateKubernetesCliAvailable + validateJqAvailable + + if [ -z "${clusterName}" ]; then + validationError "Please specify cluster name using '-c' parameter e.g. '-c cluster-1'." + fi + + failIfValidationErrors +} + +initialize + +# Get the domain in json format +domainJson=$(${kubernetesCli} get domain ${domainUid} -n ${domainNamespace} -o json --ignore-not-found) +if [ -z "${domainJson}" ]; then + printError "Unable to get domain resource for domain '${domainUid}' in namespace '${domainNamespace}'. Please make sure the 'domain_uid' and 'namespace' specified by the '-d' and '-n' arguments are correct. Exiting." + exit 1 +fi + +isValidCluster="" +validateClusterName "${domainUid}" "${domainNamespace}" "${clusterName}" isValidCluster +if [ "${isValidCluster}" != 'true' ]; then + printError "cluster ${clusterName} is not part of domain ${domainUid} in namespace ${domainNamespace}. Please make sure that cluster name is correct." + exit 1 +fi + +# if the restartVersion is not provided, generate the value of restartVersion +if [ -z "${restartVersion}" ]; then + generateClusterRestartVersion "${domainJson}" "${clusterName}" restartVersion +fi + +printInfo "Patching restartVersion for cluster '${clusterName}' to '${restartVersion}'." 
+createPatchJsonToUpdateClusterRestartVersion "${domainJson}" "${clusterName}" "${restartVersion}" patchJson + +executePatchCommand "${kubernetesCli}" "${domainUid}" "${domainNamespace}" "${patchJson}" "${verboseMode}" + +printInfo "Successfully patched restartVersion for cluster '${clusterName}'!" diff --git a/OracleIdentityGovernance/kubernetes/domain-lifecycle/rollDomain.sh b/OracleIdentityGovernance/kubernetes/domain-lifecycle/rollDomain.sh new file mode 100755 index 000000000..4c821d8c8 --- /dev/null +++ b/OracleIdentityGovernance/kubernetes/domain-lifecycle/rollDomain.sh @@ -0,0 +1,105 @@ +# !/bin/sh +# Copyright (c) 2021, Oracle and/or its affiliates. +# Licensed under the Universal Permissive License v 1.0 as shown at https://oss.oracle.com/licenses/upl. +# + +script="${BASH_SOURCE[0]}" +scriptDir="$( cd "$( dirname "${script}" )" && pwd )" +source ${scriptDir}/helper.sh +if [ "${debug}" == "true" ]; then set -x; fi; + +function usage() { + + cat << EOF + + This script initiates a rolling restart of pods in a WebLogic domain by updating + the value of the 'spec.restartVersion' attribute of the domain resource. + + Usage: + + $(basename $0) [-n mynamespace] [-d mydomainuid] [-r restartVersion] [-m kubecli] + + -d : Domain unique-id. Default is 'sample-domain1'. + + -n : Domain namespace. Default is 'sample-domain1-ns'. + + -r : Restart version. If this parameter is not provided, + then the script will generate the 'restartVersion' by + incrementing the existing value. If the 'spec.restartVersion' + doesn't exist or its value is non-numeric, then the script + will set the 'spec.restartVersion' value to '1'. + + -m : Kubernetes command line interface. Default is 'kubectl' + if KUBERNETES_CLI env variable is not set. Otherwise + the default is the value of the KUBERNETES_CLI env variable. + + -v : Enables verbose mode. Default is 'false'. + + -h : This help. 
+ +EOF +exit $1 +} + +kubernetesCli=${KUBERNETES_CLI:-kubectl} +clusterName="" +domainUid="sample-domain1" +domainNamespace="sample-domain1-ns" +verboseMode=false +patchJson="" +restartVersion="" + +while getopts "vc:n:m:d:r:h" opt; do + case $opt in + n) domainNamespace="${OPTARG}" + ;; + d) domainUid="${OPTARG}" + ;; + r) restartVersion="${OPTARG}" + ;; + m) kubernetesCli="${OPTARG}" + ;; + v) verboseMode=true; + ;; + h) usage 0 + ;; + *) usage 1 + ;; + esac +done + +set -eu + +# +# Function to perform validations, read files and initialize workspace +# +function initialize { + + validateErrors=false + + validateKubernetesCliAvailable + validateJqAvailable + + failIfValidationErrors +} + +initialize + +# Get the domain in json format +domainJson=$(${kubernetesCli} get domain ${domainUid} -n ${domainNamespace} -o json --ignore-not-found) +if [ -z "${domainJson}" ]; then + printError "Unable to get domain resource for domain '${domainUid}' in namespace '${domainNamespace}'. Please make sure the 'domain_uid' and 'namespace' specified by the '-d' and '-n' arguments are correct. Exiting." + exit 1 +fi + +# if the restartVersion is not provided, generate the value of restartVersion +if [ -z "${restartVersion}" ]; then + generateDomainRestartVersion "${domainJson}" restartVersion +fi + +printInfo "Patching restartVersion for domain '${domainUid}' to '${restartVersion}'." +createPatchJsonToUpdateDomainRestartVersion "${restartVersion}" patchJson + +executePatchCommand "${kubernetesCli}" "${domainUid}" "${domainNamespace}" "${patchJson}" "${verboseMode}" + +printInfo "Successfully patched restartVersion for domain '${domainUid}'!" 
diff --git a/OracleIdentityGovernance/kubernetes/domain-lifecycle/scaleCluster.sh b/OracleIdentityGovernance/kubernetes/domain-lifecycle/scaleCluster.sh new file mode 100755 index 000000000..947dd871c --- /dev/null +++ b/OracleIdentityGovernance/kubernetes/domain-lifecycle/scaleCluster.sh @@ -0,0 +1,122 @@ +# !/bin/sh +# Copyright (c) 2021, Oracle and/or its affiliates. +# Licensed under the Universal Permissive License v 1.0 as shown at https://oss.oracle.com/licenses/upl. +# + +script="${BASH_SOURCE[0]}" +scriptDir="$( cd "$( dirname "${script}" )" && pwd )" +source ${scriptDir}/helper.sh +if [ "${debug}" == "true" ]; then set -x; fi; + +function usage() { + + cat << EOF + + This script scales a WebLogic cluster in a domain by patching the + 'spec.clusters[].replicas' attribute of the domain + resource. This change will cause the operator to perform a scaling + operation for the WebLogic cluster based on the value of replica count. + + Usage: + + $(basename $0) -c mycluster -r replicas [-n mynamespace] [-d mydomainuid] [-m kubecli] + + -c : Cluster name parameter is required. + + -r : Replica count, parameter is required. + + -d : Domain unique-id. Default is 'sample-domain1'. + + -n : Domain namespace. Default is 'sample-domain1-ns'. + + -m : Kubernetes command line interface. Default is 'kubectl' if KUBERNETES_CLI env + variable is not set. Otherwise default is the value of KUBERNETES_CLI env variable. + + -v : Enables verbose mode. Default is 'false'. + + -h : This help. 
+ +EOF +exit $1 +} + +kubernetesCli=${KUBERNETES_CLI:-kubectl} +clusterName="" +domainUid="sample-domain1" +domainNamespace="sample-domain1-ns" +verboseMode=false +patchJson="" +replicas="" + +while getopts "vc:n:m:d:r:h" opt; do + case $opt in + c) clusterName="${OPTARG}" + ;; + n) domainNamespace="${OPTARG}" + ;; + d) domainUid="${OPTARG}" + ;; + r) replicas="${OPTARG}" + ;; + m) kubernetesCli="${OPTARG}" + ;; + v) verboseMode=true; + ;; + h) usage 0 + ;; + *) usage 1 + ;; + esac +done + +set -eu + +# +# Function to perform validations, read files and initialize workspace +# +function initialize { + + validateErrors=false + + validateKubernetesCliAvailable + validateJqAvailable + + if [ -z "${clusterName}" ]; then + validationError "Please specify cluster name using '-c' parameter e.g. '-c cluster-1'." + fi + + if [ -z "${replicas}" ]; then + validationError "Please specify replica count using '-r' parameter e.g. '-r 3'." + fi + + failIfValidationErrors +} + +initialize + +# Get the domain in json format +domainJson=$(${kubernetesCli} get domain ${domainUid} -n ${domainNamespace} -o json --ignore-not-found) +if [ -z "${domainJson}" ]; then + printError "Unable to get domain resource for domain '${domainUid}' in namespace '${domainNamespace}'. Please make sure the 'domain_uid' and 'namespace' specified by the '-d' and '-n' arguments are correct. Exiting." + exit 1 +fi + +isValidCluster="" +validateClusterName "${domainUid}" "${domainNamespace}" "${clusterName}" isValidCluster +if [ "${isValidCluster}" != 'true' ]; then + printError "cluster ${clusterName} is not part of domain ${domainUid} in namespace ${domainNamespace}. Please make sure that cluster name is correct." + exit 1 +fi + +isReplicasInAllowedRange "${domainJson}" "${clusterName}" "${replicas}" replicasInAllowedRange range +if [ "${replicasInAllowedRange}" == 'false' ]; then + printError "Replicas value is not in the allowed range of ${range}. Exiting." 
+ exit 1 +fi + +printInfo "Patching replicas for cluster '${clusterName}' to '${replicas}'." +createPatchJsonToUpdateReplicas "${domainJson}" "${clusterName}" "${replicas}" patchJson + +executePatchCommand "${kubernetesCli}" "${domainUid}" "${domainNamespace}" "${patchJson}" "${verboseMode}" + +printInfo "Successfully patched replicas for cluster '${clusterName}'!" diff --git a/OracleIdentityGovernance/kubernetes/domain-lifecycle/startCluster.sh b/OracleIdentityGovernance/kubernetes/domain-lifecycle/startCluster.sh new file mode 100755 index 000000000..5c8bf034c --- /dev/null +++ b/OracleIdentityGovernance/kubernetes/domain-lifecycle/startCluster.sh @@ -0,0 +1,129 @@ +# !/bin/sh +# Copyright (c) 2020, 2021, Oracle and/or its affiliates. +# Licensed under the Universal Permissive License v 1.0 as shown at https://oss.oracle.com/licenses/upl. +# + +script="${BASH_SOURCE[0]}" +scriptDir="$( cd "$( dirname "${script}" )" && pwd )" +source ${scriptDir}/helper.sh +if [ "${debug}" == "true" ]; then set -x; fi; + +function usage() { + + cat << EOF + + This script starts a WebLogic cluster in a domain by patching + 'spec.clusters[].serverStartPolicy' attribute of the domain + resource to 'IF_NEEDED'. This change will cause the operator to initiate + startup of cluster's WebLogic server instance pods if the pods are not + already running and the spec.replicas or + 'spec.clusters[].serverStartPolicy' is set higher than zero. + + Usage: + + $(basename $0) -c mycluster [-n mynamespace] [-d mydomainuid] [-m kubecli] + + -c : Cluster name (required parameter). + + -d : Domain unique-id. Default is 'sample-domain1'. + + -n : Domain namespace. Default is 'sample-domain1-ns'. + + -m : Kubernetes command line interface. Default is 'kubectl' if KUBERNETES_CLI env + variable is not set. Otherwise default is the value of KUBERNETES_CLI env variable. + + -v : Enables verbose mode. Default is 'false'. + + -h : This help. 
+ +EOF +exit $1 +} + +set -eu + +kubernetesCli=${KUBERNETES_CLI:-kubectl} +clusterName="" +domainUid="sample-domain1" +domainNamespace="sample-domain1-ns" +verboseMode=false +patchJson="" + +while getopts "vc:n:m:d:h" opt; do + case $opt in + c) clusterName="${OPTARG}" + ;; + n) domainNamespace="${OPTARG}" + ;; + d) domainUid="${OPTARG}" + ;; + m) kubernetesCli="${OPTARG}" + ;; + v) verboseMode=true; + ;; + h) usage 0 + ;; + *) usage 1 + ;; + esac +done + +# +# Function to perform validations, read files and initialize workspace +# +function initialize { + + validateErrors=false + + validateKubernetesCliAvailable + validateJqAvailable + + if [ -z "${clusterName}" ]; then + validationError "Please specify cluster name using '-c' parameter e.g. '-c cluster-1'." + fi + + failIfValidationErrors + +} + +initialize + +# Get the domain in json format +domainJson=$(${kubernetesCli} get domain ${domainUid} -n ${domainNamespace} -o json --ignore-not-found) +if [ -z "${domainJson}" ]; then + printError "Unable to get domain resource for domain '${domainUid}' in namespace '${domainNamespace}'. Please make sure the 'domain_uid' and 'namespace' specified by the '-d' and '-n' arguments are correct. Exiting." + exit 1 +fi + +isValidCluster="" +validateClusterName "${domainUid}" "${domainNamespace}" "${clusterName}" isValidCluster +if [ "${isValidCluster}" != 'true' ]; then + printError "cluster ${clusterName} is not part of domain ${domainUid} in namespace ${domainNamespace}. Please make sure that cluster name is correct." + exit 1 +fi + +getDomainPolicy "${domainJson}" domainStartPolicy +# Fail if effective start policy of domain is NEVER or ADMIN_ONLY +if [[ "${domainStartPolicy}" == 'NEVER' || "${domainStartPolicy}" == 'ADMIN_ONLY' ]]; then + printError "Cannot start cluster '${clusterName}', the domain is configured with a 'spec.serverStartPolicy' attribute on the domain resource of 'NEVER' or 'ADMIN_ONLY'." 
+ exit 1 +fi + +# Get server start policy for this cluster +getClusterPolicy "${domainJson}" "${clusterName}" startPolicy +if [ -z "${startPolicy}" ]; then + startPolicy=${domainStartPolicy} +fi + +if [ "${startPolicy}" == 'IF_NEEDED' ]; then + printInfo "No changes needed, exiting. The cluster '${clusterName}' is already started or starting. The effective value of 'spec.clusters[?(clusterName=\"${clusterName}\"].serverStartPolicy' attribute on the domain resource is 'IF_NEEDED'." + exit 0 +fi + +# Set policy value to IF_NEEDED +printInfo "Patching start policy of cluster '${clusterName}' from '${startPolicy}' to 'IF_NEEDED'." +createPatchJsonToUpdateClusterPolicy "${domainJson}" "${clusterName}" "IF_NEEDED" patchJson + +executePatchCommand "${kubernetesCli}" "${domainUid}" "${domainNamespace}" "${patchJson}" "${verboseMode}" + +printInfo "Successfully patched cluster '${clusterName}' with 'IF_NEEDED' start policy!." diff --git a/OracleIdentityGovernance/kubernetes/domain-lifecycle/startDomain.sh b/OracleIdentityGovernance/kubernetes/domain-lifecycle/startDomain.sh new file mode 100755 index 000000000..fea9cbbe5 --- /dev/null +++ b/OracleIdentityGovernance/kubernetes/domain-lifecycle/startDomain.sh @@ -0,0 +1,97 @@ +# !/bin/sh +# Copyright (c) 2020, 2021, Oracle and/or its affiliates. +# Licensed under the Universal Permissive License v 1.0 as shown at https://oss.oracle.com/licenses/upl. +# + +script="${BASH_SOURCE[0]}" +scriptDir="$( cd "$( dirname "${script}" )" && pwd )" +source ${scriptDir}/helper.sh + +function usage() { + + cat << EOF + + This script starts a deployed WebLogic domain by patching 'spec.serverStartPolicy' + attribute of the domain resource to 'IF_NEEDED'. This change will cause the operator + to initiate startup of domain's WebLogic server instance pods if the pods are not + already running. + + Usage: + + $(basename $0) [-n mynamespace] [-d mydomainuid] [-m kubecli] + + -d : Domain unique-id. Default is 'sample-domain1'. 
+ + -n : Domain namespace. Default is 'sample-domain1-ns'. + + -m : Kubernetes command line interface. Default is 'kubectl' if KUBERNETES_CLI env + variable is not set. Otherwise default is the value of KUBERNETES_CLI env variable. + + -v : Enables verbose mode. Default is 'false'. + + -h : This help. + +EOF +exit $1 +} + +kubernetesCli=${KUBERNETES_CLI:-kubectl} +domainUid="sample-domain1" +domainNamespace="sample-domain1-ns" +verboseMode=false + +while getopts "vn:d:m:h" opt; do + case $opt in + n) domainNamespace="${OPTARG}" + ;; + d) domainUid="${OPTARG}" + ;; + m) kubernetesCli="${OPTARG}" + ;; + v) verboseMode=true; + ;; + h) usage 0 + ;; + *) usage 1 + ;; + esac +done + + +set -eu +set -o pipefail + +function initialize { + + validateErrors=false + + validateKubernetesCliAvailable + validateJqAvailable + + failIfValidationErrors +} + +initialize + +# Get the domain in json format +domainJson=$(${kubernetesCli} get domain ${domainUid} -n ${domainNamespace} -o json --ignore-not-found) + +if [ -z "${domainJson}" ]; then + printError "Domain resource for domain '${domainUid}' not found in namespace '${domainNamespace}'. Exiting." + exit 1 +fi + +getDomainPolicy "${domainJson}" serverStartPolicy + +if [ "${serverStartPolicy}" == 'IF_NEEDED' ]; then + printInfo "No changes needed, exiting. The domain '${domainUid}' is already started or starting. The effective value of 'spec.serverStartPolicy' attribute on the domain resource is 'IF_NEEDED'." + exit 0 +fi + +printInfo "Patching domain '${domainUid}' from serverStartPolicy='${serverStartPolicy}' to 'IF_NEEDED'." + +createPatchJsonToUpdateDomainPolicy "IF_NEEDED" patchJson + +executePatchCommand "${kubernetesCli}" "${domainUid}" "${domainNamespace}" "${patchJson}" "${verboseMode}" + +printInfo "Successfully patched domain '${domainUid}' in namespace '${domainNamespace}' with 'IF_NEEDED' start policy!" 
diff --git a/OracleIdentityGovernance/kubernetes/domain-lifecycle/startServer.sh b/OracleIdentityGovernance/kubernetes/domain-lifecycle/startServer.sh new file mode 100755 index 000000000..37b120d71 --- /dev/null +++ b/OracleIdentityGovernance/kubernetes/domain-lifecycle/startServer.sh @@ -0,0 +1,242 @@ +# !/bin/sh +# Copyright (c) 2020, 2021, Oracle and/or its affiliates. +# Licensed under the Universal Permissive License v 1.0 as shown at https://oss.oracle.com/licenses/upl. +# + +# This script starts a WebLogic managed server in a domain. +# Internal code notes :- +# - If server start policy is ALWAYS or policy is IF_NEEDED and the server is selected +# to start based on the replica count, it means that server is already started or is +# in the process of starting. In this case, script exits without making any changes. +# +# - If start policy of servers parent cluster or domain is 'NEVER', script +# fails as server can't be started. +# +# - If server is part of a cluster and keep_replica_constant option is false (the default) +# and the effective start policy of the server is IF_NEEDED and increasing replica count +# will naturally start the server, the script increases the replica count. +# +# - If server is part of a cluster and keep_replica_constant option is false (the default) +# and unsetting policy and increasing the replica count will start this server, script +# unsets the policy and increases replica count. For e.g. if replica count is 1 and +# start policy of server2 is NEVER, unsetting policy and increasing replica count will +# start server2. +# +# - If option to keep replica count constant ('-k') is selected and unsetting start policy +# will naturally start the server, script will unset the policy. For e.g. if replica count +# is 2 and start policy of server2 is NEVER, unsetting policy will start server2. 
+# +# - If above conditions are not true, it implies that either start policy is NEVER or policy +# is IF_NEEDED but server is not next in the order to start. In this case, script sets start +# policy to ALWAYS. For e.g. replica count is 3 and server10 needs to start. The script also +# increments the replica count by default. If option to keep replica count constant ('-k') +# is selected, it only sets the start policy to ALWAYS. +# + +script="${BASH_SOURCE[0]}" +scriptDir="$( cd "$( dirname "${script}" )" && pwd )" +source ${scriptDir}/helper.sh +if [ "${debug}" == "true" ]; then set -x; fi; +set -eu + +function usage() { + + cat << EOF + + This script starts a WebLogic server in a domain. For the managed servers, it either + increases the value of 'spec.clusters[].replicas' by '1' or updates the + 'spec.managedServers[].serverStartPolicy' attribute of the domain + resource or both as necessary for starting the server. For the administration server, it + updates the value of 'spec.adminServer.serverStartPolicy' attribute of the domain resource. + The 'spec.clusters[].replicas' value can be kept constant by using '-k' option. + Please see README.md for more details. + + Usage: + + $(basename $0) -s myserver [-n mynamespace] [-d mydomainuid] [-k] [-m kubecli] [-v] + + -s : The WebLogic server name (not the pod name). + This parameter is required. + + -d : Domain unique-id. Default is 'sample-domain1'. + + -n : Domain namespace. Default is 'sample-domain1-ns'. + + -k : Keep replica count constant for the clustered servers. The default behavior + is to increment the replica count for the clustered servers. This parameter + is ignored for the administration and non-clustered managed servers. + + -m : Kubernetes command line interface. Default is 'kubectl' if KUBERNETES_CLI env + variable is not set. Otherwise default is the value of KUBERNETES_CLI env variable. + + -v : Enables verbose mode. Default is 'false'. + + -h : This help. 
+ +EOF +exit $1 +} + +kubernetesCli=${KUBERNETES_CLI:-kubectl} +serverName="" +clusterName="" +domainUid="sample-domain1" +domainNamespace="sample-domain1-ns" +keepReplicaConstant=false +verboseMode=false +withReplicas="CONSTANT" +withPolicy="CONSTANT" +managedServerPolicy="" +effectivePolicy="" +isValidServer="" +patchJson="" +serverStarted="" +startsByPolicyUnset="" +startsByReplicaIncreaseAndPolicyUnset="" +isAdminServer=false + +while getopts "vkd:n:m:s:h" opt; do + case $opt in + s) serverName="${OPTARG}" + ;; + n) domainNamespace="${OPTARG}" + ;; + d) domainUid="${OPTARG}" + ;; + k) keepReplicaConstant=true; + ;; + m) kubernetesCli="${OPTARG}" + ;; + v) verboseMode=true; + ;; + h) usage 0 + ;; + *) usage 1 + ;; + esac +done + +# +# Function to perform validations, read files and initialize workspace +# +function initialize { + + # Validate the required files exist + validateErrors=false + + validateKubernetesCliAvailable + validateJqAvailable + + # Validate that server name parameter is specified. + if [ -z "${serverName}" ]; then + validationError "Please specify a server name using '-s' parameter e.g. '-s managed-server1'." + fi + + failIfValidationErrors +} + +initialize + +# Get the domain in json format +domainJson=$(${kubernetesCli} get domain ${domainUid} -n ${domainNamespace} -o json --ignore-not-found) +if [ -z "${domainJson}" ]; then + printError "Unable to get domain resource for domain '${domainUid}' in namespace '${domainNamespace}'. Please make sure the 'domain_uid' and 'namespace' specified by the '-d' and '-n' arguments are correct. Exiting." + exit 1 +fi + +# Validate that specified server is either part of a cluster or is an independent managed server +validateServerAndFindCluster "${domainUid}" "${domainNamespace}" "${serverName}" isValidServer clusterName isAdminServer +if [ "${isValidServer}" != 'true' ]; then + printError "Server ${serverName} is not part of any cluster and it's not an independent managed server. 
Please make sure that server name specified is correct." + exit 1 +fi + +getClusterPolicy "${domainJson}" "${clusterName}" clusterPolicy +if [ "${clusterPolicy}" == 'NEVER' ]; then + printError "Cannot start server '${serverName}', the server's parent cluster '.spec.clusters[?(clusterName=\"${clusterName}\"].serverStartPolicy' in the domain resource is set to 'NEVER'." + exit 1 +fi + +getDomainPolicy "${domainJson}" domainPolicy +if [ "${domainPolicy}" == 'NEVER' ] || [[ "${domainPolicy}" == 'ADMIN_ONLY' && "${isAdminServer}" != 'true' ]]; then + printError "Cannot start server '${serverName}', the .spec.serverStartPolicy in the domain resource is set to 'NEVER' or 'ADMIN_ONLY'." + exit 1 +fi + +getEffectivePolicy "${domainJson}" "${serverName}" "${clusterName}" effectivePolicy +if [ "${isAdminServer}" == 'true' ]; then + getEffectiveAdminPolicy "${domainJson}" effectivePolicy + if [[ "${effectivePolicy}" == "IF_NEEDED" || "${effectivePolicy}" == "ALWAYS" ]]; then + printInfo "No changes needed, exiting. Server should be already starting or started because effective sever start policy is '${effectivePolicy}'." + exit 0 + fi +fi + +if [ -n "${clusterName}" ]; then + # Server is part of a cluster, check currently started servers + checkStartedServers "${domainJson}" "${serverName}" "${clusterName}" "${withReplicas}" "${withPolicy}" serverStarted + if [[ ${effectivePolicy} == "IF_NEEDED" && ${serverStarted} == "true" ]]; then + printInfo "No changes needed, exiting. The server should be already started or it's in the process of starting. The start policy for server ${serverName} is ${effectivePolicy} and server is chosen to be started based on current replica count." + exit 0 + elif [[ "${effectivePolicy}" == "ALWAYS" && ${serverStarted} == "true" ]]; then + printInfo "No changes needed, exiting. The server should be already started or it's in the process of starting. The start policy for server ${serverName} is ${effectivePolicy}." 
+ exit 0 + fi +else + # Server is an independent managed server. + if [[ "${effectivePolicy}" == "ALWAYS" || "${effectivePolicy}" == "IF_NEEDED" ]]; then + printInfo "No changes needed, exiting. The server should be already started or it's in the process of starting. The start policy for server ${serverName} is ${effectivePolicy}." + exit 0 + fi +fi + +getServerPolicy "${domainJson}" "${serverName}" managedServerPolicy +createServerStartPolicyPatch "${domainJson}" "${serverName}" "ALWAYS" alwaysStartPolicyPatch + +# if server is part of a cluster and replica count will increase +if [[ -n ${clusterName} && "${keepReplicaConstant}" != 'true' ]]; then + #check if server starts by increasing replicas and unsetting policy + withReplicas="INCREASED" + withPolicy="UNSET" + checkStartedServers "${domainJson}" "${serverName}" "${clusterName}" "${withReplicas}" "${withPolicy}" startsByReplicaIncreaseAndPolicyUnset + createReplicaPatch "${domainJson}" "${clusterName}" "INCREMENT" incrementReplicaPatch replicaCount + if [[ -n ${managedServerPolicy} && ${startsByReplicaIncreaseAndPolicyUnset} == "true" ]]; then + # Server starts by increasing replicas and policy unset, increment and unset + printInfo "Unsetting the current start policy '${managedServerPolicy}' for '${serverName}' and incrementing replica count ${replicaCount}." + createPatchJsonToUnsetPolicyAndUpdateReplica "${domainJson}" "${serverName}" "${incrementReplicaPatch}" patchJson + elif [[ -z ${managedServerPolicy} && ${startsByReplicaIncreaseAndPolicyUnset} == "true" ]]; then + # Start policy is not set, server starts by increasing replicas based on effective policy, increment replicas + printInfo "Updating replica count for cluster '${clusterName}' to ${replicaCount}." 
+ createPatchJsonToUpdateReplica "${incrementReplicaPatch}" patchJson + else + # Patch server policy to always and increment replicas + printInfo "Patching start policy of server '${serverName}' from '${effectivePolicy}' to 'ALWAYS' and \ +incrementing replica count for cluster '${clusterName}' to ${replicaCount}." + createPatchJsonToUpdateReplicaAndPolicy "${incrementReplicaPatch}" "${alwaysStartPolicyPatch}" patchJson + fi +elif [[ -n ${clusterName} && "${keepReplicaConstant}" == 'true' ]]; then + # Replica count needs to stay constant, check if server starts by unsetting policy + withReplicas="CONSTANT" + withPolicy="UNSET" + checkStartedServers "${domainJson}" "${serverName}" "${clusterName}" "${withReplicas}" "${withPolicy}" startsByPolicyUnset + if [[ "${effectivePolicy}" == "NEVER" && ${startsByPolicyUnset} == "true" ]]; then + # Server starts by unsetting policy, unset policy + printInfo "Unsetting the current start policy '${effectivePolicy}' for '${serverName}'." + createPatchJsonToUnsetPolicy "${domainJson}" "${serverName}" patchJson + else + # Patch server policy to always + printInfo "Patching start policy for '${serverName}' to 'ALWAYS'." + createPatchJsonToUpdatePolicy "${alwaysStartPolicyPatch}" patchJson + fi +elif [ "${isAdminServer}" == 'true' ]; then + printInfo "Patching start policy of '${serverName}' from '${effectivePolicy}' to 'IF_NEEDED'." + createPatchJsonToUpdateAdminPolicy "${domainJson}" "IF_NEEDED" patchJson +else + # Server is an independent managed server + printInfo "Unsetting the current start policy '${effectivePolicy}' for '${serverName}'." + createPatchJsonToUnsetPolicy "${domainJson}" "${serverName}" patchJson +fi + +executePatchCommand "${kubernetesCli}" "${domainUid}" "${domainNamespace}" "${patchJson}" "${verboseMode}" + +printInfo "Patch command succeeded !" 
diff --git a/OracleIdentityGovernance/kubernetes/domain-lifecycle/stopCluster.sh b/OracleIdentityGovernance/kubernetes/domain-lifecycle/stopCluster.sh new file mode 100755 index 000000000..6e0410932 --- /dev/null +++ b/OracleIdentityGovernance/kubernetes/domain-lifecycle/stopCluster.sh @@ -0,0 +1,119 @@ +# !/bin/sh +# Copyright (c) 2020, 2021, Oracle and/or its affiliates. +# Licensed under the Universal Permissive License v 1.0 as shown at https://oss.oracle.com/licenses/upl. +# + +script="${BASH_SOURCE[0]}" +scriptDir="$( cd "$( dirname "${script}" )" && pwd )" +source ${scriptDir}/helper.sh +if [ "${debug}" == "true" ]; then set -x; fi; + +function usage() { + + cat << EOF + + This script stops a WebLogic cluster in a domain by patching + 'spec.clusters[].serverStartPolicy' attribute of the domain + resource to 'NEVER'. This change will cause the operator to initiate shutdown + of cluster's WebLogic server instance pods if the pods are running. + + Usage: + + $(basename $0) -c mycluster [-n mynamespace] [-d mydomainuid] [-m kubecli] + + -c : Cluster name (required parameter). + + -d : Domain unique-id. Default is 'sample-domain1'. + + -n : Domain namespace. Default is 'sample-domain1-ns'. + + -m : Kubernetes command line interface. Default is 'kubectl' if KUBERNETES_CLI env + variable is not set. Otherwise default is the value of KUBERNETES_CLI env variable. + + -v : Enables verbose mode. Default is 'false'. + + -h : This help. 
+ +EOF +exit $1 +} + +kubernetesCli=${KUBERNETES_CLI:-kubectl} +clusterName="" +domainUid="sample-domain1" +domainNamespace="sample-domain1-ns" +verboseMode=false +patchJson="" + +while getopts "vc:n:m:d:h" opt; do + case $opt in + c) clusterName="${OPTARG}" + ;; + n) domainNamespace="${OPTARG}" + ;; + d) domainUid="${OPTARG}" + ;; + m) kubernetesCli="${OPTARG}" + ;; + v) verboseMode=true; + ;; + h) usage 0 + ;; + *) usage 1 + ;; + esac +done + +set -eu + +# +# Function to perform validations, read files and initialize workspace +# +function initialize { + + validateErrors=false + + validateKubernetesCliAvailable + validateJqAvailable + + if [ -z "${clusterName}" ]; then + validationError "Please specify cluster name using '-c' parameter e.g. '-c cluster-1'." + fi + + failIfValidationErrors +} + +initialize + +# Get the domain in json format +domainJson=$(${kubernetesCli} get domain ${domainUid} -n ${domainNamespace} -o json --ignore-not-found) +if [ -z "${domainJson}" ]; then + printError "Unable to get domain resource for domain '${domainUid}' in namespace '${domainNamespace}'. Please make sure the 'domain_uid' and 'namespace' specified by the '-d' and '-n' arguments are correct. Exiting." + exit 1 +fi + +isValidCluster="" +validateClusterName "${domainUid}" "${domainNamespace}" "${clusterName}" isValidCluster +if [ "${isValidCluster}" != 'true' ]; then + printError "cluster ${clusterName} is not part of domain ${domainUid} in namespace ${domainNamespace}. Please make sure that cluster name is correct." + exit 1 +fi + +# Get server start policy for this server +getClusterPolicy "${domainJson}" "${clusterName}" startPolicy +if [ -z "${startPolicy}" ]; then + getDomainPolicy "${domainJson}" startPolicy +fi + +if [[ "${startPolicy}" == 'NEVER' || "${startPolicy}" == 'ADMIN_ONLY' ]]; then + printInfo "No changes needed, exiting. The cluster '${clusterName}' is already stopped or stopping. 
The effective value of spec.clusters[?(clusterName="${clusterName}"].serverStartPolicy attribute on the domain resource is 'NEVER' or 'ADMIN_ONLY'." + exit 0 +fi + +# Set policy value to NEVER +printInfo "Patching start policy of cluster '${clusterName}' from '${startPolicy}' to 'NEVER'." +createPatchJsonToUpdateClusterPolicy "${domainJson}" "${clusterName}" "NEVER" patchJson + +executePatchCommand "${kubernetesCli}" "${domainUid}" "${domainNamespace}" "${patchJson}" "${verboseMode}" + +printInfo "Successfully patched cluster '${clusterName}' with 'NEVER' start policy!" diff --git a/OracleIdentityGovernance/kubernetes/domain-lifecycle/stopDomain.sh b/OracleIdentityGovernance/kubernetes/domain-lifecycle/stopDomain.sh new file mode 100755 index 000000000..d62f6b280 --- /dev/null +++ b/OracleIdentityGovernance/kubernetes/domain-lifecycle/stopDomain.sh @@ -0,0 +1,95 @@ +# !/bin/sh +# Copyright (c) 2020, 2021, Oracle and/or its affiliates. +# Licensed under the Universal Permissive License v 1.0 as shown at https://oss.oracle.com/licenses/upl. +# + +script="${BASH_SOURCE[0]}" +scriptDir="$( cd "$( dirname "${script}" )" && pwd )" +source ${scriptDir}/helper.sh + +function usage() { + + cat << EOF + + This script stops a deployed WebLogic domain by patching + 'spec.serverStartPolicy' attribute of domain resource to 'NEVER'. + This change will cause the operator to initiate shutdown of the + domain's WebLogic server instance pods if the pods are running. + + Usage: + + $(basename $0) [-n mynamespace] [-d mydomainuid] [-m kubecli] + + -d : Domain unique-id. Default is 'sample-domain1'. + + -n : Domain namespace. Default is 'sample-domain1-ns'. + + -m : Kubernetes command line interface. Default is 'kubectl' if KUBERNETES_CLI env + variable is not set. Otherwise default is the value of KUBERNETES_CLI env variable. + + -v : Enables verbose mode. Default is 'false'. + + -h : This help. 
+ +EOF +exit $1 +} + +kubernetesCli=${KUBERNETES_CLI:-kubectl} +domainUid="sample-domain1" +domainNamespace="sample-domain1-ns" +verboseMode=false + +while getopts "vn:d:m:h" opt; do + case $opt in + n) domainNamespace="${OPTARG}" + ;; + d) domainUid="${OPTARG}" + ;; + m) kubernetesCli="${OPTARG}" + ;; + v) verboseMode=true; + ;; + h) usage 0 + ;; + *) usage 1 + ;; + esac +done + +set -eu +set -o pipefail + +function initialize { + + validateErrors=false + + validateKubernetesCliAvailable + validateJqAvailable + failIfValidationErrors +} + +initialize + +# Get the domain in json format +domainJson=$(${kubernetesCli} get domain ${domainUid} -n ${domainNamespace} -o json --ignore-not-found) + +if [ -z "${domainJson}" ]; then + printError "Domain resource for domain '${domainUid}' not found in namespace '${domainNamespace}'. Exiting." + exit 1 +fi + +getDomainPolicy "${domainJson}" serverStartPolicy + +if [ "${serverStartPolicy}" == 'NEVER' ]; then + printInfo "No changes needed, exiting. The domain '${domainUid}' is already stopped or stopping. The value of 'spec.serverStartPolicy' attribute on the domain resource is 'NEVER'." + exit 0 +fi + +printInfo "Patching domain '${domainUid}' in namespace '${domainNamespace}' from serverStartPolicy='${serverStartPolicy}' to 'NEVER'." + +createPatchJsonToUpdateDomainPolicy "NEVER" patchJson + +executePatchCommand "${kubernetesCli}" "${domainUid}" "${domainNamespace}" "${patchJson}" "${verboseMode}" + +printInfo "Successfully patched domain '${domainUid}' in namespace '${domainNamespace}' with 'NEVER' start policy!" diff --git a/OracleIdentityGovernance/kubernetes/domain-lifecycle/stopServer.sh b/OracleIdentityGovernance/kubernetes/domain-lifecycle/stopServer.sh new file mode 100755 index 000000000..ec35b4d97 --- /dev/null +++ b/OracleIdentityGovernance/kubernetes/domain-lifecycle/stopServer.sh @@ -0,0 +1,248 @@ +# !/bin/sh +# Copyright (c) 2020, 2021, Oracle and/or its affiliates. 
+# Licensed under the Universal Permissive License v 1.0 as shown at https://oss.oracle.com/licenses/upl. +# + +# This script stops a WebLogic managed server in a domain. +# Internal code notes :- +# - If server start policy is NEVER or policy is IF_NEEDED and the server is not +# selected to start based on the replica count, it means that server is already +# stopped or is in the process of stopping. In this case, script exits without +# making any changes. +# +# - If server is part of a cluster and keep_replica_constant option is false (the default) +# and the effective start policy of the server is IF_NEEDED and decreasing replica count +# will naturally stop the server, the script decreases the replica count. +# +# - If server is part of a cluster and keep_replica_constant option is false (the default) +# and unsetting policy and decreasing the replica count will stop the server, script +# unsets the policy and decreases replica count. For e.g. if replica count is 2 and +# start policy of server2 is ALWAYS, unsetting policy and decreasing replica count will +# stop server2. +# +# - If option to keep replica count constant ('-k') is selected and unsetting start policy +# will naturally stop the server, script will unset the policy. For e.g. if replica count +# is 1 and start policy of server2 is ALWAYS, unsetting policy will stop server2. +# +# - If above conditions are not true, it implies that server policy is IF_NEEDED and server +# is selected to start. In this case, script sets start policy to NEVER. For e.g. replica +# count is 2 and server1 needs to be stopped. The script also decrements the replica count +# by default. If option to keep replica count constant ('-k') is selected, it only sets the +# start policy to NEVER. 
+# + +script="${BASH_SOURCE[0]}" +scriptDir="$( cd "$( dirname "${script}" )" && pwd )" +source ${scriptDir}/helper.sh +if [ "${debug}" == "true" ]; then set -x; fi; +set -eu + +function usage() { + + cat << EOF + + This script stops a running WebLogic server in a domain. For managed servers, it either + decreases the value of 'spec.clusters[].replicas' or updates the + 'spec.managedServers[].serverStartPolicy' attribute of the domain + resource or both as necessary to stop the server. For the administration server, it updates + the value of 'spec.adminServer.serverStartPolicy' attribute of the domain resource. The + 'spec.clusters[].replicas' value can be kept constant by using '-k' option. + Please see README.md for more details. + + Usage: + + $(basename $0) -s myserver [-n mynamespace] [-d mydomainuid] [-k] [-m kubecli] [-v] + + -s : The WebLogic server name (not the pod name). + This parameter is required. + + -d : Domain unique-id. Default is 'sample-domain1'. + + -n : Domain namespace. Default is 'sample-domain1-ns'. + + -k : Keep replica count constant for the clustered servers. The default behavior + is to decrement the replica count for the clustered servers. This parameter + is ignored for the administration and non-clustered managed servers. + + -m : Kubernetes command line interface. Default is 'kubectl' if KUBERNETES_CLI env + variable is not set. Otherwise default is the value of KUBERNETES_CLI env variable. + + -v : Enables verbose mode. Default is 'false'. + + -h : This help. 
+ +EOF +exit $1 +} + +kubernetesCli=${KUBERNETES_CLI:-kubectl} +serverName="" +clusterName="" +domainUid="sample-domain1" +domainNamespace="sample-domain1-ns" +keepReplicaConstant=false +verboseMode=false +serverStartPolicy=NEVER +serverStarted="" +effectivePolicy="" +managedServerPolicy="" +stoppedWhenAlwaysPolicyReset="" +replicasEqualsMinReplicas="" +withReplicas="CONSTANT" +withPolicy="CONSTANT" +patchJson="" +isAdminServer=false + +while getopts "vks:m:n:d:h" opt; do + case $opt in + s) serverName="${OPTARG}" + ;; + n) domainNamespace="${OPTARG}" + ;; + m) kubernetesCli="${OPTARG}" + ;; + d) domainUid="${OPTARG}" + ;; + k) keepReplicaConstant=true; + ;; + v) verboseMode=true; + ;; + h) usage 0 + ;; + *) usage 1 + ;; + esac +done + +# +# Function to perform validations, read files and initialize workspace +# +function initialize { + + validateErrors=false + + validateKubernetesCliAvailable + validateJqAvailable + + # Validate that server name parameter is specified. + if [ -z "${serverName}" ]; then + validationError "Please specify the server name using '-s' parameter e.g. '-s managed-server1'." + fi + + failIfValidationErrors +} + +initialize + +# Get the domain in json format +domainJson=$(${kubernetesCli} get domain ${domainUid} -n ${domainNamespace} -o json --ignore-not-found) +if [ -z "${domainJson}" ]; then + printError "Unable to get domain resource for domain '${domainUid}' in namespace '${domainNamespace}'. Please make sure the 'domain_uid' and 'namespace' specified by the '-d' and '-n' arguments are correct. Exiting." + exit 1 +fi + +# Validate that specified server is either part of a cluster or is an independent managed server +validateServerAndFindCluster "${domainUid}" "${domainNamespace}" "${serverName}" isValidServer clusterName isAdminServer +if [ "${isValidServer}" != 'true' ]; then + printError "Server ${serverName} is not part of any cluster and it's not an independent managed server. Please make sure that server name specified is correct." 
+ exit 1 +fi + +getEffectivePolicy "${domainJson}" "${serverName}" "${clusterName}" effectivePolicy +if [ "${isAdminServer}" == 'true' ]; then + getEffectiveAdminPolicy "${domainJson}" effectivePolicy + if [ "${effectivePolicy}" == "NEVER" ]; then + printInfo "No changes needed, exiting. Server should be already stopping or stopped because effective sever start policy is 'NEVER'." + exit 0 + fi +fi + +if [ -n "${clusterName}" ]; then + # Server is part of a cluster, check currently started servers + checkStartedServers "${domainJson}" "${serverName}" "${clusterName}" "${withReplicas}" "${withPolicy}" serverStarted + if [[ "${effectivePolicy}" == "NEVER" || "${effectivePolicy}" == "ADMIN_ONLY" || "${serverStarted}" != "true" ]]; then + printInfo "No changes needed, exiting. Server should be already stopping or stopped. This is either because of the sever start policy or server is chosen to be stopped based on current replica count." + exit 0 + fi +else + # Server is an independent managed server. + if [ "${effectivePolicy}" == "NEVER" ] || [[ "${effectivePolicy}" == "ADMIN_ONLY" && "${isAdminServer}" != 'true' ]]; then + printInfo "No changes needed, exiting. Server should be already stopping or stopped because effective sever start policy is 'NEVER' or 'ADMIN_ONLY'." + exit 0 + fi +fi + +if [[ -n "${clusterName}" && "${keepReplicaConstant}" == 'false' ]]; then + # check if replica count can decrease below current value + isReplicaCountEqualToMinReplicas "${domainJson}" "${clusterName}" replicasEqualsMinReplicas + if [ "${replicasEqualsMinReplicas}" == 'true' ]; then + printInfo "Not decreasing the replica count value: it is at its minimum. \ + (See 'domain.spec.allowReplicasBelowMinDynClusterSize' and \ + 'domain.status.clusters[].minimumReplicas' for details)." 
+ keepReplicaConstant=true + fi +fi + +# Create server start policy patch with NEVER value +createServerStartPolicyPatch "${domainJson}" "${serverName}" "${serverStartPolicy}" neverStartPolicyPatch +getServerPolicy "${domainJson}" "${serverName}" managedServerPolicy +if [ -n "${managedServerPolicy}" ]; then + effectivePolicy=${managedServerPolicy} +fi +if [[ -n "${clusterName}" && "${effectivePolicy}" == "ALWAYS" ]]; then + # Server is part of a cluster and start policy is ALWAYS. + withReplicas="CONSTANT" + withPolicy="UNSET" + checkStartedServers "${domainJson}" "${serverName}" "${clusterName}" "${withReplicas}" "${withPolicy}" startedWhenAlwaysPolicyReset +fi + +if [[ -n "${clusterName}" && "${keepReplicaConstant}" != 'true' ]]; then + # server is part of a cluster and replica count will decrease + withReplicas="DECREASED" + withPolicy="UNSET" + checkStartedServers "${domainJson}" "${serverName}" "${clusterName}" "${withReplicas}" "${withPolicy}" startedWhenRelicaReducedAndPolicyReset + createReplicaPatch "${domainJson}" "${clusterName}" "DECREMENT" replicaPatch replicaCount + + if [[ -n ${managedServerPolicy} && "${startedWhenRelicaReducedAndPolicyReset}" != "true" ]]; then + # Server shuts down by unsetting start policy and decrementing replica count, unset and decrement + printInfo "Unsetting the current start policy '${managedServerPolicy}' for '${serverName}' \ + and decrementing replica count to ${replicaCount}." + createPatchJsonToUnsetPolicyAndUpdateReplica "${domainJson}" "${serverName}" "${replicaPatch}" patchJson + elif [[ -z ${managedServerPolicy} && "${startedWhenRelicaReducedAndPolicyReset}" != "true" ]]; then + # Start policy is not set, server shuts down by decrementing replica count, decrement replicas + printInfo "Updating replica count for cluster ${clusterName} to ${replicaCount}." 
+ createPatchJsonToUpdateReplica "${replicaPatch}" patchJson + elif [[ ${managedServerPolicy} == "ALWAYS" && "${startedWhenAlwaysPolicyReset}" != "true" ]]; then + # Server shuts down by unsetting the start policy, unset and decrement replicas + printInfo "Unsetting the current start policy '${managedServerPolicy}' for '${serverName}' \ + and decrementing replica count to ${replicaCount}." + createPatchJsonToUnsetPolicyAndUpdateReplica "${domainJson}" "${serverName}" "${replicaPatch}" patchJson + else + # Patch server start policy to NEVER and decrement replica count + printInfo "Patching start policy of server '${serverName}' from '${effectivePolicy}' to 'NEVER' \ + and decrementing replica count for cluster '${clusterName}' to ${replicaCount}." + createPatchJsonToUpdateReplicaAndPolicy "${replicaPatch}" "${neverStartPolicyPatch}" patchJson + fi +elif [[ -n ${clusterName} && "${keepReplicaConstant}" == 'true' ]]; then + # Server is part of a cluster and replica count needs to stay constant + if [[ ${managedServerPolicy} == "ALWAYS" && "${startedWhenAlwaysPolicyReset}" != "true" ]]; then + # Server start policy is AlWAYS and server shuts down by unsetting the policy, unset policy + printInfo "Unsetting the current start policy '${effectivePolicy}' for '${serverName}'." + createPatchJsonToUnsetPolicy "${domainJson}" "${serverName}" patchJson + else + # Patch server start policy to NEVER + printInfo "Patching start policy of '${serverName}' from '${effectivePolicy}' to 'NEVER'." + createPatchJsonToUpdatePolicy "${neverStartPolicyPatch}" patchJson + fi +elif [ "${isAdminServer}" == 'true' ]; then + printInfo "Patching start policy of '${serverName}' from '${effectivePolicy}' to 'NEVER'." + createPatchJsonToUpdateAdminPolicy "${domainJson}" "${serverStartPolicy}" patchJson +else + # Server is an independent managed server, patch server start policy to NEVER + printInfo "Patching start policy of '${serverName}' from '${effectivePolicy}' to 'NEVER'." 
+ createPatchJsonToUpdatePolicy "${neverStartPolicyPatch}" patchJson +fi + +executePatchCommand "${kubernetesCli}" "${domainUid}" "${domainNamespace}" "${patchJson}" "${verboseMode}" + +printInfo "Patch command succeeded !" diff --git a/OracleIdentityGovernance/kubernetes/elasticsearch-and-kibana/README.md b/OracleIdentityGovernance/kubernetes/elasticsearch-and-kibana/README.md new file mode 100755 index 000000000..bd62bcc56 --- /dev/null +++ b/OracleIdentityGovernance/kubernetes/elasticsearch-and-kibana/README.md @@ -0,0 +1,31 @@ +# Sample to deploy Elasticsearch and Kibana + + +When you install the WebLogic operator Helm chart, you can set +`elkIntegrationEnabled` to `true` in your `values.yaml` file to tell the operator to send the contents of the operator's logs to Elasticsearch. + +Typically, you would have already configured Elasticsearch and Kibana in the +Kubernetes cluster, and also would have specified `elasticSearchHost` and `elasticSearchPort` in your `values.yaml` file to point to where Elasticsearch is already running. + +This sample configures the Elasticsearch and Kibana deployments and services. +It's useful for trying out the operator in a Kubernetes cluster that doesn't already +have them configured. + +It runs the Elastic Stack on the same host and port that the operator's Helm chart defaults +to, therefore, you only need to set `elkIntegrationEnabled` to `true` in your +`values.yaml` file. + +To control Elasticsearch memory parameters (Heap allocation and Enabling/Disabling swapping) please open the file `elasticsearch_and_kibana.yaml`, search for env variables of the elasticsearch container and change the values of the following. 
+ +* ES_JAVA_OPTS: value may contain for example -Xms512m -Xmx512m to lower the default memory usage (please be aware that this value is only applicable for demo purpose and it is not the one recommended by Elasticsearch itself) +* bootstrap.memory_lock: value may contain true (enables the usage of mlockall to try to lock the process address space into RAM, preventing any Elasticsearch memory from being swapped out) or false (disables the usage of mlockall to try to lock the process address space into RAM, preventing any Elasticsearch memory from being swapped out). + +To install Elasticsearch and Kibana, use: +```shell +$ kubectl apply -f kubernetes/samples/scripts/elasticsearch-and-kibana/elasticsearch_and_kibana.yaml +``` + +To remove them, use: +```shell +$ kubectl delete -f kubernetes/samples/scripts/elasticsearch-and-kibana/elasticsearch_and_kibana.yaml +``` diff --git a/OracleIdentityGovernance/kubernetes/elasticsearch-and-kibana/elasticsearch_and_kibana.yaml b/OracleIdentityGovernance/kubernetes/elasticsearch-and-kibana/elasticsearch_and_kibana.yaml new file mode 100755 index 000000000..97b0b9186 --- /dev/null +++ b/OracleIdentityGovernance/kubernetes/elasticsearch-and-kibana/elasticsearch_and_kibana.yaml @@ -0,0 +1,117 @@ +# Copyright (c) 2018, 2021, Oracle and/or its affiliates. +# Licensed under the Universal Permissive License v 1.0 as shown at https://oss.oracle.com/licenses/upl. + +# When a user installs the WebLogic operator Helm chart, the user can set +# elkIntegrationEnabled to true in their values.yaml to tell the operator to send the +# contents of the operator's log to Elasticsearch. +# +# Typically, a user would have already configured Elasticsearch and Kibana in the +# Kubernetes cluster, and also would specify elasticSearchHost and elasticSearchPort +# in their values.yaml file to point to where Elasticsearch is already running. +# +# This sample configures the Elasticsearch and Kibana deployments and services. 
+# It's useful for trying out the operator in a Kubernetes cluster that doesn't already +# have them configured. +# +# It runs Elasticstack on the same host and port that the operator's Helm chart defaults +# to, therefore, the customer only needs to set elkIntegrationEnabled to true in their +# values.yaml file. +# +# To configure them, use: +# kubectl apply -f kubernetes/samples/scripts/elasticsearch_and_kibana.yaml +# +# To remove them, use: +# kubectl delete -f kubernetes/samples/scripts/elasticsearch_and_kibana.yaml + +--- +apiVersion: apps/v1 +kind: Deployment +metadata: + namespace: "default" + name: "elasticsearch" + labels: + app: "elasticsearch" +spec: + replicas: 1 + selector: + matchLabels: + app: "elasticsearch" + template: + metadata: + labels: + app: "elasticsearch" + spec: + initContainers: + - name: set-vm-max-map-count + image: busybox + imagePullPolicy: IfNotPresent + command: ['sysctl', '-w', 'vm.max_map_count=262144'] + securityContext: + privileged: true + containers: + - name: "elasticsearch" + image: "elasticsearch:6.8.0" + ports: + - containerPort: 9200 + - containerPort: 9300 + env: + - name: ES_JAVA_OPTS + value: -Xms1024m -Xmx1024m + +--- +kind: "Service" +apiVersion: "v1" +metadata: + namespace: "default" + name: "elasticsearch" +spec: + ports: + - name: "http" + protocol: "TCP" + port: 9200 + targetPort: 9200 + - name: "https" + protocol: "TCP" + port: 9300 + targetPort: 9300 + selector: + app: "elasticsearch" + +--- +apiVersion: "apps/v1" +kind: "Deployment" +metadata: + namespace: "default" + name: "kibana" + labels: + app: "kibana" +spec: + replicas: 1 + selector: + matchLabels: + app: "kibana" + template: + metadata: + labels: + app: "kibana" + spec: + containers: + - name: "kibana" + image: "kibana:6.8.0" + ports: + - containerPort: 5601 + +--- +apiVersion: "v1" +kind: "Service" +metadata: + namespace: "default" + name: "kibana" + labels: + app: "kibana" +spec: + type: "NodePort" + ports: + - port: 5601 + selector: + app: "kibana" 
diff --git a/OracleIdentityGovernance/kubernetes/logging-services/logstash/README.md b/OracleIdentityGovernance/kubernetes/logging-services/logstash/README.md new file mode 100755 index 000000000..1d4a169d3 --- /dev/null +++ b/OracleIdentityGovernance/kubernetes/logging-services/logstash/README.md @@ -0,0 +1,59 @@ +## Publish OracleIdentityGovernance server and diagnostics logs into Elasticsearch + +## Prerequisites +See [here](https://oracle.github.io/weblogic-kubernetes-operator/samples/simple/elastic-stack/) for the steps to integrate Elasticsearch for the WebLogic Kubernetes operator. + +Before deploying the WebLogic Kubernetes operator edit `values.yaml` in weblogic-kubernetes-operator/kubernetes/charts/weblogic-operator/ to enable elastic search integration. +Configure the following variables: +```bash +# elkIntegrationEnabled specifies whether or not ELK integration is enabled. +elkIntegrationEnabled: true +# logStashImage specifies the docker image containing logstash. +# This parameter is ignored if 'elkIntegrationEnabled' is false. +logStashImage: "logstash:6.6.0" + +# elasticSearchHost specifies the hostname of where Elasticsearch is running. +# This parameter is ignored if 'elkIntegrationEnabled' is false. +elasticSearchHost: "elasticsearch.default.svc.cluster.local" + +# elasticSearchPort specifies the port number of where Elasticsearch is running. +# This parameter is ignored if 'elkIntegrationEnabled' is false. +elasticSearchPort: 9200 +``` +Deployment of WebLogic Kubernetes operator with above changes, will create an additional logstash container as sidecar. This logstash container will push the operator logs to the configured Elasticsearch server. + +### WebLogic Server logs + +The WebLogic server logs or diagnostics logs can be pushed to Elasticsearch server using logstash pod. The logstash pod should have access to the shared domain home or the log location. The persistent volume of the domain home can be used in the logstash pod. 
+ +### Create the logstash pod + +1. Get Domain home persistence volume claim details +Get the persistent volume details of the domain home of the WebLogic server(s). + + ```bash + $ kubectl get pvc -n oimcluster + ``` + +1. Create logstash configuration. +Create logstash configuration file. The logstash configuration file can be loaded from a volume. + ```bash + $ kubectl cp logstash.conf oimcluster/oimcluster-adminserver:/u01/oracle/user_projects/domains --namespace oimcluster + ``` + + You can use sample logstash configuration file generated to push server and diagnostic logs of all servers available at DOMAIN_HOME/servers/<server_name>/logs/<server_name>-diagnostic.log + +1. Copy the logstash.conf into say /u01/oracle/user_projects/domains so that it can be used for logstash deployment, using the Administration Server pod. + +1. Create deployment YAML for logstash pod. +You can use sample logstash.yaml file generated to create deployment for logstash pod. The mounted persistent volume of the domain home will provide access to the WebLogic server logs to logstash pod. +Make sure to point the logstash configuration file to correct location and also correct domain home persistence volume claim. + +1. Deploy logstash to start publishing logs to Elasticsearch: + + ```bash + $ kubectl create -f logstash.yaml + ``` + +1. Now, you can view the diagnostics logs using Kibana with index pattern `logstash-*`. + diff --git a/OracleIdentityGovernance/kubernetes/logging-services/logstash/logstash.conf b/OracleIdentityGovernance/kubernetes/logging-services/logstash/logstash.conf new file mode 100755 index 000000000..760939f1a --- /dev/null +++ b/OracleIdentityGovernance/kubernetes/logging-services/logstash/logstash.conf @@ -0,0 +1,25 @@ +# Copyright (c) 2021, Oracle and/or its affiliates. +# Licensed under the Universal Permissive License v 1.0 as shown at https://oss.oracle.com/licenses/upl. 
+# + +input { + file { + path => "/u01/oracle/user_projects/domains/oimcluster/servers/**/logs/*-diagnostic.log" + start_position => beginning + } + file { + path => "/u01/oracle/user_projects/domains/logs/oimcluster/*.log" + start_position => beginning + } +} + +filter { + grok { + match => [ "message", "<%{DATA:log_timestamp}> <%{WORD:log_level}> <%{WORD:thread}> <%{HOSTNAME:hostname}> <%{HOSTNAME:servername}> <%{DATA:timer}> <<%{DATA:kernel}>> <> <%{DATA:uuid}> <%{NUMBER:timestamp}> <%{DATA:misc}> <%{DATA:log_number}> <%{DATA:log_message}>" ] + } +} +output { + elasticsearch { + hosts => ["elasticsearch.default.svc.cluster.local:9200"] + } +} diff --git a/OracleIdentityGovernance/kubernetes/logging-services/logstash/logstash.yaml b/OracleIdentityGovernance/kubernetes/logging-services/logstash/logstash.yaml new file mode 100755 index 000000000..71d8f30a0 --- /dev/null +++ b/OracleIdentityGovernance/kubernetes/logging-services/logstash/logstash.yaml @@ -0,0 +1,39 @@ +# Copyright (c) 2021, Oracle and/or its affiliates. +# Licensed under the Universal Permissive License v 1.0 as shown at https://oss.oracle.com/licenses/upl. 
+# + +apiVersion: apps/v1 +kind: Deployment +metadata: + name: logstash + namespace: oimcluster +spec: + selector: + matchLabels: + app: logstash + template: # create pods using pod definition in this template + metadata: + labels: + app: logstash + spec: + volumes: + - name: domain-storage-volume + persistentVolumeClaim: + claimName: domain-pvc + - name: shared-logs + emptyDir: {} + containers: + - name: logstash + image: logstash:6.6.0 + command: ["/bin/sh"] + args: ["/usr/share/logstash/bin/logstash", "-f", "/u01/oracle/user_projects/domains/logstash.conf"] + imagePullPolicy: IfNotPresent + volumeMounts: + - mountPath: /u01/oracle/user_projects + name: domain-storage-volume + - name: shared-logs + mountPath: /shared-logs + ports: + - containerPort: 5044 + name: logstash + diff --git a/OracleIdentityGovernance/kubernetes/logging-services/weblogic-logging-exporter/README.md b/OracleIdentityGovernance/kubernetes/logging-services/weblogic-logging-exporter/README.md new file mode 100755 index 000000000..c4a680071 --- /dev/null +++ b/OracleIdentityGovernance/kubernetes/logging-services/weblogic-logging-exporter/README.md @@ -0,0 +1,131 @@ +## Publish WebLogic Server logs into Elasticsearch + +The WebLogic Logging Exporter adds a log event handler to WebLogic Server, such that WebLogic Server logs can be integrated into Elastic Stack in Kubernetes directly, by using the Elasticsearch REST API. + +## Prerequisite + +This document assumes that you have already deployed Elasticsearch/Kibana environment. If you have not, please use a sample/demo deployment of Elasticsearch/Kibana from WebLogic Kubernetes operator. 
+ +To deploy Elasticsearch and Kibana on the Kubernetes cluster: +```bash +$ kubectl create -f https://raw.githubusercontent.com/oracle/weblogic-kubernetes-operator/master/kubernetes/samples/scripts/elasticsearch-and-kibana/elasticsearch_and_kibana.yaml +``` + +Follow these steps to setup WebLogic Logging Exporter in a WebLogic operator environment and push the WebLogic server logs to Elasticsearch/Kibana + +1. Download WebLogic logging exporter binaries + + The WebLogic logging exporter pre-built binaries are available in the github releases page: [Release 1.0.1](https://github.com/oracle/weblogic-logging-exporter/releases) + + ```bash + $ wget https://github.com/oracle/weblogic-logging-exporter/releases/download/v1.0.1/weblogic-logging-exporter.jar + ``` + + Download weblogic-logging-exporter.jar from the github release link above. Also download dependency jar - snakeyaml-1.27.jar from Maven Central. + + ```bash + $ wget -O snakeyaml-1.27.jar https://search.maven.org/remotecontent?filepath=org/yaml/snakeyaml/1.27/snakeyaml-1.27.jar + ``` +1. Copy JAR files into the Kubernetes WebLogic Administration Server Pod + + Copy weblogic-logging-exporter.jar and snakeyaml-1.27.jar to the domain home folder in the Administration server pod. + + ```bash + $ kubectl cp weblogic-logging-exporter.jar oimcluster/oimcluster-adminserver:/u01/oracle/user_projects/domains/oimcluster/ + $ kubectl cp snakeyaml-1.27.jar oimcluster/oimcluster-adminserver:/u01/oracle/user_projects/domains/oimcluster/ + ``` + +1. Add a startup class to the domain configuration + + In this step, we configure weblogic-logging-exporter JAR as a startup class in the WebLogic servers where we intend to collect the logs. + + a) In the Administration Console, navigate to `Environment` then `Startup and Shutdown classes` in the main menu. + + b) Add a new Startup class. You may choose any descriptive name and the class name must be `weblogic.logging.exporter.Startup`. 
+ + c) Target the startup class to each server that you want to export logs from. + + You can verify this by checking for the update in your config.xml which should be similar to this example: + + ```xml + <startup-class> + <name>LoggingExporterStartupClass</name> + <target>AdminServer</target> + <class-name>weblogic.logging.exporter.Startup</class-name> + </startup-class> + ``` + +1. Update WebLogic Server CLASS Path. + + In this step, we set the class path for weblogic-logging-exporter and its dependencies. + + a) Copy setDomainEnv.sh from the pod to local folder. + ```bash + $ kubectl cp oimcluster/oimcluster-adminserver:/u01/oracle/user_projects/domains/oimcluster/bin/setDomainEnv.sh setDomainEnv.sh + ``` + b) Modify setDomainEnv.sh to update the Server Class path. + ```bash + CLASSPATH=/u01/oracle/user_projects/domains/oimcluster/weblogic-logging-exporter.jar:/u01/oracle/user_projects/domains/oimcluster/snakeyaml-1.27.jar:${CLASSPATH} + export CLASSPATH + ``` + + c) Copy back the modified setDomainEnv.sh to the pod. + ```bash + $ kubectl cp setDomainEnv.sh oimcluster/oimcluster-adminserver:/u01/oracle/user_projects/domains/oimcluster/bin/setDomainEnv.sh + ``` + +1. Create configuration file for the WebLogic Logging Exporter. +Copy WebLogicLoggingExporter.yaml to the domain folder in the WebLogic server pod. YAML specifies the elasticsearch server host and port number. + ```bash + $ kubectl cp WebLogicLoggingExporter.yaml oimcluster/oimcluster-adminserver:/u01/oracle/user_projects/domains/oimcluster/config/ + ``` + +1. Restart WebLogic Servers + + Now we can restart the WebLogic servers for the weblogic-logging-exporter to get loaded in the servers. + + To restart the servers, use stopDomain.sh and startDomain.sh scripts from https://github.com/oracle/weblogic-kubernetes-operator/tree/master/kubernetes/samples/scripts/domain-lifecycle + + The stopDomain.sh script shuts down a domain by patching the `spec.serverStartPolicy` attribute of the domain resource to `NEVER`. 
The operator will shut down the WebLogic Server instance Pods that are part of the domain after the `spec.serverStartPolicy` attribute is updated to `NEVER`. See the script usage information by using the -h option. + + ```bash + $ stopDomain.sh -d oimcluster -n oimcluster + ``` + Sample output: + ```bash + [INFO] Patching domain 'oimcluster' in namespace 'oimcluster' from serverStartPolicy='IF_NEEDED' to 'NEVER'. + domain.weblogic.oracle/oimcluster patched + [INFO] Successfully patched domain 'oimcluster' in namespace 'oimcluster' with 'NEVER' start policy! + ``` + + Verify servers by checking the pod status. + ```bash + $ kubectl get pods -n oimcluster + ``` + + After all the servers are shutdown, run startDomain.sh script to start again. + + The startDomain.sh script starts a deployed domain by patching the `spec.serverStartPolicy` attribute of the domain resource to `IF_NEEDED`. The operator will start the WebLogic Server instance Pods that are part of the domain after the `spec.serverStartPolicy` attribute of the domain resource is updated to `IF_NEEDED`. See the script usage information by using the -h option. + + ```bash + $ startDomain.sh -d oimcluster -n oimcluster + ``` + Sample output: + ```bash + [INFO] Patching domain 'oimcluster' from serverStartPolicy='NEVER' to 'IF_NEEDED'. + domain.weblogic.oracle/oimcluster patched + [INFO] Successfully patched domain 'oimcluster' in namespace 'oimcluster' with 'IF_NEEDED' start policy! + ``` + + Verify servers by checking the pod status. Pod status will be RUNNING. + ```bash + $ kubectl get pods -n oimcluster + ``` + In the server logs, you will be able to see the weblogic-logging-exporter class being called. + +1. Create an index pattern in Kibana + + We need to create an index pattern in Kibana for the logs to be available in the dashboard. + + Create an index pattern `wls*` in `Kibana` > `Management`. 
After the server starts, you will be able to see the log data from the WebLogic servers in the Kibana dashboard, + diff --git a/OracleIdentityGovernance/kubernetes/logging-services/weblogic-logging-exporter/WebLogicLoggingExporter.yaml b/OracleIdentityGovernance/kubernetes/logging-services/weblogic-logging-exporter/WebLogicLoggingExporter.yaml new file mode 100755 index 000000000..0ad5cf944 --- /dev/null +++ b/OracleIdentityGovernance/kubernetes/logging-services/weblogic-logging-exporter/WebLogicLoggingExporter.yaml @@ -0,0 +1,13 @@ +# Copyright (c) 2021, Oracle and/or its affiliates. +# Licensed under the Universal Permissive License v 1.0 as shown at https://oss.oracle.com/licenses/upl. +# + +weblogicLoggingIndexName: wls +publishHost: elasticsearch.default.svc.cluster.local +publishPort: 9200 +domainUID: oimcluster +weblogicLoggingExporterEnabled: true +weblogicLoggingExporterSeverity: Notice +weblogicLoggingExporterBulkSize: 1 + + diff --git a/OracleIdentityGovernance/kubernetes/monitoring-service/README.md b/OracleIdentityGovernance/kubernetes/monitoring-service/README.md new file mode 100755 index 000000000..8a43c25bb --- /dev/null +++ b/OracleIdentityGovernance/kubernetes/monitoring-service/README.md @@ -0,0 +1,120 @@ +## Monitor the OracleIdentityGovernance instance using Prometheus and Grafana +Using the `WebLogic Monitoring Exporter` you can scrape runtime information from a running OracleIdentityGovernance instance and monitor them using Prometheus and Grafana. + +### Prerequisites + +- Have Docker and a Kubernetes cluster running and have `kubectl` installed and configured. +- Have Helm installed. +- An OracleIdentityGovernance domain deployed by `weblogic-operator` is running in the Kubernetes cluster. + +### Prepare to use the setup monitoring script + +The sample scripts for setup monitoring for OracleIdentityGovernance domain are available at `${WORKDIR}/monitoring-service`. 
+ +You must edit `monitoring-inputs.yaml`(or a copy of it) to provide the details of your domain. Refer to the configuration parameters below to understand the information that you must provide in this file. + +#### Configuration parameters + +The following parameters can be provided in the inputs file. + +| Parameter | Description | Default | +| --- | --- | --- | +| `domainUID` | domainUID of the OracleIdentityGovernance domain. | `oimcluster` | +| `domainNamespace` | Kubernetes namespace of the OracleIdentityGovernance domain. | `oimcluster` | +| `setupKubePrometheusStack` | Boolean value indicating whether kube-prometheus-stack (Prometheus, Grafana and Alertmanager) to be installed | `true` | +| `additionalParamForKubePrometheusStack` | The script install's kube-prometheus-stack with `service.type` as NodePort and values for `service.nodePort` as per the parameters defined in `monitoring-inputs.yaml`. Use `additionalParamForKubePrometheusStack` parameter to further configure with additional parameters as per [values.yaml](https://github.com/prometheus-community/helm-charts/blob/main/charts/kube-prometheus-stack/values.yaml). Sample value to disable NodeExporter, Prometheus-Operator TLS support and Admission webhook support for PrometheusRules resources is `--set nodeExporter.enabled=false --set prometheusOperator.tls.enabled=false --set prometheusOperator.admissionWebhooks.enabled=false`| | +| `monitoringNamespace` | Kubernetes namespace for monitoring setup. | `monitoring` | +| `adminServerName` | Name of the Administration Server. | `AdminServer` | +| `adminServerPort` | Port number for the Administration Server inside the Kubernetes cluster. | `7001` | +| `soaClusterName` | Name of the soaCluster. | `soa_cluster` | +| `soaManagedServerPort` | Port number of the managed servers in the soaCluster. | `8001` | +| `wlsMonitoringExporterTosoaCluster` | Boolean value indicating whether to deploy WebLogic Monitoring Exporter to soaCluster. 
| `false` | +| `oimClusterName` | Name of the oimCluster. | `oim_cluster` | +| `oimManagedServerPort` | Port number of the managed servers in the oimCluster. | `14000` | +| `wlsMonitoringExporterTooimCluster` | Boolean value indicating whether to deploy WebLogic Monitoring Exporter to oimCluster. | `false` | +| `exposeMonitoringNodePort` | Boolean value indicating if the Monitoring Services (Prometheus, Grafana and Alertmanager) are exposed outside of the Kubernetes cluster. | `false` | +| `prometheusNodePort` | Port number of the Prometheus outside the Kubernetes cluster. | `32101` | +| `grafanaNodePort` | Port number of the Grafana outside the Kubernetes cluster. | `32100` | +| `alertmanagerNodePort` | Port number of the Alertmanager outside the Kubernetes cluster. | `32102` | +| `weblogicCredentialsSecretName` | Name of the Kubernetes secret which has Administration Server’s user name and password. | `oimcluster-domain-credentials` | + +Note that the values specified in the `monitoring-inputs.yaml` file will be used to install kube-prometheus-stack (Prometheus, Grafana and Alertmanager) and deploy WebLogic Monitoring Exporter into the OracleIdentityGovernance domain. Hence, ensure that the domain-specific values are the same as those used during domain creation. + +### Run the setup monitoring script + +Update the values in `monitoring-inputs.yaml` as per your requirement and run the `setup-monitoring.sh` script, specifying your inputs file: + +```bash +$ cd ${WORKDIR}/monitoring-service +$ ./setup-monitoring.sh \ + -i monitoring-inputs.yaml +``` +The script will perform the following steps: + +- Helm install `prometheus-community/kube-prometheus-stack` of version "16.5.0" if `setupKubePrometheusStack` is set to `true`. +- Deploys WebLogic Monitoring Exporter to Administration Server. +- Deploys WebLogic Monitoring Exporter to `soaCluster` if `wlsMonitoringExporterTosoaCluster` is set to `true`. 
+- Exposes the Monitoring Services (Prometheus at `32101`, Grafana at `32100` and Alertmanager at `32102`) outside of the Kubernetes cluster if `exposeMonitoringNodePort` is set to `true`. +- Imports the WebLogic Server Grafana Dashboard if `setupKubePrometheusStack` is set to `true`. +- Deploys WebLogic Monitoring Exporter to Administration Server. +- Deploys WebLogic Monitoring Exporter to `oimCluster` if `wlsMonitoringExporterTooimCluster` is set to `true`. +- Exposes the Monitoring Services (Prometheus at `32101`, Grafana at `32100` and Alertmanager at `32102`) outside of the Kubernetes cluster if `exposeMonitoringNodePort` is set to `true`. +- Imports the WebLogic Server Grafana Dashboard if `setupKubePrometheusStack` is set to `true`. + +### Verify the results +The setup monitoring script will report failure if there was any error. However, verify that required resources were created by the script. + +#### Verify the kube-prometheus-stack + +To confirm that `prometheus-community/kube-prometheus-stack` was installed when `setupKubePrometheusStack` is set to `true`, run the following command: + +```bash +$ helm ls -n <monitoring-namespace> +``` +Replace `<monitoring-namespace>` with the value of the Kubernetes namespace used for monitoring. + +Sample output: +```bash +$ helm ls -n monitoring +NAME NAMESPACE REVISION UPDATED STATUS CHART APP VERSION +monitoring monitoring 1 2021-06-18 12:58:35.177221969 +0000 UTC deployed kube-prometheus-stack-16.5.0 0.48.0 +$ +``` + +#### Verify the Prometheus, Grafana and Alertmanager setup + +When `exposeMonitoringNodePort` was set to `true`, verify that monitoring services are accessible outside of the Kubernetes cluster: + +- `32100` is the external port for Grafana, with credentials `admin:admin` +- `32101` is the external port for Prometheus +- `32102` is the external port for Alertmanager + +#### Verify the service discovery of WebLogic Monitoring Exporter + +Verify whether Prometheus is able to discover wls-exporter and collect the metrics: + +1. 
Access the Prometheus dashboard at http://mycompany.com:32101/ + +1. Navigate to Status to see the Service Discovery details. + +1. Verify that wls-exporter is listed in the discovered services. + +#### Verify the WebLogic Server dashboard + +You can access the Grafana dashboard at http://mycompany.com:32100/. + +1. Log in to the Grafana dashboard with username: `admin` and password: `admin`. + +1. Navigate to "WebLogic Server Dashboard" under General and verify. + +### Delete the monitoring setup + +To delete the monitoring setup created by [Run the setup monitoring script](#run-the-setup-monitoring-script), run the following command: + +```bash +$ cd ${WORKDIR}/monitoring-service +$ ./delete-monitoring.sh \ + -i monitoring-inputs.yaml +``` + + diff --git a/OracleIdentityGovernance/kubernetes/monitoring-service/config/config.yml.template b/OracleIdentityGovernance/kubernetes/monitoring-service/config/config.yml.template new file mode 100755 index 000000000..792f64d27 --- /dev/null +++ b/OracleIdentityGovernance/kubernetes/monitoring-service/config/config.yml.template @@ -0,0 +1,64 @@ +# Copyright (c) 2021, Oracle and/or its affiliates. +# Licensed under the Universal Permissive License v 1.0 as shown at https://oss.oracle.com/licenses/upl. 
+# +metricsNameSnakeCase: true +restPort: %PORT% +queries: +- key: name + keyName: location + prefix: wls_server_ + applicationRuntimes: + key: name + keyName: app + componentRuntimes: + prefix: wls_webapp_config_ + type: WebAppComponentRuntime + key: name + values: [deploymentState, contextRoot, sourceInfo, openSessionsHighCount, openSessionsCurrentCount, sessionsOpenedTotalCount, sessionCookieMaxAgeSecs, sessionInvalidationIntervalSecs, sessionTimeoutSecs, singleThreadedServletPoolSize, sessionIDLength, servletReloadCheckSecs, jSPPageCheckSecs] + servlets: + prefix: wls_servlet_ + key: servletName + +- JVMRuntime: + prefix: wls_jvm_ + key: name + +- executeQueueRuntimes: + prefix: wls_socketmuxer_ + key: name + values: [pendingRequestCurrentCount] + +- workManagerRuntimes: + prefix: wls_workmanager_ + key: name + values: [stuckThreadCount, pendingRequests, completedRequests] + +- threadPoolRuntime: + prefix: wls_threadpool_ + key: name + values: [executeThreadTotalCount, queueLength, stuckThreadCount, hoggingThreadCount] + +- JMSRuntime: + key: name + keyName: jmsruntime + prefix: wls_jmsruntime_ + JMSServers: + prefix: wls_jms_ + key: name + keyName: jmsserver + destinations: + prefix: wls_jms_dest_ + key: name + keyName: destination + +- persistentStoreRuntimes: + prefix: wls_persistentstore_ + key: name +- JDBCServiceRuntime: + JDBCDataSourceRuntimeMBeans: + prefix: wls_datasource_ + key: name +- JTARuntime: + prefix: wls_jta_ + key: name + diff --git a/OracleIdentityGovernance/kubernetes/monitoring-service/config/weblogic-server-dashboard-import.json b/OracleIdentityGovernance/kubernetes/monitoring-service/config/weblogic-server-dashboard-import.json new file mode 100644 index 000000000..0b8444e35 --- /dev/null +++ b/OracleIdentityGovernance/kubernetes/monitoring-service/config/weblogic-server-dashboard-import.json @@ -0,0 +1,3312 @@ +{ + "__inputs": [], + "__requires": [ + { + "type": "grafana", + "id": "grafana", + "name": "Grafana", + "version": "5.2.4" + 
}, + { + "type": "panel", + "id": "graph", + "name": "Graph", + "version": "5.0.0" + }, + { + "type": "panel", + "id": "singlestat", + "name": "Singlestat", + "version": "5.0.0" + }, + { + "type": "panel", + "id": "table", + "name": "Table", + "version": "5.0.0" + } + ], + "annotations": { + "list": [ + { + "builtIn": 1, + "datasource": "-- Grafana --", + "enable": true, + "hide": true, + "iconColor": "rgba(0, 211, 255, 1)", + "name": "Annotations & Alerts", + "type": "dashboard" + } + ] + }, + "editable": true, + "gnetId": null, + "graphTooltip": 0, + "id": null, + "iteration": 1563266678971, + "links": [], + "panels": [ + { + "collapsed": false, + "gridPos": { + "h": 1, + "w": 24, + "x": 0, + "y": 0 + }, + "id": 32, + "panels": [], + "title": "Servers", + "type": "row" + }, + { + "cacheTimeout": null, + "colorBackground": false, + "colorValue": false, + "colors": [ + "#299c46", + "rgba(237, 129, 40, 0.89)", + "#d44a3a" + ], + "datasource": "$datasource", + "decimals": 0, + "format": "none", + "gauge": { + "maxValue": 100, + "minValue": 0, + "show": false, + "thresholdLabels": false, + "thresholdMarkers": true + }, + "gridPos": { + "h": 3, + "w": 13, + "x": 0, + "y": 1 + }, + "hideTimeOverride": true, + "id": 16, + "interval": null, + "links": [], + "mappingType": 1, + "mappingTypes": [ + { + "name": "value to text", + "value": 1 + }, + { + "name": "range to text", + "value": 2 + } + ], + "maxDataPoints": 100, + "nullPointMode": "connected", + "nullText": null, + "postfix": "", + "postfixFontSize": "50%", + "prefix": "", + "prefixFontSize": "50%", + "rangeMaps": [ + { + "from": "null", + "text": "N/A", + "to": "null" + } + ], + "sparkline": { + "fillColor": "rgba(31, 118, 189, 0.18)", + "full": false, + "lineColor": "rgb(31, 120, 193)", + "show": false + }, + "tableColumn": "", + "targets": [ + { + "expr": "count(count (wls_jvm_uptime{weblogic_domainUID=\"$domainName\", weblogic_clusterName=\"$clusterName\"}) by (name))", + "format": "time_series", + 
"intervalFactor": 1, + "legendFormat": "{{weblogic_serverName}}", + "refId": "A" + } + ], + "thresholds": "", + "timeFrom": null, + "timeShift": null, + "title": "Running Servers", + "type": "singlestat", + "valueFontSize": "80%", + "valueMaps": [ + { + "op": "=", + "text": "N/A", + "value": "null" + } + ], + "valueName": "avg" + }, + { + "cacheTimeout": null, + "colorBackground": false, + "colorValue": false, + "colors": [ + "#299c46", + "rgba(237, 129, 40, 0.89)", + "#d44a3a" + ], + "datasource": "$datasource", + "format": "none", + "gauge": { + "maxValue": 100, + "minValue": 0, + "show": false, + "thresholdLabels": false, + "thresholdMarkers": true + }, + "gridPos": { + "h": 3, + "w": 11, + "x": 13, + "y": 1 + }, + "id": 23, + "interval": null, + "links": [], + "mappingType": 1, + "mappingTypes": [ + { + "name": "value to text", + "value": 1 + }, + { + "name": "range to text", + "value": 2 + } + ], + "maxDataPoints": 100, + "nullPointMode": "connected", + "nullText": null, + "postfix": "", + "postfixFontSize": "50%", + "prefix": "", + "prefixFontSize": "50%", + "rangeMaps": [ + { + "from": "null", + "text": "N/A", + "to": "null" + } + ], + "sparkline": { + "fillColor": "rgba(31, 118, 189, 0.18)", + "full": false, + "lineColor": "rgb(31, 120, 193)", + "show": false + }, + "tableColumn": "", + "targets": [ + { + "expr": "count(count(wls_webapp_config_deployment_state{weblogic_domainUID=\"$domainName\", weblogic_clusterName=\"$clusterName\"}) by (app))", + "format": "time_series", + "intervalFactor": 1, + "legendFormat": "", + "refId": "A" + } + ], + "thresholds": "", + "timeFrom": null, + "timeShift": null, + "title": "Deployed Applications", + "type": "singlestat", + "valueFontSize": "80%", + "valueMaps": [ + { + "op": "=", + "text": "N/A", + "value": "null" + } + ], + "valueName": "avg" + }, + { + "cacheTimeout": null, + "colorBackground": false, + "colorValue": true, + "colors": [ + "#299c46", + "rgba(237, 129, 40, 0.89)", + "#d44a3a" + ], + "datasource": 
"$datasource", + "decimals": 1, + "description": "", + "format": "percent", + "gauge": { + "maxValue": 100, + "minValue": 0, + "show": false, + "thresholdLabels": false, + "thresholdMarkers": true + }, + "gridPos": { + "h": 3, + "w": 6, + "x": 0, + "y": 4 + }, + "hideTimeOverride": true, + "id": 104, + "interval": null, + "links": [], + "mappingType": 1, + "mappingTypes": [ + { + "name": "value to text", + "value": 1 + }, + { + "name": "range to text", + "value": 2 + } + ], + "maxDataPoints": 100, + "nullPointMode": "connected", + "nullText": null, + "postfix": "", + "postfixFontSize": "50%", + "prefix": "", + "prefixFontSize": "50%", + "rangeMaps": [ + { + "from": "null", + "text": "N/A", + "to": "null" + } + ], + "repeat": "serverName", + "repeatDirection": "v", + "sparkline": { + "fillColor": "rgba(31, 118, 189, 0.18)", + "full": false, + "lineColor": "rgb(31, 120, 193)", + "show": false + }, + "tableColumn": "weblogic_serverName", + "targets": [ + { + "expr": "wls_server_activation_time{weblogic_domainUID=\"$domainName\", weblogic_clusterName=\"$clusterName\",weblogic_serverName=\"$serverName\"}", + "format": "table", + "hide": false, + "instant": true, + "interval": "10s", + "intervalFactor": 2, + "legendFormat": "", + "refId": "A" + } + ], + "thresholds": "50,80", + "timeFrom": null, + "timeShift": null, + "title": "Server Name", + "type": "singlestat", + "valueFontSize": "50%", + "valueMaps": [ + { + "op": "=", + "text": "", + "value": "" + } + ], + "valueName": "current" + }, + { + "cacheTimeout": null, + "colorBackground": false, + "colorValue": true, + "colors": [ + "#56A64B", + "rgba(237, 129, 40, 0.89)", + "#d44a3a" + ], + "datasource": "$datasource", + "format": "none", + "gauge": { + "maxValue": 100, + "minValue": 0, + "show": false, + "thresholdLabels": false, + "thresholdMarkers": true + }, + "gridPos": { + "h": 3, + "w": 4, + "x": 6, + "y": 4 + }, + "id": 84, + "interval": "", + "links": [], + "mappingType": 1, + "mappingTypes": [ + { + "name": 
"value to text", + "value": 1 + }, + { + "name": "range to text", + "value": 2 + } + ], + "maxDataPoints": 100, + "nullPointMode": "connected", + "nullText": null, + "postfix": "", + "postfixFontSize": "50%", + "prefix": "", + "prefixFontSize": "50%", + "rangeMaps": [ + { + "from": "null", + "text": "N/A", + "to": "null" + } + ], + "repeat": "serverName", + "repeatDirection": "v", + "sparkline": { + "fillColor": "rgba(31, 118, 189, 0.18)", + "full": false, + "lineColor": "rgb(31, 120, 193)", + "show": false + }, + "tableColumn": "", + "targets": [ + { + "expr": "wls_server_state_val{weblogic_domainUID=\"$domainName\", weblogic_clusterName=\"$clusterName\", weblogic_serverName=\"$serverName\"}", + "format": "time_series", + "intervalFactor": 1, + "refId": "A" + } + ], + "thresholds": "", + "timeFrom": null, + "timeShift": null, + "title": "Server Status", + "type": "singlestat", + "valueFontSize": "50%", + "valueMaps": [ + { + "op": "=", + "text": "SHUTDOWN", + "value": "0" + }, + { + "op": "=", + "text": "STARTING", + "value": "1" + }, + { + "op": "=", + "text": "RUNNING", + "value": "2" + }, + { + "op": "=", + "text": "STANDBY", + "value": "3" + }, + { + "op": "=", + "text": "FAILED", + "value": "8" + }, + { + "op": "=", + "text": "FAILED", + "value": "17" + } + ], + "valueName": "current" + }, + { + "cacheTimeout": null, + "colorBackground": false, + "colorValue": true, + "colors": [ + "#299c46", + "rgba(237, 129, 40, 0.89)", + "#d44a3a" + ], + "datasource": "$datasource", + "decimals": 1, + "description": "", + "format": "percent", + "gauge": { + "maxValue": 100, + "minValue": 0, + "show": true, + "thresholdLabels": false, + "thresholdMarkers": true + }, + "gridPos": { + "h": 3, + "w": 4, + "x": 10, + "y": 4 + }, + "hideTimeOverride": true, + "id": 27, + "interval": null, + "links": [], + "mappingType": 1, + "mappingTypes": [ + { + "name": "value to text", + "value": 1 + }, + { + "name": "range to text", + "value": 2 + } + ], + "maxDataPoints": 100, + 
"nullPointMode": "connected", + "nullText": null, + "postfix": "", + "postfixFontSize": "50%", + "prefix": "", + "prefixFontSize": "50%", + "rangeMaps": [ + { + "from": "null", + "text": "N/A", + "to": "null" + } + ], + "repeat": "serverName", + "repeatDirection": "v", + "sparkline": { + "fillColor": "rgba(31, 118, 189, 0.18)", + "full": false, + "lineColor": "rgb(31, 120, 193)", + "show": true + }, + "tableColumn": "instance", + "targets": [ + { + "expr": "100 - wls_jvm_heap_free_percent{weblogic_domainUID=\"$domainName\", weblogic_clusterName=\"$clusterName\", weblogic_serverName=\"$serverName\"}", + "format": "time_series", + "hide": false, + "instant": true, + "interval": "10s", + "intervalFactor": 2, + "legendFormat": "", + "refId": "A" + } + ], + "thresholds": "50,80", + "timeFrom": null, + "timeShift": null, + "title": "Heap Usage", + "type": "singlestat", + "valueFontSize": "80%", + "valueMaps": [ + { + "op": "=", + "text": "", + "value": "" + } + ], + "valueName": "current" + }, + { + "cacheTimeout": null, + "colorBackground": false, + "colorPostfix": false, + "colorValue": true, + "colors": [ + "#299c46", + "rgba(237, 129, 40, 0.89)", + "#d44a3a" + ], + "datasource": "$datasource", + "decimals": 1, + "description": "", + "format": "ms", + "gauge": { + "maxValue": 100, + "minValue": 0, + "show": false, + "thresholdLabels": false, + "thresholdMarkers": true + }, + "gridPos": { + "h": 3, + "w": 5, + "x": 14, + "y": 4 + }, + "hideTimeOverride": true, + "id": 91, + "interval": null, + "links": [], + "mappingType": 1, + "mappingTypes": [ + { + "name": "value to text", + "value": 1 + }, + { + "name": "range to text", + "value": 2 + } + ], + "maxDataPoints": 100, + "nullPointMode": "connected", + "nullText": null, + "postfix": "", + "postfixFontSize": "50%", + "prefix": "", + "prefixFontSize": "50%", + "rangeMaps": [ + { + "from": "null", + "text": "N/A", + "to": "null" + } + ], + "repeat": "serverName", + "repeatDirection": "v", + "sparkline": { + "fillColor": 
"rgba(31, 118, 189, 0.18)", + "full": false, + "lineColor": "rgb(31, 120, 193)", + "show": false + }, + "tableColumn": "instance", + "targets": [ + { + "expr": "wls_jvm_uptime{weblogic_domainUID=\"$domainName\", weblogic_clusterName=\"$clusterName\", weblogic_serverName=\"$serverName\"}", + "format": "time_series", + "hide": false, + "instant": true, + "interval": "10s", + "intervalFactor": 2, + "legendFormat": "", + "refId": "A" + } + ], + "thresholds": "50,80", + "timeFrom": null, + "timeShift": null, + "title": "Running Time", + "type": "singlestat", + "valueFontSize": "50%", + "valueMaps": [ + { + "op": "=", + "text": "", + "value": "" + } + ], + "valueName": "current" + }, + { + "cacheTimeout": null, + "colorBackground": false, + "colorPostfix": false, + "colorValue": true, + "colors": [ + "#299c46", + "rgba(237, 129, 40, 0.89)", + "#d44a3a" + ], + "datasource": "$datasource", + "decimals": 0, + "description": "", + "format": "short", + "gauge": { + "maxValue": 100, + "minValue": 0, + "show": false, + "thresholdLabels": false, + "thresholdMarkers": true + }, + "gridPos": { + "h": 3, + "w": 5, + "x": 19, + "y": 4 + }, + "hideTimeOverride": true, + "id": 96, + "interval": null, + "links": [], + "mappingType": 1, + "mappingTypes": [ + { + "name": "value to text", + "value": 1 + }, + { + "name": "range to text", + "value": 2 + } + ], + "maxDataPoints": 100, + "nullPointMode": "connected", + "nullText": null, + "postfix": "", + "postfixFontSize": "50%", + "prefix": "", + "prefixFontSize": "50%", + "rangeMaps": [ + { + "from": "null", + "text": "N/A", + "to": "null" + } + ], + "repeat": "serverName", + "repeatDirection": "v", + "sparkline": { + "fillColor": "rgba(31, 118, 189, 0.18)", + "full": false, + "lineColor": "rgb(31, 120, 193)", + "show": false + }, + "tableColumn": "instance", + "targets": [ + { + "expr": "wls_server_open_sockets_current_count{weblogic_domainUID=\"$domainName\", weblogic_clusterName=\"$clusterName\", weblogic_serverName=\"$serverName\"}", + 
"format": "time_series", + "hide": false, + "instant": true, + "interval": "10s", + "intervalFactor": 2, + "legendFormat": "", + "refId": "A" + } + ], + "thresholds": "50,80", + "timeFrom": null, + "timeShift": null, + "title": "Open Sockets", + "type": "singlestat", + "valueFontSize": "80%", + "valueMaps": [ + { + "op": "=", + "text": "", + "value": "" + } + ], + "valueName": "current" + }, + { + "aliasColors": { + " heap free managed-server-1": "super-light-green", + " heap free managed-server-2": "dark-green", + "heap size managed-server-1 ": "super-light-red", + "heap size managed-server-2 ": "dark-red" + }, + "bars": false, + "dashLength": 10, + "dashes": false, + "datasource": "$datasource", + "fill": 1, + "gridPos": { + "h": 10, + "w": 24, + "x": 0, + "y": 7 + }, + "id": 12, + "legend": { + "alignAsTable": true, + "avg": true, + "current": true, + "max": true, + "min": false, + "rightSide": false, + "show": true, + "total": false, + "values": true + }, + "lines": true, + "linewidth": 1, + "links": [], + "nullPointMode": "null", + "paceLength": 10, + "percentage": false, + "pointradius": 2, + "points": false, + "renderer": "flot", + "seriesOverrides": [], + "spaceLength": 10, + "stack": false, + "steppedLine": false, + "targets": [ + { + "expr": "wls_jvm_heap_free_current{weblogic_domainUID=\"$domainName\", weblogic_clusterName=\"$clusterName\", weblogic_serverName=~\"${serverName:regex}\"}", + "format": "time_series", + "intervalFactor": 1, + "legendFormat": " Heap Free ({{weblogic_serverName}})", + "refId": "B" + }, + { + "expr": "wls_jvm_heap_size_current{weblogic_domainUID=\"$domainName\", weblogic_clusterName=\"$clusterName\", weblogic_serverName=~\"${serverName:regex}\"}", + "format": "time_series", + "instant": false, + "intervalFactor": 1, + "legendFormat": "Heap Size ({{weblogic_serverName}})", + "refId": "A" + }, + { + "expr": "wls_jvm_heap_size_max{weblogic_domainUID=\"$domainName\", weblogic_clusterName=\"$clusterName\", 
weblogic_serverName=~\"${serverName:regex}\"}", + "format": "time_series", + "hide": true, + "intervalFactor": 1, + "legendFormat": "Heap Max ({{weblogic_serverName}})", + "refId": "C" + } + ], + "thresholds": [], + "timeFrom": null, + "timeRegions": [], + "timeShift": null, + "title": "JVM Heap", + "tooltip": { + "shared": true, + "sort": 0, + "value_type": "individual" + }, + "type": "graph", + "xaxis": { + "buckets": null, + "mode": "time", + "name": null, + "show": true, + "values": [] + }, + "yaxes": [ + { + "format": "decbytes", + "label": null, + "logBase": 1, + "max": null, + "min": null, + "show": true + }, + { + "format": "short", + "label": null, + "logBase": 1, + "max": null, + "min": null, + "show": true + } + ], + "yaxis": { + "align": false, + "alignLevel": null + } + }, + { + "aliasColors": { + " heap free managed-server-1": "super-light-green", + " heap free managed-server-2": "dark-green", + "heap size managed-server-1 ": "super-light-red", + "heap size managed-server-2 ": "dark-red" + }, + "bars": false, + "dashLength": 10, + "dashes": false, + "datasource": "$datasource", + "fill": 1, + "gridPos": { + "h": 7, + "w": 12, + "x": 0, + "y": 17 + }, + "id": 21, + "legend": { + "alignAsTable": false, + "avg": false, + "current": true, + "max": false, + "min": false, + "rightSide": false, + "show": true, + "total": false, + "values": true + }, + "lines": true, + "linewidth": 1, + "links": [], + "nullPointMode": "null", + "paceLength": 10, + "percentage": false, + "pointradius": 2, + "points": false, + "renderer": "flot", + "seriesOverrides": [], + "spaceLength": 10, + "stack": false, + "steppedLine": false, + "targets": [ + { + "expr": "wls_jvm_process_cpu_load{weblogic_domainUID=~\"$domainName\", weblogic_clusterName=~\"$clusterName\", weblogic_serverName=~\"${serverName:regex}\"} * 100", + "format": "time_series", + "intervalFactor": 1, + "legendFormat": " {{weblogic_serverName}}", + "refId": "B" + } + ], + "thresholds": [], + "timeFrom": null, + 
"timeRegions": [], + "timeShift": null, + "title": "CPU Load", + "tooltip": { + "shared": true, + "sort": 0, + "value_type": "individual" + }, + "type": "graph", + "xaxis": { + "buckets": null, + "mode": "time", + "name": null, + "show": true, + "values": [] + }, + "yaxes": [ + { + "format": "percent", + "label": null, + "logBase": 1, + "max": null, + "min": null, + "show": true + }, + { + "format": "short", + "label": null, + "logBase": 1, + "max": null, + "min": null, + "show": true + } + ], + "yaxis": { + "align": false, + "alignLevel": null + } + }, + { + "aliasColors": {}, + "bars": false, + "dashLength": 10, + "dashes": false, + "datasource": "$datasource", + "fill": 1, + "gridPos": { + "h": 7, + "w": 12, + "x": 12, + "y": 17 + }, + "id": 10, + "legend": { + "alignAsTable": false, + "avg": false, + "current": true, + "max": false, + "min": false, + "rightSide": false, + "show": true, + "total": false, + "values": true + }, + "lines": true, + "linewidth": 1, + "links": [], + "nullPointMode": "null", + "paceLength": 10, + "percentage": false, + "pointradius": 2, + "points": false, + "renderer": "flot", + "seriesOverrides": [], + "spaceLength": 10, + "stack": false, + "steppedLine": false, + "targets": [ + { + "expr": "wls_threadpool_execute_thread_total_count{weblogic_domainUID=\"$domainName\", weblogic_clusterName=\"$clusterName\", weblogic_serverName=~\"${serverName:regex}\"}", + "format": "time_series", + "intervalFactor": 1, + "legendFormat": "Total Threads ({{weblogic_serverName}})", + "refId": "A" + }, + { + "expr": "wls_threadpool_stuck_thread_count{weblogic_domainUID=\"$domainName\", weblogic_clusterName=\"$clusterName\", weblogic_serverName=~\"${serverName:regex}\"}", + "format": "time_series", + "intervalFactor": 1, + "legendFormat": "Stuck Threads ({{weblogic_serverName}})", + "refId": "D" + }, + { + "expr": "wls_threadpool_queue_length{weblogic_domainUID=\"$domainName\", weblogic_clusterName=\"$clusterName\", 
weblogic_serverName=~\"${serverName:regex}\"}", + "format": "time_series", + "hide": true, + "intervalFactor": 1, + "legendFormat": "queue", + "refId": "C" + }, + { + "expr": "wls_threadpool_hogging_thread_count{weblogic_domainUID=\"$domainName\", weblogic_clusterName=\"$clusterName\", weblogic_serverName=~\"${serverName:regex}\"}", + "format": "time_series", + "hide": true, + "intervalFactor": 1, + "legendFormat": "hogging", + "refId": "B" + } + ], + "thresholds": [], + "timeFrom": null, + "timeRegions": [], + "timeShift": null, + "title": "Thread Pool", + "tooltip": { + "shared": true, + "sort": 0, + "value_type": "individual" + }, + "type": "graph", + "xaxis": { + "buckets": null, + "mode": "time", + "name": null, + "show": true, + "values": [] + }, + "yaxes": [ + { + "format": "short", + "label": null, + "logBase": 1, + "max": null, + "min": null, + "show": true + }, + { + "format": "short", + "label": null, + "logBase": 1, + "max": null, + "min": null, + "show": true + } + ], + "yaxis": { + "align": false, + "alignLevel": null + } + }, + { + "collapsed": true, + "gridPos": { + "h": 1, + "w": 24, + "x": 0, + "y": 24 + }, + "id": 35, + "panels": [ + { + "columns": [], + "datasource": "$datasource", + "fontSize": "100%", + "gridPos": { + "h": 7, + "w": 8, + "x": 0, + "y": 28 + }, + "hideTimeOverride": true, + "id": 126, + "links": [], + "pageSize": null, + "scroll": true, + "showHeader": true, + "sort": { + "col": 13, + "desc": true + }, + "styles": [ + { + "alias": "Time", + "dateFormat": "YYYY-MM-DD HH:mm:ss", + "pattern": "Time", + "type": "hidden" + }, + { + "alias": "Webapp", + "colorMode": null, + "colors": [ + "rgba(245, 54, 54, 0.9)", + "rgba(237, 129, 40, 0.89)", + "rgba(50, 172, 45, 0.97)" + ], + "dateFormat": "YYYY-MM-DD HH:mm:ss", + "decimals": 2, + "mappingType": 1, + "pattern": "app", + "thresholds": [], + "type": "string", + "unit": "short" + }, + { + "alias": "Total Sessions", + "colorMode": null, + "colors": [ + "rgba(245, 54, 54, 0.9)", + 
"rgba(237, 129, 40, 0.89)", + "rgba(50, 172, 45, 0.97)" + ], + "dateFormat": "YYYY-MM-DD HH:mm:ss", + "decimals": 0, + "mappingType": 1, + "pattern": "Value", + "thresholds": [], + "type": "number", + "unit": "short" + }, + { + "alias": "", + "colorMode": null, + "colors": [ + "rgba(245, 54, 54, 0.9)", + "rgba(237, 129, 40, 0.89)", + "rgba(50, 172, 45, 0.97)" + ], + "decimals": 2, + "pattern": "/.*/", + "thresholds": [], + "type": "hidden", + "unit": "short" + } + ], + "targets": [ + { + "expr": "topk($topN,sum(wls_webapp_config_sessions_opened_total_count{weblogic_domainUID=\"$domainName\", weblogic_clusterName=\"$clusterName\", weblogic_serverName=~\"${serverName:regex}\"}) by (app))", + "format": "table", + "instant": true, + "intervalFactor": 1, + "refId": "B" + } + ], + "timeFrom": null, + "timeShift": null, + "title": "Total Sessions (top $topN)", + "transform": "table", + "type": "table" + }, + { + "columns": [], + "datasource": "$datasource", + "fontSize": "100%", + "gridPos": { + "h": 7, + "w": 8, + "x": 8, + "y": 28 + }, + "hideTimeOverride": true, + "id": 136, + "links": [], + "pageSize": null, + "scroll": true, + "showHeader": true, + "sort": { + "col": 13, + "desc": true + }, + "styles": [ + { + "alias": "Time", + "dateFormat": "YYYY-MM-DD HH:mm:ss", + "pattern": "Time", + "type": "hidden" + }, + { + "alias": "Webapp", + "colorMode": null, + "colors": [ + "rgba(245, 54, 54, 0.9)", + "rgba(237, 129, 40, 0.89)", + "rgba(50, 172, 45, 0.97)" + ], + "dateFormat": "YYYY-MM-DD HH:mm:ss", + "decimals": 2, + "mappingType": 1, + "pattern": "app", + "thresholds": [], + "type": "string", + "unit": "short" + }, + { + "alias": "Total Requests", + "colorMode": null, + "colors": [ + "rgba(245, 54, 54, 0.9)", + "rgba(237, 129, 40, 0.89)", + "rgba(50, 172, 45, 0.97)" + ], + "dateFormat": "YYYY-MM-DD HH:mm:ss", + "decimals": 0, + "mappingType": 1, + "pattern": "Value", + "thresholds": [], + "type": "number", + "unit": "short" + }, + { + "alias": "", + "colorMode": null, 
+ "colors": [ + "rgba(245, 54, 54, 0.9)", + "rgba(237, 129, 40, 0.89)", + "rgba(50, 172, 45, 0.97)" + ], + "decimals": 2, + "pattern": "/.*/", + "thresholds": [], + "type": "hidden", + "unit": "short" + } + ], + "targets": [ + { + "expr": "topk($topN,sum(wls_servlet_invocation_total_count{weblogic_domainUID=\"$domainName\", weblogic_clusterName=\"$clusterName\", weblogic_serverName=~\"${serverName:regex}\"}) by (app))", + "format": "table", + "instant": true, + "intervalFactor": 1, + "refId": "B" + } + ], + "timeFrom": null, + "timeShift": null, + "title": "Total Requests (top $topN)", + "transform": "table", + "type": "table" + }, + { + "columns": [], + "datasource": "$datasource", + "fontSize": "100%", + "gridPos": { + "h": 7, + "w": 8, + "x": 16, + "y": 28 + }, + "hideTimeOverride": true, + "id": 134, + "links": [], + "pageSize": null, + "scroll": true, + "showHeader": true, + "sort": { + "col": 13, + "desc": true + }, + "styles": [ + { + "alias": "Time", + "dateFormat": "YYYY-MM-DD HH:mm:ss", + "pattern": "Time", + "type": "hidden" + }, + { + "alias": "Webapp", + "colorMode": null, + "colors": [ + "rgba(245, 54, 54, 0.9)", + "rgba(237, 129, 40, 0.89)", + "rgba(50, 172, 45, 0.97)" + ], + "dateFormat": "YYYY-MM-DD HH:mm:ss", + "decimals": 2, + "mappingType": 1, + "pattern": "app", + "thresholds": [], + "type": "string", + "unit": "short" + }, + { + "alias": "Total Time", + "colorMode": null, + "colors": [ + "rgba(245, 54, 54, 0.9)", + "rgba(237, 129, 40, 0.89)", + "rgba(50, 172, 45, 0.97)" + ], + "dateFormat": "YYYY-MM-DD HH:mm:ss", + "decimals": 0, + "mappingType": 1, + "pattern": "Value", + "thresholds": [], + "type": "number", + "unit": "ms" + }, + { + "alias": "", + "colorMode": null, + "colors": [ + "rgba(245, 54, 54, 0.9)", + "rgba(237, 129, 40, 0.89)", + "rgba(50, 172, 45, 0.97)" + ], + "decimals": 2, + "pattern": "/.*/", + "thresholds": [], + "type": "hidden", + "unit": "short" + } + ], + "targets": [ + { + "expr": 
"topk($topN,sum(wls_servlet_execution_time_total{weblogic_domainUID=\"$domainName\", weblogic_clusterName=\"$clusterName\", weblogic_serverName=~\"${serverName:regex}\"}) by (app))", + "format": "table", + "instant": true, + "intervalFactor": 1, + "refId": "B" + } + ], + "timeFrom": null, + "timeShift": null, + "title": "Total Execution Time (top $topN)", + "transform": "table", + "type": "table" + }, + { + "aliasColors": {}, + "bars": false, + "dashLength": 10, + "dashes": false, + "datasource": "$datasource", + "fill": 1, + "gridPos": { + "h": 8, + "w": 12, + "x": 0, + "y": 35 + }, + "id": 14, + "legend": { + "alignAsTable": false, + "avg": false, + "current": false, + "max": false, + "min": false, + "rightSide": false, + "show": true, + "sort": "current", + "sortDesc": true, + "total": false, + "values": false + }, + "lines": true, + "linewidth": 1, + "links": [], + "nullPointMode": "null", + "paceLength": 10, + "percentage": false, + "pointradius": 2, + "points": false, + "renderer": "flot", + "seriesOverrides": [], + "spaceLength": 10, + "stack": false, + "steppedLine": false, + "targets": [ + { + "expr": "sum(wls_webapp_config_open_sessions_current_count{weblogic_domainUID=\"$domainName\", weblogic_clusterName=\"$clusterName\", weblogic_serverName=~\"${serverName:regex}\"}) by (app)", + "format": "time_series", + "interval": "", + "intervalFactor": 1, + "legendFormat": "{{app}}", + "refId": "A" + }, + { + "expr": "", + "format": "time_series", + "intervalFactor": 1, + "refId": "B" + } + ], + "thresholds": [], + "timeFrom": null, + "timeRegions": [], + "timeShift": null, + "title": "Current Sessions ", + "tooltip": { + "shared": true, + "sort": 0, + "value_type": "individual" + }, + "type": "graph", + "xaxis": { + "buckets": null, + "mode": "time", + "name": null, + "show": true, + "values": [] + }, + "yaxes": [ + { + "format": "short", + "label": "", + "logBase": 1, + "max": null, + "min": null, + "show": true + }, + { + "format": "short", + "label": null, + 
"logBase": 1, + "max": null, + "min": null, + "show": true + } + ], + "yaxis": { + "align": false, + "alignLevel": null + } + }, + { + "aliasColors": {}, + "bars": true, + "dashLength": 10, + "dashes": false, + "datasource": "$datasource", + "fill": 1, + "gridPos": { + "h": 8, + "w": 12, + "x": 12, + "y": 35 + }, + "id": 128, + "legend": { + "alignAsTable": false, + "avg": false, + "current": false, + "max": false, + "min": false, + "rightSide": false, + "show": true, + "sort": "current", + "sortDesc": true, + "total": false, + "values": false + }, + "lines": false, + "linewidth": 1, + "links": [], + "nullPointMode": "null", + "paceLength": 10, + "percentage": false, + "pointradius": 2, + "points": false, + "renderer": "flot", + "seriesOverrides": [], + "spaceLength": 10, + "stack": false, + "steppedLine": false, + "targets": [ + { + "expr": " sum(irate(wls_webapp_config_sessions_opened_total_count{weblogic_domainUID=\"$domainName\", weblogic_clusterName=\"$clusterName\", weblogic_serverName=~\"${serverName:regex}\"}[5m])) by (app)", + "format": "time_series", + "intervalFactor": 1, + "legendFormat": "{{app}}", + "refId": "A" + } + ], + "thresholds": [], + "timeFrom": null, + "timeRegions": [], + "timeShift": null, + "title": "Session Rate ", + "tooltip": { + "shared": true, + "sort": 0, + "value_type": "individual" + }, + "type": "graph", + "xaxis": { + "buckets": null, + "mode": "time", + "name": null, + "show": true, + "values": [] + }, + "yaxes": [ + { + "decimals": 0, + "format": "short", + "label": "per second", + "logBase": 1, + "max": null, + "min": "0", + "show": true + }, + { + "format": "short", + "label": null, + "logBase": 1, + "max": null, + "min": null, + "show": true + } + ], + "yaxis": { + "align": false, + "alignLevel": null + } + }, + { + "aliasColors": {}, + "bars": false, + "dashLength": 10, + "dashes": false, + "datasource": "$datasource", + "fill": 1, + "gridPos": { + "h": 7, + "w": 12, + "x": 0, + "y": 43 + }, + "id": 132, + "legend": { + 
"alignAsTable": false, + "avg": false, + "current": false, + "max": false, + "min": false, + "rightSide": false, + "show": true, + "sort": "current", + "sortDesc": true, + "total": false, + "values": false + }, + "lines": true, + "linewidth": 1, + "links": [], + "nullPointMode": "null", + "paceLength": 10, + "percentage": false, + "pointradius": 2, + "points": false, + "renderer": "flot", + "seriesOverrides": [], + "spaceLength": 10, + "stack": false, + "steppedLine": false, + "targets": [ + { + "expr": "(sum(wls_servlet_execution_time_average{weblogic_domainUID=\"$domainName\", weblogic_clusterName=\"$clusterName\", weblogic_serverName=~\"${serverName:regex}\"}) by (app)) / (count(wls_servlet_execution_time_average{weblogic_domainUID=\"domain1\", weblogic_clusterName=\"cluster-1\"}) by (app))", + "format": "time_series", + "interval": "", + "intervalFactor": 1, + "legendFormat": "{{app}}", + "refId": "A" + } + ], + "thresholds": [], + "timeFrom": null, + "timeRegions": [], + "timeShift": null, + "title": "Execution Time per Request ", + "tooltip": { + "shared": true, + "sort": 0, + "value_type": "individual" + }, + "type": "graph", + "xaxis": { + "buckets": null, + "mode": "time", + "name": null, + "show": true, + "values": [] + }, + "yaxes": [ + { + "format": "ms", + "label": "", + "logBase": 1, + "max": null, + "min": null, + "show": true + }, + { + "format": "short", + "label": null, + "logBase": 1, + "max": null, + "min": null, + "show": true + } + ], + "yaxis": { + "align": false, + "alignLevel": null + } + }, + { + "aliasColors": {}, + "bars": true, + "dashLength": 10, + "dashes": false, + "datasource": "$datasource", + "fill": 1, + "gridPos": { + "h": 7, + "w": 12, + "x": 12, + "y": 43 + }, + "id": 138, + "legend": { + "alignAsTable": false, + "avg": false, + "current": false, + "max": false, + "min": false, + "rightSide": false, + "show": true, + "sort": "current", + "sortDesc": true, + "total": false, + "values": false + }, + "lines": false, + 
"linewidth": 1, + "links": [], + "nullPointMode": "null", + "paceLength": 10, + "percentage": false, + "pointradius": 2, + "points": false, + "renderer": "flot", + "seriesOverrides": [], + "spaceLength": 10, + "stack": false, + "steppedLine": false, + "targets": [ + { + "expr": "sum(irate(wls_servlet_invocation_total_count{weblogic_domainUID=\"$domainName\", weblogic_clusterName=\"$clusterName\", weblogic_serverName=~\"${serverName:regex}\"}[5m])) by (app)", + "format": "time_series", + "intervalFactor": 1, + "legendFormat": "{{app}}", + "refId": "A" + } + ], + "thresholds": [], + "timeFrom": null, + "timeRegions": [], + "timeShift": null, + "title": "Request Rate ", + "tooltip": { + "shared": true, + "sort": 0, + "value_type": "individual" + }, + "type": "graph", + "xaxis": { + "buckets": null, + "mode": "time", + "name": null, + "show": true, + "values": [] + }, + "yaxes": [ + { + "decimals": 0, + "format": "short", + "label": "per second", + "logBase": 1, + "max": null, + "min": "0", + "show": true + }, + { + "format": "short", + "label": null, + "logBase": 1, + "max": null, + "min": null, + "show": true + } + ], + "yaxis": { + "align": false, + "alignLevel": null + } + } + ], + "title": "Web Applications", + "type": "row" + }, + { + "collapsed": true, + "gridPos": { + "h": 1, + "w": 24, + "x": 0, + "y": 25 + }, + "id": 43, + "panels": [ + { + "columns": [], + "datasource": "$datasource", + "fontSize": "100%", + "gridPos": { + "h": 4, + "w": 24, + "x": 0, + "y": 29 + }, + "hideTimeOverride": true, + "id": 111, + "links": [], + "pageSize": null, + "scroll": true, + "showHeader": true, + "sort": { + "col": 0, + "desc": true + }, + "styles": [ + { + "alias": "Time", + "dateFormat": "YYYY-MM-DD HH:mm:ss", + "pattern": "Time", + "type": "hidden" + }, + { + "alias": "Server", + "colorMode": null, + "colors": [ + "rgba(245, 54, 54, 0.9)", + "rgba(237, 129, 40, 0.89)", + "rgba(50, 172, 45, 0.97)" + ], + "dateFormat": "YYYY-MM-DD HH:mm:ss", + "decimals": 2, + 
"mappingType": 1, + "pattern": "weblogic_serverName", + "thresholds": [], + "type": "string", + "unit": "short" + }, + { + "alias": "Name", + "colorMode": null, + "colors": [ + "rgba(245, 54, 54, 0.9)", + "rgba(237, 129, 40, 0.89)", + "rgba(50, 172, 45, 0.97)" + ], + "dateFormat": "YYYY-MM-DD HH:mm:ss", + "decimals": 2, + "mappingType": 1, + "pattern": "name", + "thresholds": [], + "type": "string", + "unit": "short" + }, + { + "alias": "Active Connections", + "colorMode": null, + "colors": [ + "rgba(245, 54, 54, 0.9)", + "rgba(237, 129, 40, 0.89)", + "rgba(50, 172, 45, 0.97)" + ], + "dateFormat": "YYYY-MM-DD HH:mm:ss", + "decimals": 0, + "mappingType": 1, + "pattern": "Value #A", + "thresholds": [], + "type": "number", + "unit": "short" + }, + { + "alias": "Current Capacity", + "colorMode": null, + "colors": [ + "rgba(245, 54, 54, 0.9)", + "rgba(237, 129, 40, 0.89)", + "rgba(50, 172, 45, 0.97)" + ], + "dateFormat": "YYYY-MM-DD HH:mm:ss", + "decimals": 0, + "mappingType": 1, + "pattern": "Value #C", + "thresholds": [], + "type": "number", + "unit": "short" + }, + { + "alias": "Total Connections", + "colorMode": null, + "colors": [ + "rgba(245, 54, 54, 0.9)", + "rgba(237, 129, 40, 0.89)", + "rgba(50, 172, 45, 0.97)" + ], + "dateFormat": "YYYY-MM-DD HH:mm:ss", + "decimals": 0, + "mappingType": 1, + "pattern": "Value #D", + "thresholds": [], + "type": "number", + "unit": "short" + }, + { + "alias": "Total Connections", + "colorMode": null, + "colors": [ + "rgba(245, 54, 54, 0.9)", + "rgba(237, 129, 40, 0.89)", + "rgba(50, 172, 45, 0.97)" + ], + "dateFormat": "YYYY-MM-DD HH:mm:ss", + "decimals": 0, + "mappingType": 1, + "pattern": "Value #D", + "thresholds": [], + "type": "number", + "unit": "short" + }, + { + "alias": "", + "colorMode": null, + "colors": [ + "rgba(245, 54, 54, 0.9)", + "rgba(237, 129, 40, 0.89)", + "rgba(50, 172, 45, 0.97)" + ], + "decimals": 2, + "pattern": "/.*/", + "thresholds": [], + "type": "hidden", + "unit": "short" + } + ], + "targets": [ + { 
+ "expr": "sum(wls_datasource_curr_capacity{weblogic_domainUID=\"$domainName\", weblogic_clusterName=\"$clusterName\", weblogic_serverName=~\"${serverName:regex}\"}) by (weblogic_serverName,name)", + "format": "table", + "instant": true, + "intervalFactor": 1, + "refId": "C" + }, + { + "expr": "sum(wls_datasource_active_connections_current_count{weblogic_domainUID=\"$domainName\", weblogic_clusterName=\"$clusterName\", weblogic_serverName=~\"${serverName:regex}\"}) by (weblogic_serverName,name)", + "format": "table", + "instant": true, + "intervalFactor": 1, + "refId": "A" + }, + { + "expr": "sum(wls_datasource_connections_total_count{weblogic_domainUID=\"$domainName\", weblogic_clusterName=\"$clusterName\", weblogic_serverName=~\"${serverName:regex}\"}) by (weblogic_serverName,name)", + "format": "table", + "instant": true, + "intervalFactor": 1, + "refId": "D" + } + ], + "timeFrom": null, + "timeShift": null, + "title": "Overview", + "transform": "table", + "type": "table" + }, + { + "aliasColors": {}, + "bars": false, + "dashLength": 10, + "dashes": false, + "datasource": "$datasource", + "fill": 1, + "gridPos": { + "h": 8, + "w": 13, + "x": 0, + "y": 33 + }, + "id": 50, + "legend": { + "alignAsTable": false, + "avg": false, + "current": false, + "max": false, + "min": false, + "rightSide": false, + "show": true, + "sort": "current", + "sortDesc": true, + "total": false, + "values": false + }, + "lines": true, + "linewidth": 1, + "links": [], + "nullPointMode": "null", + "paceLength": 10, + "percentage": false, + "pointradius": 2, + "points": false, + "renderer": "flot", + "seriesOverrides": [], + "spaceLength": 10, + "stack": false, + "steppedLine": false, + "targets": [ + { + "expr": "wls_datasource_active_connections_current_count{weblogic_domainUID=\"$domainName\", weblogic_clusterName=\"$clusterName\", weblogic_serverName=~\"${serverName:regex}\"}", + "format": "time_series", + "intervalFactor": 1, + "legendFormat": " {{name}} @ {{weblogic_serverName}}", + 
"refId": "A" + } + ], + "thresholds": [], + "timeFrom": null, + "timeRegions": [], + "timeShift": null, + "title": "Active Connections", + "tooltip": { + "shared": true, + "sort": 0, + "value_type": "individual" + }, + "type": "graph", + "xaxis": { + "buckets": null, + "mode": "time", + "name": null, + "show": true, + "values": [] + }, + "yaxes": [ + { + "decimals": 0, + "format": "short", + "label": null, + "logBase": 1, + "max": null, + "min": "0", + "show": true + }, + { + "format": "short", + "label": null, + "logBase": 1, + "max": null, + "min": null, + "show": true + } + ], + "yaxis": { + "align": false, + "alignLevel": null + } + }, + { + "aliasColors": {}, + "bars": true, + "dashLength": 10, + "dashes": false, + "datasource": "$datasource", + "fill": 1, + "gridPos": { + "h": 8, + "w": 11, + "x": 13, + "y": 33 + }, + "id": 71, + "legend": { + "alignAsTable": false, + "avg": false, + "current": false, + "max": false, + "min": false, + "rightSide": false, + "show": true, + "total": false, + "values": false + }, + "lines": true, + "linewidth": 1, + "links": [], + "nullPointMode": "null", + "paceLength": 10, + "percentage": false, + "pointradius": 2, + "points": false, + "renderer": "flot", + "seriesOverrides": [], + "spaceLength": 10, + "stack": false, + "steppedLine": false, + "targets": [ + { + "expr": "irate(wls_datasource_connections_total_count{weblogic_domainUID=\"$domainName\", weblogic_clusterName=\"$clusterName\", weblogic_serverName=~\"${serverName:regex}\"}[5m])", + "format": "time_series", + "intervalFactor": 1, + "legendFormat": " {{name}} @ {{weblogic_serverName}}", + "refId": "A" + } + ], + "thresholds": [], + "timeFrom": null, + "timeRegions": [], + "timeShift": null, + "title": "Connection Rate", + "tooltip": { + "shared": true, + "sort": 0, + "value_type": "individual" + }, + "type": "graph", + "xaxis": { + "buckets": null, + "mode": "time", + "name": null, + "show": true, + "values": [] + }, + "yaxes": [ + { + "decimals": 0, + "format": 
"short", + "label": "per second", + "logBase": 1, + "max": null, + "min": "0", + "show": true + }, + { + "format": "short", + "label": null, + "logBase": 1, + "max": null, + "min": null, + "show": true + } + ], + "yaxis": { + "align": false, + "alignLevel": null + } + }, + { + "aliasColors": {}, + "bars": false, + "dashLength": 10, + "dashes": false, + "datasource": "$datasource", + "fill": 1, + "gridPos": { + "h": 9, + "w": 11, + "x": 0, + "y": 41 + }, + "id": 46, + "legend": { + "alignAsTable": false, + "avg": false, + "current": false, + "max": false, + "min": false, + "show": true, + "total": false, + "values": false + }, + "lines": true, + "linewidth": 1, + "links": [], + "nullPointMode": "null", + "paceLength": 10, + "percentage": false, + "pointradius": 2, + "points": false, + "renderer": "flot", + "seriesOverrides": [], + "spaceLength": 10, + "stack": false, + "steppedLine": false, + "targets": [ + { + "expr": "wls_datasource_waiting_for_connection_current_count{weblogic_domainUID=\"$domainName\", weblogic_clusterName=\"$clusterName\", weblogic_serverName=~\"${serverName:regex}\"}", + "format": "time_series", + "intervalFactor": 1, + "legendFormat": " {{name}} @ {{weblogic_serverName}}", + "refId": "A" + } + ], + "thresholds": [], + "timeFrom": null, + "timeRegions": [], + "timeShift": null, + "title": "Pending Connection Requests", + "tooltip": { + "shared": true, + "sort": 0, + "value_type": "individual" + }, + "type": "graph", + "xaxis": { + "buckets": null, + "mode": "time", + "name": null, + "show": true, + "values": [] + }, + "yaxes": [ + { + "decimals": 0, + "format": "short", + "label": null, + "logBase": 1, + "max": null, + "min": "0", + "show": true + }, + { + "format": "short", + "label": null, + "logBase": 1, + "max": null, + "min": null, + "show": true + } + ], + "yaxis": { + "align": false, + "alignLevel": null + } + }, + { + "aliasColors": {}, + "bars": false, + "dashLength": 10, + "dashes": false, + "datasource": "$datasource", + "fill": 1, 
+ "gridPos": { + "h": 9, + "w": 13, + "x": 11, + "y": 41 + }, + "id": 73, + "legend": { + "alignAsTable": false, + "avg": false, + "current": false, + "max": false, + "min": false, + "show": true, + "total": false, + "values": false + }, + "lines": true, + "linewidth": 1, + "links": [], + "nullPointMode": "null", + "paceLength": 10, + "percentage": false, + "pointradius": 2, + "points": false, + "renderer": "flot", + "seriesOverrides": [], + "spaceLength": 10, + "stack": false, + "steppedLine": false, + "targets": [ + { + "expr": "wls_datasource_connection_delay_time{weblogic_domainUID=\"$domainName\", weblogic_clusterName=\"$clusterName\", weblogic_serverName=~\"${serverName:regex}\"}", + "format": "time_series", + "intervalFactor": 1, + "legendFormat": " {{name}} @ {{weblogic_serverName}}", + "refId": "A" + } + ], + "thresholds": [], + "timeFrom": null, + "timeRegions": [], + "timeShift": null, + "title": "Average Connection Delay Time", + "tooltip": { + "shared": true, + "sort": 0, + "value_type": "individual" + }, + "type": "graph", + "xaxis": { + "buckets": null, + "mode": "time", + "name": null, + "show": true, + "values": [] + }, + "yaxes": [ + { + "decimals": 0, + "format": "ms", + "label": null, + "logBase": 1, + "max": null, + "min": "0", + "show": true + }, + { + "format": "short", + "label": null, + "logBase": 1, + "max": null, + "min": null, + "show": true + } + ], + "yaxis": { + "align": false, + "alignLevel": null + } + } + ], + "title": "Data Sources", + "type": "row" + }, + { + "collapsed": true, + "gridPos": { + "h": 1, + "w": 24, + "x": 0, + "y": 26 + }, + "id": 40, + "panels": [ + { + "aliasColors": {}, + "bars": false, + "dashLength": 10, + "dashes": false, + "datasource": "$datasource", + "fill": 1, + "gridPos": { + "h": 6, + "w": 12, + "x": 0, + "y": 30 + }, + "id": 145, + "legend": { + "alignAsTable": false, + "avg": false, + "current": false, + "max": false, + "min": false, + "rightSide": false, + "show": true, + "total": false, + "values": 
false + }, + "lines": true, + "linewidth": 1, + "links": [], + "nullPointMode": "null", + "paceLength": 10, + "percentage": false, + "pointradius": 2, + "points": false, + "renderer": "flot", + "seriesOverrides": [], + "spaceLength": 10, + "stack": false, + "steppedLine": false, + "targets": [ + { + "expr": "sum(wls_jmsruntime_connections_current_count{weblogic_domainUID=\"$domainName\", weblogic_clusterName=\"$clusterName\", weblogic_serverName=~\"${serverName:regex}\"}) by (weblogic_serverName)", + "format": "time_series", + "intervalFactor": 1, + "legendFormat": "{{weblogic_serverName}}", + "refId": "A" + } + ], + "thresholds": [], + "timeFrom": null, + "timeRegions": [], + "timeShift": null, + "title": "JMS Connections", + "tooltip": { + "shared": true, + "sort": 0, + "value_type": "individual" + }, + "type": "graph", + "xaxis": { + "buckets": null, + "mode": "time", + "name": null, + "show": true, + "values": [] + }, + "yaxes": [ + { + "decimals": 0, + "format": "short", + "label": "", + "logBase": 1, + "max": null, + "min": "0", + "show": true + }, + { + "format": "short", + "label": null, + "logBase": 1, + "max": null, + "min": null, + "show": true + } + ], + "yaxis": { + "align": false, + "alignLevel": null + } + }, + { + "aliasColors": {}, + "bars": false, + "dashLength": 10, + "dashes": false, + "datasource": "$datasource", + "fill": 1, + "gridPos": { + "h": 6, + "w": 12, + "x": 12, + "y": 30 + }, + "id": 147, + "legend": { + "alignAsTable": false, + "avg": false, + "current": false, + "max": false, + "min": false, + "rightSide": false, + "show": true, + "total": false, + "values": false + }, + "lines": true, + "linewidth": 1, + "links": [], + "nullPointMode": "null", + "paceLength": 10, + "percentage": false, + "pointradius": 2, + "points": false, + "renderer": "flot", + "seriesOverrides": [], + "spaceLength": 10, + "stack": false, + "steppedLine": false, + "targets": [ + { + "expr": 
"sum(irate(wls_jmsruntime_connections_total_count{weblogic_domainUID=\"$domainName\", weblogic_clusterName=\"$clusterName\", weblogic_serverName=~\"${serverName:regex}\"}[5m])) by (weblogic_serverName)", + "format": "time_series", + "intervalFactor": 1, + "legendFormat": "{{weblogic_serverName}}", + "refId": "A" + } + ], + "thresholds": [], + "timeFrom": null, + "timeRegions": [], + "timeShift": null, + "title": "JMS Connection Rate", + "tooltip": { + "shared": true, + "sort": 0, + "value_type": "individual" + }, + "type": "graph", + "xaxis": { + "buckets": null, + "mode": "time", + "name": null, + "show": true, + "values": [] + }, + "yaxes": [ + { + "decimals": null, + "format": "short", + "label": "", + "logBase": 1, + "max": null, + "min": "0", + "show": true + }, + { + "format": "short", + "label": null, + "logBase": 1, + "max": null, + "min": null, + "show": true + } + ], + "yaxis": { + "align": false, + "alignLevel": null + } + }, + { + "columns": [], + "datasource": "$datasource", + "fontSize": "100%", + "gridPos": { + "h": 4, + "w": 24, + "x": 0, + "y": 36 + }, + "hideTimeOverride": true, + "id": 113, + "links": [], + "pageSize": null, + "scroll": true, + "showHeader": true, + "sort": { + "col": 0, + "desc": true + }, + "styles": [ + { + "alias": "Time", + "dateFormat": "YYYY-MM-DD HH:mm:ss", + "pattern": "Time", + "type": "hidden" + }, + { + "alias": "Name", + "colorMode": null, + "colors": [ + "rgba(245, 54, 54, 0.9)", + "rgba(237, 129, 40, 0.89)", + "rgba(50, 172, 45, 0.97)" + ], + "dateFormat": "YYYY-MM-DD HH:mm:ss", + "decimals": 2, + "mappingType": 1, + "pattern": "jmsserver", + "thresholds": [], + "type": "string", + "unit": "short" + }, + { + "alias": "Current Dests", + "colorMode": null, + "colors": [ + "rgba(245, 54, 54, 0.9)", + "rgba(237, 129, 40, 0.89)", + "rgba(50, 172, 45, 0.97)" + ], + "dateFormat": "YYYY-MM-DD HH:mm:ss", + "decimals": 0, + "mappingType": 1, + "pattern": "Value #B", + "thresholds": [], + "type": "number", + "unit": "short" + 
}, + { + "alias": "Total Msgs", + "colorMode": null, + "colors": [ + "rgba(245, 54, 54, 0.9)", + "rgba(237, 129, 40, 0.89)", + "rgba(50, 172, 45, 0.97)" + ], + "dateFormat": "YYYY-MM-DD HH:mm:ss", + "decimals": 0, + "mappingType": 1, + "pattern": "Value #A", + "thresholds": [], + "type": "number", + "unit": "short" + }, + { + "alias": "Total Bytes", + "colorMode": null, + "colors": [ + "rgba(245, 54, 54, 0.9)", + "rgba(237, 129, 40, 0.89)", + "rgba(50, 172, 45, 0.97)" + ], + "dateFormat": "YYYY-MM-DD HH:mm:ss", + "decimals": 0, + "mappingType": 1, + "pattern": "Value #D", + "thresholds": [], + "type": "number", + "unit": "bytes" + }, + { + "alias": "Total Dests", + "colorMode": null, + "colors": [ + "rgba(245, 54, 54, 0.9)", + "rgba(237, 129, 40, 0.89)", + "rgba(50, 172, 45, 0.97)" + ], + "dateFormat": "YYYY-MM-DD HH:mm:ss", + "decimals": 0, + "mappingType": 1, + "pattern": "Value #E", + "thresholds": [], + "type": "number", + "unit": "short" + }, + { + "alias": "", + "colorMode": null, + "colors": [ + "rgba(245, 54, 54, 0.9)", + "rgba(237, 129, 40, 0.89)", + "rgba(50, 172, 45, 0.97)" + ], + "decimals": 2, + "pattern": "/.*/", + "thresholds": [], + "type": "hidden", + "unit": "short" + } + ], + "targets": [ + { + "expr": "sum(wls_jms_destinations_current_count{weblogic_domainUID=\"$domainName\", weblogic_clusterName=\"$clusterName\", weblogic_serverName=~\"${serverName:regex}\"}) by (jmsserver)", + "format": "table", + "instant": true, + "intervalFactor": 1, + "refId": "B" + }, + { + "expr": "sum(wls_jms_messages_received_count{weblogic_domainUID=\"$domainName\", weblogic_clusterName=\"$clusterName\", weblogic_serverName=~\"${serverName:regex}\"}) by (jmsserver)", + "format": "table", + "instant": true, + "intervalFactor": 1, + "refId": "A" + }, + { + "expr": "sum(wls_jms_bytes_received_count{weblogic_domainUID=\"$domainName\", weblogic_clusterName=\"$clusterName\", weblogic_serverName=~\"${serverName:regex}\"}) by (jmsserver)", + "format": "table", + "instant": 
true, + "intervalFactor": 1, + "refId": "D" + }, + { + "expr": "sum(wls_jms_destinations_total_count{weblogic_domainUID=\"$domainName\", weblogic_clusterName=\"$clusterName\", weblogic_serverName=~\"${serverName:regex}\"}) by (jmsserver)", + "format": "table", + "instant": true, + "intervalFactor": 1, + "refId": "E" + }, + { + "expr": "sum(wls_jms_destinations_current_count{weblogic_domainUID=\"$domainName\", weblogic_clusterName=\"$clusterName\", weblogic_serverName=~\"${serverName:regex}\"}) by (jmsserver)", + "format": "table", + "instant": true, + "intervalFactor": 1, + "refId": "F" + } + ], + "timeFrom": null, + "timeShift": null, + "title": "JMSServer Overview", + "transform": "table", + "type": "table" + }, + { + "aliasColors": {}, + "bars": false, + "dashLength": 10, + "dashes": false, + "datasource": "$datasource", + "fill": 1, + "gridPos": { + "h": 7, + "w": 12, + "x": 0, + "y": 40 + }, + "id": 54, + "legend": { + "alignAsTable": false, + "avg": false, + "current": true, + "max": false, + "min": false, + "rightSide": false, + "show": true, + "total": false, + "values": true + }, + "lines": true, + "linewidth": 1, + "links": [], + "nullPointMode": "null", + "paceLength": 10, + "percentage": false, + "pointradius": 2, + "points": false, + "renderer": "flot", + "seriesOverrides": [], + "spaceLength": 10, + "stack": false, + "steppedLine": false, + "targets": [ + { + "expr": "sum(wls_jms_messages_current_count{weblogic_domainUID=\"$domainName\", weblogic_clusterName=\"$clusterName\", weblogic_serverName=~\"${serverName:regex}\"}) by (jmsserver)", + "format": "time_series", + "intervalFactor": 1, + "legendFormat": "Current ({{jmsserver}})", + "refId": "A" + }, + { + "expr": "sum(wls_jms_messages_pending_count{weblogic_domainUID=\"$domainName\", weblogic_clusterName=\"$clusterName\", weblogic_serverName=~\"${serverName:regex}\"}) by (jmsserver)", + "format": "time_series", + "intervalFactor": 1, + "legendFormat": "Pending ({{jmsserver}})", + "refId": "B" + } + 
], + "thresholds": [], + "timeFrom": null, + "timeRegions": [], + "timeShift": null, + "title": "Messages", + "tooltip": { + "shared": true, + "sort": 0, + "value_type": "individual" + }, + "type": "graph", + "xaxis": { + "buckets": null, + "mode": "time", + "name": null, + "show": true, + "values": [] + }, + "yaxes": [ + { + "decimals": 0, + "format": "short", + "label": "", + "logBase": 1, + "max": null, + "min": "0", + "show": true + }, + { + "format": "short", + "label": null, + "logBase": 1, + "max": null, + "min": null, + "show": true + } + ], + "yaxis": { + "align": false, + "alignLevel": null + } + }, + { + "aliasColors": {}, + "bars": false, + "dashLength": 10, + "dashes": false, + "datasource": "$datasource", + "fill": 1, + "gridPos": { + "h": 7, + "w": 12, + "x": 12, + "y": 40 + }, + "id": 56, + "legend": { + "alignAsTable": false, + "avg": false, + "current": true, + "max": false, + "min": false, + "show": true, + "total": false, + "values": true + }, + "lines": true, + "linewidth": 1, + "links": [], + "nullPointMode": "null", + "paceLength": 10, + "percentage": false, + "pointradius": 2, + "points": false, + "renderer": "flot", + "seriesOverrides": [], + "spaceLength": 10, + "stack": false, + "steppedLine": false, + "targets": [ + { + "expr": "sum(wls_jms_bytes_current_count{weblogic_domainUID=\"$domainName\", weblogic_clusterName=\"$clusterName\", weblogic_serverName=~\"${serverName:regex}\"}) by (jmsserver)", + "format": "time_series", + "intervalFactor": 1, + "legendFormat": "Current ({{jmsserver}})", + "refId": "A" + }, + { + "expr": "sum(wls_jms_bytes_pending_count{weblogic_domainUID=\"$domainName\", weblogic_clusterName=\"$clusterName\", weblogic_serverName=~\"${serverName:regex}\"}) by (jmsserver)", + "format": "time_series", + "intervalFactor": 1, + "legendFormat": "Pending ({{jmsserver}})", + "refId": "B" + } + ], + "thresholds": [], + "timeFrom": null, + "timeRegions": [], + "timeShift": null, + "title": "Bytes", + "tooltip": { + "shared": 
true, + "sort": 0, + "value_type": "individual" + }, + "type": "graph", + "xaxis": { + "buckets": null, + "mode": "time", + "name": null, + "show": true, + "values": [] + }, + "yaxes": [ + { + "decimals": 0, + "format": "short", + "label": "", + "logBase": 1, + "max": null, + "min": "0", + "show": true + }, + { + "format": "short", + "label": null, + "logBase": 1, + "max": null, + "min": null, + "show": true + } + ], + "yaxis": { + "align": false, + "alignLevel": null + } + }, + { + "aliasColors": {}, + "bars": true, + "dashLength": 10, + "dashes": false, + "datasource": "$datasource", + "fill": 1, + "gridPos": { + "h": 7, + "w": 12, + "x": 0, + "y": 47 + }, + "id": 58, + "legend": { + "avg": false, + "current": false, + "max": false, + "min": false, + "show": true, + "total": false, + "values": false + }, + "lines": true, + "linewidth": 1, + "links": [], + "nullPointMode": "null", + "paceLength": 10, + "percentage": false, + "pointradius": 2, + "points": false, + "renderer": "flot", + "seriesOverrides": [], + "spaceLength": 10, + "stack": false, + "steppedLine": false, + "targets": [ + { + "expr": "sum(irate(wls_jms_messages_received_count{weblogic_domainUID=\"$domainName\", weblogic_clusterName=\"$clusterName\", weblogic_serverName=~\"${serverName:regex}\"}[5m])) by (jmsserver)", + "format": "time_series", + "intervalFactor": 1, + "legendFormat": "{{jmsserver}}", + "refId": "A" + } + ], + "thresholds": [], + "timeFrom": null, + "timeRegions": [], + "timeShift": null, + "title": "Received Message Rate ", + "tooltip": { + "shared": true, + "sort": 0, + "value_type": "individual" + }, + "type": "graph", + "xaxis": { + "buckets": null, + "mode": "time", + "name": null, + "show": true, + "values": [] + }, + "yaxes": [ + { + "decimals": null, + "format": "short", + "label": "", + "logBase": 1, + "max": null, + "min": "0", + "show": true + }, + { + "format": "short", + "label": null, + "logBase": 1, + "max": null, + "min": null, + "show": true + } + ], + "yaxis": { + 
"align": false, + "alignLevel": null + } + }, + { + "aliasColors": {}, + "bars": true, + "dashLength": 10, + "dashes": false, + "datasource": "$datasource", + "fill": 1, + "gridPos": { + "h": 7, + "w": 12, + "x": 12, + "y": 47 + }, + "id": 117, + "legend": { + "avg": false, + "current": false, + "max": false, + "min": false, + "show": true, + "total": false, + "values": false + }, + "lines": true, + "linewidth": 1, + "links": [], + "nullPointMode": "null", + "paceLength": 10, + "percentage": false, + "pointradius": 2, + "points": false, + "renderer": "flot", + "seriesOverrides": [], + "spaceLength": 10, + "stack": false, + "steppedLine": false, + "targets": [ + { + "expr": "sum(irate(wls_jms_bytes_received_count{weblogic_domainUID=\"$domainName\", weblogic_clusterName=\"$clusterName\", weblogic_serverName=~\"${serverName:regex}\"}[5m])) by (jmsserver)", + "format": "time_series", + "intervalFactor": 1, + "legendFormat": "{{jmsserver}}", + "refId": "A" + } + ], + "thresholds": [], + "timeFrom": null, + "timeRegions": [], + "timeShift": null, + "title": "Received Byte Rate", + "tooltip": { + "shared": true, + "sort": 0, + "value_type": "individual" + }, + "type": "graph", + "xaxis": { + "buckets": null, + "mode": "time", + "name": null, + "show": true, + "values": [] + }, + "yaxes": [ + { + "decimals": null, + "format": "short", + "label": "", + "logBase": 1, + "max": null, + "min": "0", + "show": true + }, + { + "format": "short", + "label": null, + "logBase": 1, + "max": null, + "min": null, + "show": true + } + ], + "yaxis": { + "align": false, + "alignLevel": null + } + }, + { + "columns": [], + "datasource": "$datasource", + "fontSize": "100%", + "gridPos": { + "h": 5, + "w": 24, + "x": 0, + "y": 54 + }, + "hideTimeOverride": true, + "id": 119, + "links": [], + "pageSize": null, + "scroll": true, + "showHeader": true, + "sort": { + "col": 3, + "desc": false + }, + "styles": [ + { + "alias": "Time", + "dateFormat": "YYYY-MM-DD HH:mm:ss", + "pattern": "Time", + 
"type": "hidden" + }, + { + "alias": "Destination", + "colorMode": null, + "colors": [ + "rgba(245, 54, 54, 0.9)", + "rgba(237, 129, 40, 0.89)", + "rgba(50, 172, 45, 0.97)" + ], + "dateFormat": "YYYY-MM-DD HH:mm:ss", + "decimals": 2, + "mappingType": 1, + "pattern": "destination", + "thresholds": [], + "type": "string", + "unit": "short" + }, + { + "alias": "Current Consumers", + "colorMode": null, + "colors": [ + "rgba(245, 54, 54, 0.9)", + "rgba(237, 129, 40, 0.89)", + "rgba(50, 172, 45, 0.97)" + ], + "dateFormat": "YYYY-MM-DD HH:mm:ss", + "decimals": 0, + "mappingType": 1, + "pattern": "Value #A", + "thresholds": [], + "type": "number", + "unit": "short" + }, + { + "alias": "Current Msgs", + "colorMode": null, + "colors": [ + "rgba(245, 54, 54, 0.9)", + "rgba(237, 129, 40, 0.89)", + "rgba(50, 172, 45, 0.97)" + ], + "dateFormat": "YYYY-MM-DD HH:mm:ss", + "decimals": 0, + "mappingType": 1, + "pattern": "Value #B", + "thresholds": [], + "type": "number", + "unit": "short" + }, + { + "alias": "Pending Msgs", + "colorMode": null, + "colors": [ + "rgba(245, 54, 54, 0.9)", + "rgba(237, 129, 40, 0.89)", + "rgba(50, 172, 45, 0.97)" + ], + "dateFormat": "YYYY-MM-DD HH:mm:ss", + "decimals": 0, + "mappingType": 1, + "pattern": "Value #C", + "thresholds": [], + "type": "number", + "unit": "short" + }, + { + "alias": "Currrent Bytes", + "colorMode": null, + "colors": [ + "rgba(245, 54, 54, 0.9)", + "rgba(237, 129, 40, 0.89)", + "rgba(50, 172, 45, 0.97)" + ], + "dateFormat": "YYYY-MM-DD HH:mm:ss", + "decimals": 0, + "mappingType": 1, + "pattern": "Value #D", + "thresholds": [], + "type": "number", + "unit": "bytes" + }, + { + "alias": "Pending Bytes", + "colorMode": null, + "colors": [ + "rgba(245, 54, 54, 0.9)", + "rgba(237, 129, 40, 0.89)", + "rgba(50, 172, 45, 0.97)" + ], + "dateFormat": "YYYY-MM-DD HH:mm:ss", + "decimals": 0, + "mappingType": 1, + "pattern": "Value #E", + "thresholds": [], + "type": "number", + "unit": "short" + }, + { + "alias": "Total Msgs", + 
"colorMode": null, + "colors": [ + "rgba(245, 54, 54, 0.9)", + "rgba(237, 129, 40, 0.89)", + "rgba(50, 172, 45, 0.97)" + ], + "dateFormat": "YYYY-MM-DD HH:mm:ss", + "decimals": 0, + "mappingType": 1, + "pattern": "Value #F", + "thresholds": [], + "type": "number", + "unit": "short" + }, + { + "alias": "Total Bytes", + "colorMode": null, + "colors": [ + "rgba(245, 54, 54, 0.9)", + "rgba(237, 129, 40, 0.89)", + "rgba(50, 172, 45, 0.97)" + ], + "dateFormat": "YYYY-MM-DD HH:mm:ss", + "decimals": 0, + "mappingType": 1, + "pattern": "Value #G", + "thresholds": [], + "type": "number", + "unit": "short" + }, + { + "alias": "", + "colorMode": null, + "colors": [ + "rgba(245, 54, 54, 0.9)", + "rgba(237, 129, 40, 0.89)", + "rgba(50, 172, 45, 0.97)" + ], + "decimals": 2, + "pattern": "/.*/", + "thresholds": [], + "type": "hidden", + "unit": "short" + } + ], + "targets": [ + { + "expr": "sum(wls_jms_dest_consumers_current_count{weblogic_domainUID=\"$domainName\", weblogic_clusterName=\"$clusterName\", weblogic_serverName=~\"${serverName:regex}\"}) by (destination)", + "format": "table", + "instant": true, + "intervalFactor": 1, + "refId": "A" + }, + { + "expr": "sum(wls_jms_dest_messages_current_count{weblogic_domainUID=\"$domainName\", weblogic_clusterName=\"$clusterName\", weblogic_serverName=~\"${serverName:regex}\"}) by (destination)", + "format": "table", + "instant": true, + "intervalFactor": 1, + "refId": "B" + }, + { + "expr": "sum(wls_jms_dest_messages_pending_count{weblogic_domainUID=\"$domainName\", weblogic_clusterName=\"$clusterName\", weblogic_serverName=~\"${serverName:regex}\"}) by (destination)", + "format": "table", + "instant": true, + "intervalFactor": 1, + "refId": "C" + }, + { + "expr": "sum(wls_jms_dest_bytes_current_count{weblogic_domainUID=\"$domainName\", weblogic_clusterName=\"$clusterName\", weblogic_serverName=~\"${serverName:regex}\"}) by (destination)", + "format": "table", + "instant": true, + "intervalFactor": 1, + "refId": "D" + }, + { + 
"expr": "sum(wls_jms_dest_bytes_pending_count{weblogic_domainUID=\"$domainName\", weblogic_clusterName=\"$clusterName\", weblogic_serverName=~\"${serverName:regex}\"}) by (destination)", + "format": "table", + "instant": true, + "intervalFactor": 1, + "refId": "E" + }, + { + "expr": "sum(wls_jms_dest_messages_received_count{weblogic_domainUID=\"$domainName\", weblogic_clusterName=\"$clusterName\", weblogic_serverName=~\"${serverName:regex}\"}) by (destination)", + "format": "table", + "instant": true, + "interval": "", + "intervalFactor": 1, + "refId": "F" + }, + { + "expr": "sum(wls_jms_dest_bytes_received_count{weblogic_domainUID=\"$domainName\", weblogic_clusterName=\"$clusterName\", weblogic_serverName=~\"${serverName:regex}\"}) by (destination)", + "format": "table", + "instant": true, + "intervalFactor": 1, + "refId": "G" + } + ], + "timeFrom": null, + "timeShift": null, + "title": "Destinations Overview", + "transform": "table", + "type": "table" + } + ], + "title": "JMS Services", + "type": "row" + } + ], + "refresh": false, + "schemaVersion": 16, + "style": "dark", + "tags": [], + "templating": { + "list": [ + { + "current": { + "text": "prometheus", + "value": "prometheus" + }, + "hide": 0, + "label": null, + "name": "datasource", + "options": [], + "query": "prometheus", + "refresh": 1, + "regex": "", + "type": "datasource" + }, + { + "allValue": null, + "current": {}, + "datasource": "$datasource", + "hide": 0, + "includeAll": false, + "label": "Domain", + "multi": false, + "name": "domainName", + "options": [], + "query": "label_values(weblogic_domainUID)", + "refresh": 1, + "regex": "", + "sort": 2, + "tagValuesQuery": "", + "tags": [], + "tagsQuery": "", + "type": "query", + "useTags": false + }, + { + "allValue": null, + "current": {}, + "datasource": "$datasource", + "hide": 0, + "includeAll": false, + "label": "Cluster", + "multi": false, + "name": "clusterName", + "options": [], + "query": 
"label_values(wls_jvm_uptime{weblogic_domainUID=\"$domainName\"},weblogic_clusterName)", + "refresh": 1, + "regex": "", + "sort": 2, + "tagValuesQuery": "", + "tags": [], + "tagsQuery": "", + "type": "query", + "useTags": false + }, + { + "allValue": null, + "current": {}, + "datasource": "$datasource", + "hide": 0, + "includeAll": true, + "label": "Server", + "multi": true, + "name": "serverName", + "options": [], + "query": "label_values(wls_jvm_uptime{weblogic_domainUID=\"$domainName\", weblogic_clusterName=\"$clusterName\"},weblogic_serverName)", + "refresh": 1, + "regex": "", + "sort": 2, + "tagValuesQuery": "", + "tags": [], + "tagsQuery": "", + "type": "query", + "useTags": false + }, + { + "allValue": null, + "current": { + "selected": false, + "tags": [], + "text": "5", + "value": "5" + }, + "hide": 0, + "includeAll": false, + "label": "Top N", + "multi": false, + "name": "topN", + "options": [ + { + "selected": false, + "text": "3", + "value": "3" + }, + { + "selected": true, + "text": "5", + "value": "5" + }, + { + "selected": false, + "text": "7", + "value": "7" + }, + { + "selected": false, + "text": "10", + "value": "10" + } + ], + "query": "3, 5, 7, 10", + "skipUrlSync": false, + "type": "custom" + } + ] + }, + "time": { + "from": "now-30m", + "to": "now" + }, + "timepicker": { + "hidden": false, + "refresh_intervals": [ + "5s", + "10s", + "30s", + "1m", + "5m", + "15m", + "30m", + "1h", + "2h", + "1d" + ], + "time_options": [ + "5m", + "15m", + "1h", + "6h", + "12h", + "24h", + "2d", + "7d", + "30d" + ] + }, + "timezone": "", + "title": "WebLogic Server Dashboard", + "uid": "5yUwzbZWz", + "version": 6 +} diff --git a/OracleIdentityGovernance/kubernetes/monitoring-service/config/weblogic-server-dashboard.json b/OracleIdentityGovernance/kubernetes/monitoring-service/config/weblogic-server-dashboard.json new file mode 100755 index 000000000..338eb6397 --- /dev/null +++ 
b/OracleIdentityGovernance/kubernetes/monitoring-service/config/weblogic-server-dashboard.json @@ -0,0 +1,3314 @@ +{ + "dashboard": { + "__inputs": [], + "__requires": [ + { + "type": "grafana", + "id": "grafana", + "name": "Grafana", + "version": "5.2.4" + }, + { + "type": "panel", + "id": "graph", + "name": "Graph", + "version": "5.0.0" + }, + { + "type": "panel", + "id": "singlestat", + "name": "Singlestat", + "version": "5.0.0" + }, + { + "type": "panel", + "id": "table", + "name": "Table", + "version": "5.0.0" + } + ], + "annotations": { + "list": [ + { + "builtIn": 1, + "datasource": "-- Grafana --", + "enable": true, + "hide": true, + "iconColor": "rgba(0, 211, 255, 1)", + "name": "Annotations & Alerts", + "type": "dashboard" + } + ] + }, + "editable": true, + "gnetId": null, + "graphTooltip": 0, + "id": null, + "iteration": 1563266678971, + "links": [], + "panels": [ + { + "collapsed": false, + "gridPos": { + "h": 1, + "w": 24, + "x": 0, + "y": 0 + }, + "id": 32, + "panels": [], + "title": "Servers", + "type": "row" + }, + { + "cacheTimeout": null, + "colorBackground": false, + "colorValue": false, + "colors": [ + "#299c46", + "rgba(237, 129, 40, 0.89)", + "#d44a3a" + ], + "datasource": "$datasource", + "decimals": 0, + "format": "none", + "gauge": { + "maxValue": 100, + "minValue": 0, + "show": false, + "thresholdLabels": false, + "thresholdMarkers": true + }, + "gridPos": { + "h": 3, + "w": 13, + "x": 0, + "y": 1 + }, + "hideTimeOverride": true, + "id": 16, + "interval": null, + "links": [], + "mappingType": 1, + "mappingTypes": [ + { + "name": "value to text", + "value": 1 + }, + { + "name": "range to text", + "value": 2 + } + ], + "maxDataPoints": 100, + "nullPointMode": "connected", + "nullText": null, + "postfix": "", + "postfixFontSize": "50%", + "prefix": "", + "prefixFontSize": "50%", + "rangeMaps": [ + { + "from": "null", + "text": "N/A", + "to": "null" + } + ], + "sparkline": { + "fillColor": "rgba(31, 118, 189, 0.18)", + "full": false, + 
"lineColor": "rgb(31, 120, 193)", + "show": false + }, + "tableColumn": "", + "targets": [ + { + "expr": "count(count (wls_jvm_uptime{weblogic_domainUID=\"$domainName\", weblogic_clusterName=\"$clusterName\"}) by (name))", + "format": "time_series", + "intervalFactor": 1, + "legendFormat": "{{weblogic_serverName}}", + "refId": "A" + } + ], + "thresholds": "", + "timeFrom": null, + "timeShift": null, + "title": "Running Servers", + "type": "singlestat", + "valueFontSize": "80%", + "valueMaps": [ + { + "op": "=", + "text": "N/A", + "value": "null" + } + ], + "valueName": "avg" + }, + { + "cacheTimeout": null, + "colorBackground": false, + "colorValue": false, + "colors": [ + "#299c46", + "rgba(237, 129, 40, 0.89)", + "#d44a3a" + ], + "datasource": "$datasource", + "format": "none", + "gauge": { + "maxValue": 100, + "minValue": 0, + "show": false, + "thresholdLabels": false, + "thresholdMarkers": true + }, + "gridPos": { + "h": 3, + "w": 11, + "x": 13, + "y": 1 + }, + "id": 23, + "interval": null, + "links": [], + "mappingType": 1, + "mappingTypes": [ + { + "name": "value to text", + "value": 1 + }, + { + "name": "range to text", + "value": 2 + } + ], + "maxDataPoints": 100, + "nullPointMode": "connected", + "nullText": null, + "postfix": "", + "postfixFontSize": "50%", + "prefix": "", + "prefixFontSize": "50%", + "rangeMaps": [ + { + "from": "null", + "text": "N/A", + "to": "null" + } + ], + "sparkline": { + "fillColor": "rgba(31, 118, 189, 0.18)", + "full": false, + "lineColor": "rgb(31, 120, 193)", + "show": false + }, + "tableColumn": "", + "targets": [ + { + "expr": "count(count(wls_webapp_config_deployment_state{weblogic_domainUID=\"$domainName\", weblogic_clusterName=\"$clusterName\"}) by (app))", + "format": "time_series", + "intervalFactor": 1, + "legendFormat": "", + "refId": "A" + } + ], + "thresholds": "", + "timeFrom": null, + "timeShift": null, + "title": "Deployed Applications", + "type": "singlestat", + "valueFontSize": "80%", + "valueMaps": [ + { + 
"op": "=", + "text": "N/A", + "value": "null" + } + ], + "valueName": "avg" + }, + { + "cacheTimeout": null, + "colorBackground": false, + "colorValue": true, + "colors": [ + "#299c46", + "rgba(237, 129, 40, 0.89)", + "#d44a3a" + ], + "datasource": "$datasource", + "decimals": 1, + "description": "", + "format": "percent", + "gauge": { + "maxValue": 100, + "minValue": 0, + "show": false, + "thresholdLabels": false, + "thresholdMarkers": true + }, + "gridPos": { + "h": 3, + "w": 6, + "x": 0, + "y": 4 + }, + "hideTimeOverride": true, + "id": 104, + "interval": null, + "links": [], + "mappingType": 1, + "mappingTypes": [ + { + "name": "value to text", + "value": 1 + }, + { + "name": "range to text", + "value": 2 + } + ], + "maxDataPoints": 100, + "nullPointMode": "connected", + "nullText": null, + "postfix": "", + "postfixFontSize": "50%", + "prefix": "", + "prefixFontSize": "50%", + "rangeMaps": [ + { + "from": "null", + "text": "N/A", + "to": "null" + } + ], + "repeat": "serverName", + "repeatDirection": "v", + "sparkline": { + "fillColor": "rgba(31, 118, 189, 0.18)", + "full": false, + "lineColor": "rgb(31, 120, 193)", + "show": false + }, + "tableColumn": "weblogic_serverName", + "targets": [ + { + "expr": "wls_server_activation_time{weblogic_domainUID=\"$domainName\", weblogic_clusterName=\"$clusterName\",weblogic_serverName=\"$serverName\"}", + "format": "table", + "hide": false, + "instant": true, + "interval": "10s", + "intervalFactor": 2, + "legendFormat": "", + "refId": "A" + } + ], + "thresholds": "50,80", + "timeFrom": null, + "timeShift": null, + "title": "Server Name", + "type": "singlestat", + "valueFontSize": "50%", + "valueMaps": [ + { + "op": "=", + "text": "", + "value": "" + } + ], + "valueName": "current" + }, + { + "cacheTimeout": null, + "colorBackground": false, + "colorValue": true, + "colors": [ + "#56A64B", + "rgba(237, 129, 40, 0.89)", + "#d44a3a" + ], + "datasource": "$datasource", + "format": "none", + "gauge": { + "maxValue": 100, + 
"minValue": 0, + "show": false, + "thresholdLabels": false, + "thresholdMarkers": true + }, + "gridPos": { + "h": 3, + "w": 4, + "x": 6, + "y": 4 + }, + "id": 84, + "interval": "", + "links": [], + "mappingType": 1, + "mappingTypes": [ + { + "name": "value to text", + "value": 1 + }, + { + "name": "range to text", + "value": 2 + } + ], + "maxDataPoints": 100, + "nullPointMode": "connected", + "nullText": null, + "postfix": "", + "postfixFontSize": "50%", + "prefix": "", + "prefixFontSize": "50%", + "rangeMaps": [ + { + "from": "null", + "text": "N/A", + "to": "null" + } + ], + "repeat": "serverName", + "repeatDirection": "v", + "sparkline": { + "fillColor": "rgba(31, 118, 189, 0.18)", + "full": false, + "lineColor": "rgb(31, 120, 193)", + "show": false + }, + "tableColumn": "", + "targets": [ + { + "expr": "wls_server_state_val{weblogic_domainUID=\"$domainName\", weblogic_clusterName=\"$clusterName\", weblogic_serverName=\"$serverName\"}", + "format": "time_series", + "intervalFactor": 1, + "refId": "A" + } + ], + "thresholds": "", + "timeFrom": null, + "timeShift": null, + "title": "Server Status", + "type": "singlestat", + "valueFontSize": "50%", + "valueMaps": [ + { + "op": "=", + "text": "SHUTDOWN", + "value": "0" + }, + { + "op": "=", + "text": "STARTING", + "value": "1" + }, + { + "op": "=", + "text": "RUNNING", + "value": "2" + }, + { + "op": "=", + "text": "STANDBY", + "value": "3" + }, + { + "op": "=", + "text": "FAILED", + "value": "8" + }, + { + "op": "=", + "text": "FAILED", + "value": "17" + } + ], + "valueName": "current" + }, + { + "cacheTimeout": null, + "colorBackground": false, + "colorValue": true, + "colors": [ + "#299c46", + "rgba(237, 129, 40, 0.89)", + "#d44a3a" + ], + "datasource": "$datasource", + "decimals": 1, + "description": "", + "format": "percent", + "gauge": { + "maxValue": 100, + "minValue": 0, + "show": true, + "thresholdLabels": false, + "thresholdMarkers": true + }, + "gridPos": { + "h": 3, + "w": 4, + "x": 10, + "y": 4 + }, + 
"hideTimeOverride": true, + "id": 27, + "interval": null, + "links": [], + "mappingType": 1, + "mappingTypes": [ + { + "name": "value to text", + "value": 1 + }, + { + "name": "range to text", + "value": 2 + } + ], + "maxDataPoints": 100, + "nullPointMode": "connected", + "nullText": null, + "postfix": "", + "postfixFontSize": "50%", + "prefix": "", + "prefixFontSize": "50%", + "rangeMaps": [ + { + "from": "null", + "text": "N/A", + "to": "null" + } + ], + "repeat": "serverName", + "repeatDirection": "v", + "sparkline": { + "fillColor": "rgba(31, 118, 189, 0.18)", + "full": false, + "lineColor": "rgb(31, 120, 193)", + "show": true + }, + "tableColumn": "instance", + "targets": [ + { + "expr": "100 - wls_jvm_heap_free_percent{weblogic_domainUID=\"$domainName\", weblogic_clusterName=\"$clusterName\", weblogic_serverName=\"$serverName\"}", + "format": "time_series", + "hide": false, + "instant": true, + "interval": "10s", + "intervalFactor": 2, + "legendFormat": "", + "refId": "A" + } + ], + "thresholds": "50,80", + "timeFrom": null, + "timeShift": null, + "title": "Heap Usage", + "type": "singlestat", + "valueFontSize": "80%", + "valueMaps": [ + { + "op": "=", + "text": "", + "value": "" + } + ], + "valueName": "current" + }, + { + "cacheTimeout": null, + "colorBackground": false, + "colorPostfix": false, + "colorValue": true, + "colors": [ + "#299c46", + "rgba(237, 129, 40, 0.89)", + "#d44a3a" + ], + "datasource": "$datasource", + "decimals": 1, + "description": "", + "format": "ms", + "gauge": { + "maxValue": 100, + "minValue": 0, + "show": false, + "thresholdLabels": false, + "thresholdMarkers": true + }, + "gridPos": { + "h": 3, + "w": 5, + "x": 14, + "y": 4 + }, + "hideTimeOverride": true, + "id": 91, + "interval": null, + "links": [], + "mappingType": 1, + "mappingTypes": [ + { + "name": "value to text", + "value": 1 + }, + { + "name": "range to text", + "value": 2 + } + ], + "maxDataPoints": 100, + "nullPointMode": "connected", + "nullText": null, + "postfix": 
"", + "postfixFontSize": "50%", + "prefix": "", + "prefixFontSize": "50%", + "rangeMaps": [ + { + "from": "null", + "text": "N/A", + "to": "null" + } + ], + "repeat": "serverName", + "repeatDirection": "v", + "sparkline": { + "fillColor": "rgba(31, 118, 189, 0.18)", + "full": false, + "lineColor": "rgb(31, 120, 193)", + "show": false + }, + "tableColumn": "instance", + "targets": [ + { + "expr": "wls_jvm_uptime{weblogic_domainUID=\"$domainName\", weblogic_clusterName=\"$clusterName\", weblogic_serverName=\"$serverName\"}", + "format": "time_series", + "hide": false, + "instant": true, + "interval": "10s", + "intervalFactor": 2, + "legendFormat": "", + "refId": "A" + } + ], + "thresholds": "50,80", + "timeFrom": null, + "timeShift": null, + "title": "Running Time", + "type": "singlestat", + "valueFontSize": "50%", + "valueMaps": [ + { + "op": "=", + "text": "", + "value": "" + } + ], + "valueName": "current" + }, + { + "cacheTimeout": null, + "colorBackground": false, + "colorPostfix": false, + "colorValue": true, + "colors": [ + "#299c46", + "rgba(237, 129, 40, 0.89)", + "#d44a3a" + ], + "datasource": "$datasource", + "decimals": 0, + "description": "", + "format": "short", + "gauge": { + "maxValue": 100, + "minValue": 0, + "show": false, + "thresholdLabels": false, + "thresholdMarkers": true + }, + "gridPos": { + "h": 3, + "w": 5, + "x": 19, + "y": 4 + }, + "hideTimeOverride": true, + "id": 96, + "interval": null, + "links": [], + "mappingType": 1, + "mappingTypes": [ + { + "name": "value to text", + "value": 1 + }, + { + "name": "range to text", + "value": 2 + } + ], + "maxDataPoints": 100, + "nullPointMode": "connected", + "nullText": null, + "postfix": "", + "postfixFontSize": "50%", + "prefix": "", + "prefixFontSize": "50%", + "rangeMaps": [ + { + "from": "null", + "text": "N/A", + "to": "null" + } + ], + "repeat": "serverName", + "repeatDirection": "v", + "sparkline": { + "fillColor": "rgba(31, 118, 189, 0.18)", + "full": false, + "lineColor": "rgb(31, 120, 
193)", + "show": false + }, + "tableColumn": "instance", + "targets": [ + { + "expr": "wls_server_open_sockets_current_count{weblogic_domainUID=\"$domainName\", weblogic_clusterName=\"$clusterName\", weblogic_serverName=\"$serverName\"}", + "format": "time_series", + "hide": false, + "instant": true, + "interval": "10s", + "intervalFactor": 2, + "legendFormat": "", + "refId": "A" + } + ], + "thresholds": "50,80", + "timeFrom": null, + "timeShift": null, + "title": "Open Sockets", + "type": "singlestat", + "valueFontSize": "80%", + "valueMaps": [ + { + "op": "=", + "text": "", + "value": "" + } + ], + "valueName": "current" + }, + { + "aliasColors": { + " heap free managed-server-1": "super-light-green", + " heap free managed-server-2": "dark-green", + "heap size managed-server-1 ": "super-light-red", + "heap size managed-server-2 ": "dark-red" + }, + "bars": false, + "dashLength": 10, + "dashes": false, + "datasource": "$datasource", + "fill": 1, + "gridPos": { + "h": 10, + "w": 24, + "x": 0, + "y": 7 + }, + "id": 12, + "legend": { + "alignAsTable": true, + "avg": true, + "current": true, + "max": true, + "min": false, + "rightSide": false, + "show": true, + "total": false, + "values": true + }, + "lines": true, + "linewidth": 1, + "links": [], + "nullPointMode": "null", + "paceLength": 10, + "percentage": false, + "pointradius": 2, + "points": false, + "renderer": "flot", + "seriesOverrides": [], + "spaceLength": 10, + "stack": false, + "steppedLine": false, + "targets": [ + { + "expr": "wls_jvm_heap_free_current{weblogic_domainUID=\"$domainName\", weblogic_clusterName=\"$clusterName\", weblogic_serverName=~\"${serverName:regex}\"}", + "format": "time_series", + "intervalFactor": 1, + "legendFormat": " Heap Free ({{weblogic_serverName}})", + "refId": "B" + }, + { + "expr": "wls_jvm_heap_size_current{weblogic_domainUID=\"$domainName\", weblogic_clusterName=\"$clusterName\", weblogic_serverName=~\"${serverName:regex}\"}", + "format": "time_series", + "instant": 
false, + "intervalFactor": 1, + "legendFormat": "Heap Size ({{weblogic_serverName}})", + "refId": "A" + }, + { + "expr": "wls_jvm_heap_size_max{weblogic_domainUID=\"$domainName\", weblogic_clusterName=\"$clusterName\", weblogic_serverName=~\"${serverName:regex}\"}", + "format": "time_series", + "hide": true, + "intervalFactor": 1, + "legendFormat": "Heap Max ({{weblogic_serverName}})", + "refId": "C" + } + ], + "thresholds": [], + "timeFrom": null, + "timeRegions": [], + "timeShift": null, + "title": "JVM Heap", + "tooltip": { + "shared": true, + "sort": 0, + "value_type": "individual" + }, + "type": "graph", + "xaxis": { + "buckets": null, + "mode": "time", + "name": null, + "show": true, + "values": [] + }, + "yaxes": [ + { + "format": "decbytes", + "label": null, + "logBase": 1, + "max": null, + "min": null, + "show": true + }, + { + "format": "short", + "label": null, + "logBase": 1, + "max": null, + "min": null, + "show": true + } + ], + "yaxis": { + "align": false, + "alignLevel": null + } + }, + { + "aliasColors": { + " heap free managed-server-1": "super-light-green", + " heap free managed-server-2": "dark-green", + "heap size managed-server-1 ": "super-light-red", + "heap size managed-server-2 ": "dark-red" + }, + "bars": false, + "dashLength": 10, + "dashes": false, + "datasource": "$datasource", + "fill": 1, + "gridPos": { + "h": 7, + "w": 12, + "x": 0, + "y": 17 + }, + "id": 21, + "legend": { + "alignAsTable": false, + "avg": false, + "current": true, + "max": false, + "min": false, + "rightSide": false, + "show": true, + "total": false, + "values": true + }, + "lines": true, + "linewidth": 1, + "links": [], + "nullPointMode": "null", + "paceLength": 10, + "percentage": false, + "pointradius": 2, + "points": false, + "renderer": "flot", + "seriesOverrides": [], + "spaceLength": 10, + "stack": false, + "steppedLine": false, + "targets": [ + { + "expr": "wls_jvm_process_cpu_load{weblogic_domainUID=~\"$domainName\", weblogic_clusterName=~\"$clusterName\", 
weblogic_serverName=~\"${serverName:regex}\"} * 100", + "format": "time_series", + "intervalFactor": 1, + "legendFormat": " {{weblogic_serverName}}", + "refId": "B" + } + ], + "thresholds": [], + "timeFrom": null, + "timeRegions": [], + "timeShift": null, + "title": "CPU Load", + "tooltip": { + "shared": true, + "sort": 0, + "value_type": "individual" + }, + "type": "graph", + "xaxis": { + "buckets": null, + "mode": "time", + "name": null, + "show": true, + "values": [] + }, + "yaxes": [ + { + "format": "percent", + "label": null, + "logBase": 1, + "max": null, + "min": null, + "show": true + }, + { + "format": "short", + "label": null, + "logBase": 1, + "max": null, + "min": null, + "show": true + } + ], + "yaxis": { + "align": false, + "alignLevel": null + } + }, + { + "aliasColors": {}, + "bars": false, + "dashLength": 10, + "dashes": false, + "datasource": "$datasource", + "fill": 1, + "gridPos": { + "h": 7, + "w": 12, + "x": 12, + "y": 17 + }, + "id": 10, + "legend": { + "alignAsTable": false, + "avg": false, + "current": true, + "max": false, + "min": false, + "rightSide": false, + "show": true, + "total": false, + "values": true + }, + "lines": true, + "linewidth": 1, + "links": [], + "nullPointMode": "null", + "paceLength": 10, + "percentage": false, + "pointradius": 2, + "points": false, + "renderer": "flot", + "seriesOverrides": [], + "spaceLength": 10, + "stack": false, + "steppedLine": false, + "targets": [ + { + "expr": "wls_threadpool_execute_thread_total_count{weblogic_domainUID=\"$domainName\", weblogic_clusterName=\"$clusterName\", weblogic_serverName=~\"${serverName:regex}\"}", + "format": "time_series", + "intervalFactor": 1, + "legendFormat": "Total Threads ({{weblogic_serverName}})", + "refId": "A" + }, + { + "expr": "wls_threadpool_stuck_thread_count{weblogic_domainUID=\"$domainName\", weblogic_clusterName=\"$clusterName\", weblogic_serverName=~\"${serverName:regex}\"}", + "format": "time_series", + "intervalFactor": 1, + "legendFormat": 
"Stuck Threads ({{weblogic_serverName}})", + "refId": "D" + }, + { + "expr": "wls_threadpool_queue_length{weblogic_domainUID=\"$domainName\", weblogic_clusterName=\"$clusterName\", weblogic_serverName=~\"${serverName:regex}\"}", + "format": "time_series", + "hide": true, + "intervalFactor": 1, + "legendFormat": "queue", + "refId": "C" + }, + { + "expr": "wls_threadpool_hogging_thread_count{weblogic_domainUID=\"$domainName\", weblogic_clusterName=\"$clusterName\", weblogic_serverName=~\"${serverName:regex}\"}", + "format": "time_series", + "hide": true, + "intervalFactor": 1, + "legendFormat": "hogging", + "refId": "B" + } + ], + "thresholds": [], + "timeFrom": null, + "timeRegions": [], + "timeShift": null, + "title": "Thread Pool", + "tooltip": { + "shared": true, + "sort": 0, + "value_type": "individual" + }, + "type": "graph", + "xaxis": { + "buckets": null, + "mode": "time", + "name": null, + "show": true, + "values": [] + }, + "yaxes": [ + { + "format": "short", + "label": null, + "logBase": 1, + "max": null, + "min": null, + "show": true + }, + { + "format": "short", + "label": null, + "logBase": 1, + "max": null, + "min": null, + "show": true + } + ], + "yaxis": { + "align": false, + "alignLevel": null + } + }, + { + "collapsed": true, + "gridPos": { + "h": 1, + "w": 24, + "x": 0, + "y": 24 + }, + "id": 35, + "panels": [ + { + "columns": [], + "datasource": "$datasource", + "fontSize": "100%", + "gridPos": { + "h": 7, + "w": 8, + "x": 0, + "y": 28 + }, + "hideTimeOverride": true, + "id": 126, + "links": [], + "pageSize": null, + "scroll": true, + "showHeader": true, + "sort": { + "col": 13, + "desc": true + }, + "styles": [ + { + "alias": "Time", + "dateFormat": "YYYY-MM-DD HH:mm:ss", + "pattern": "Time", + "type": "hidden" + }, + { + "alias": "Webapp", + "colorMode": null, + "colors": [ + "rgba(245, 54, 54, 0.9)", + "rgba(237, 129, 40, 0.89)", + "rgba(50, 172, 45, 0.97)" + ], + "dateFormat": "YYYY-MM-DD HH:mm:ss", + "decimals": 2, + "mappingType": 1, + 
"pattern": "app", + "thresholds": [], + "type": "string", + "unit": "short" + }, + { + "alias": "Total Sessions", + "colorMode": null, + "colors": [ + "rgba(245, 54, 54, 0.9)", + "rgba(237, 129, 40, 0.89)", + "rgba(50, 172, 45, 0.97)" + ], + "dateFormat": "YYYY-MM-DD HH:mm:ss", + "decimals": 0, + "mappingType": 1, + "pattern": "Value", + "thresholds": [], + "type": "number", + "unit": "short" + }, + { + "alias": "", + "colorMode": null, + "colors": [ + "rgba(245, 54, 54, 0.9)", + "rgba(237, 129, 40, 0.89)", + "rgba(50, 172, 45, 0.97)" + ], + "decimals": 2, + "pattern": "/.*/", + "thresholds": [], + "type": "hidden", + "unit": "short" + } + ], + "targets": [ + { + "expr": "topk($topN,sum(wls_webapp_config_sessions_opened_total_count{weblogic_domainUID=\"$domainName\", weblogic_clusterName=\"$clusterName\", weblogic_serverName=~\"${serverName:regex}\"}) by (app))", + "format": "table", + "instant": true, + "intervalFactor": 1, + "refId": "B" + } + ], + "timeFrom": null, + "timeShift": null, + "title": "Total Sessions (top $topN)", + "transform": "table", + "type": "table" + }, + { + "columns": [], + "datasource": "$datasource", + "fontSize": "100%", + "gridPos": { + "h": 7, + "w": 8, + "x": 8, + "y": 28 + }, + "hideTimeOverride": true, + "id": 136, + "links": [], + "pageSize": null, + "scroll": true, + "showHeader": true, + "sort": { + "col": 13, + "desc": true + }, + "styles": [ + { + "alias": "Time", + "dateFormat": "YYYY-MM-DD HH:mm:ss", + "pattern": "Time", + "type": "hidden" + }, + { + "alias": "Webapp", + "colorMode": null, + "colors": [ + "rgba(245, 54, 54, 0.9)", + "rgba(237, 129, 40, 0.89)", + "rgba(50, 172, 45, 0.97)" + ], + "dateFormat": "YYYY-MM-DD HH:mm:ss", + "decimals": 2, + "mappingType": 1, + "pattern": "app", + "thresholds": [], + "type": "string", + "unit": "short" + }, + { + "alias": "Total Requests", + "colorMode": null, + "colors": [ + "rgba(245, 54, 54, 0.9)", + "rgba(237, 129, 40, 0.89)", + "rgba(50, 172, 45, 0.97)" + ], + "dateFormat": 
"YYYY-MM-DD HH:mm:ss", + "decimals": 0, + "mappingType": 1, + "pattern": "Value", + "thresholds": [], + "type": "number", + "unit": "short" + }, + { + "alias": "", + "colorMode": null, + "colors": [ + "rgba(245, 54, 54, 0.9)", + "rgba(237, 129, 40, 0.89)", + "rgba(50, 172, 45, 0.97)" + ], + "decimals": 2, + "pattern": "/.*/", + "thresholds": [], + "type": "hidden", + "unit": "short" + } + ], + "targets": [ + { + "expr": "topk($topN,sum(wls_servlet_invocation_total_count{weblogic_domainUID=\"$domainName\", weblogic_clusterName=\"$clusterName\", weblogic_serverName=~\"${serverName:regex}\"}) by (app))", + "format": "table", + "instant": true, + "intervalFactor": 1, + "refId": "B" + } + ], + "timeFrom": null, + "timeShift": null, + "title": "Total Requests (top $topN)", + "transform": "table", + "type": "table" + }, + { + "columns": [], + "datasource": "$datasource", + "fontSize": "100%", + "gridPos": { + "h": 7, + "w": 8, + "x": 16, + "y": 28 + }, + "hideTimeOverride": true, + "id": 134, + "links": [], + "pageSize": null, + "scroll": true, + "showHeader": true, + "sort": { + "col": 13, + "desc": true + }, + "styles": [ + { + "alias": "Time", + "dateFormat": "YYYY-MM-DD HH:mm:ss", + "pattern": "Time", + "type": "hidden" + }, + { + "alias": "Webapp", + "colorMode": null, + "colors": [ + "rgba(245, 54, 54, 0.9)", + "rgba(237, 129, 40, 0.89)", + "rgba(50, 172, 45, 0.97)" + ], + "dateFormat": "YYYY-MM-DD HH:mm:ss", + "decimals": 2, + "mappingType": 1, + "pattern": "app", + "thresholds": [], + "type": "string", + "unit": "short" + }, + { + "alias": "Total Time", + "colorMode": null, + "colors": [ + "rgba(245, 54, 54, 0.9)", + "rgba(237, 129, 40, 0.89)", + "rgba(50, 172, 45, 0.97)" + ], + "dateFormat": "YYYY-MM-DD HH:mm:ss", + "decimals": 0, + "mappingType": 1, + "pattern": "Value", + "thresholds": [], + "type": "number", + "unit": "ms" + }, + { + "alias": "", + "colorMode": null, + "colors": [ + "rgba(245, 54, 54, 0.9)", + "rgba(237, 129, 40, 0.89)", + "rgba(50, 172, 45, 
0.97)" + ], + "decimals": 2, + "pattern": "/.*/", + "thresholds": [], + "type": "hidden", + "unit": "short" + } + ], + "targets": [ + { + "expr": "topk($topN,sum(wls_servlet_execution_time_total{weblogic_domainUID=\"$domainName\", weblogic_clusterName=\"$clusterName\", weblogic_serverName=~\"${serverName:regex}\"}) by (app))", + "format": "table", + "instant": true, + "intervalFactor": 1, + "refId": "B" + } + ], + "timeFrom": null, + "timeShift": null, + "title": "Total Execution Time (top $topN)", + "transform": "table", + "type": "table" + }, + { + "aliasColors": {}, + "bars": false, + "dashLength": 10, + "dashes": false, + "datasource": "$datasource", + "fill": 1, + "gridPos": { + "h": 8, + "w": 12, + "x": 0, + "y": 35 + }, + "id": 14, + "legend": { + "alignAsTable": false, + "avg": false, + "current": false, + "max": false, + "min": false, + "rightSide": false, + "show": true, + "sort": "current", + "sortDesc": true, + "total": false, + "values": false + }, + "lines": true, + "linewidth": 1, + "links": [], + "nullPointMode": "null", + "paceLength": 10, + "percentage": false, + "pointradius": 2, + "points": false, + "renderer": "flot", + "seriesOverrides": [], + "spaceLength": 10, + "stack": false, + "steppedLine": false, + "targets": [ + { + "expr": "sum(wls_webapp_config_open_sessions_current_count{weblogic_domainUID=\"$domainName\", weblogic_clusterName=\"$clusterName\", weblogic_serverName=~\"${serverName:regex}\"}) by (app)", + "format": "time_series", + "interval": "", + "intervalFactor": 1, + "legendFormat": "{{app}}", + "refId": "A" + }, + { + "expr": "", + "format": "time_series", + "intervalFactor": 1, + "refId": "B" + } + ], + "thresholds": [], + "timeFrom": null, + "timeRegions": [], + "timeShift": null, + "title": "Current Sessions ", + "tooltip": { + "shared": true, + "sort": 0, + "value_type": "individual" + }, + "type": "graph", + "xaxis": { + "buckets": null, + "mode": "time", + "name": null, + "show": true, + "values": [] + }, + "yaxes": [ + { 
+ "format": "short", + "label": "", + "logBase": 1, + "max": null, + "min": null, + "show": true + }, + { + "format": "short", + "label": null, + "logBase": 1, + "max": null, + "min": null, + "show": true + } + ], + "yaxis": { + "align": false, + "alignLevel": null + } + }, + { + "aliasColors": {}, + "bars": true, + "dashLength": 10, + "dashes": false, + "datasource": "$datasource", + "fill": 1, + "gridPos": { + "h": 8, + "w": 12, + "x": 12, + "y": 35 + }, + "id": 128, + "legend": { + "alignAsTable": false, + "avg": false, + "current": false, + "max": false, + "min": false, + "rightSide": false, + "show": true, + "sort": "current", + "sortDesc": true, + "total": false, + "values": false + }, + "lines": false, + "linewidth": 1, + "links": [], + "nullPointMode": "null", + "paceLength": 10, + "percentage": false, + "pointradius": 2, + "points": false, + "renderer": "flot", + "seriesOverrides": [], + "spaceLength": 10, + "stack": false, + "steppedLine": false, + "targets": [ + { + "expr": " sum(irate(wls_webapp_config_sessions_opened_total_count{weblogic_domainUID=\"$domainName\", weblogic_clusterName=\"$clusterName\", weblogic_serverName=~\"${serverName:regex}\"}[5m])) by (app)", + "format": "time_series", + "intervalFactor": 1, + "legendFormat": "{{app}}", + "refId": "A" + } + ], + "thresholds": [], + "timeFrom": null, + "timeRegions": [], + "timeShift": null, + "title": "Session Rate ", + "tooltip": { + "shared": true, + "sort": 0, + "value_type": "individual" + }, + "type": "graph", + "xaxis": { + "buckets": null, + "mode": "time", + "name": null, + "show": true, + "values": [] + }, + "yaxes": [ + { + "decimals": 0, + "format": "short", + "label": "per second", + "logBase": 1, + "max": null, + "min": "0", + "show": true + }, + { + "format": "short", + "label": null, + "logBase": 1, + "max": null, + "min": null, + "show": true + } + ], + "yaxis": { + "align": false, + "alignLevel": null + } + }, + { + "aliasColors": {}, + "bars": false, + "dashLength": 10, + 
"dashes": false, + "datasource": "$datasource", + "fill": 1, + "gridPos": { + "h": 7, + "w": 12, + "x": 0, + "y": 43 + }, + "id": 132, + "legend": { + "alignAsTable": false, + "avg": false, + "current": false, + "max": false, + "min": false, + "rightSide": false, + "show": true, + "sort": "current", + "sortDesc": true, + "total": false, + "values": false + }, + "lines": true, + "linewidth": 1, + "links": [], + "nullPointMode": "null", + "paceLength": 10, + "percentage": false, + "pointradius": 2, + "points": false, + "renderer": "flot", + "seriesOverrides": [], + "spaceLength": 10, + "stack": false, + "steppedLine": false, + "targets": [ + { + "expr": "(sum(wls_servlet_execution_time_average{weblogic_domainUID=\"$domainName\", weblogic_clusterName=\"$clusterName\", weblogic_serverName=~\"${serverName:regex}\"}) by (app)) / (count(wls_servlet_execution_time_average{weblogic_domainUID=\"domain1\", weblogic_clusterName=\"cluster-1\"}) by (app))", + "format": "time_series", + "interval": "", + "intervalFactor": 1, + "legendFormat": "{{app}}", + "refId": "A" + } + ], + "thresholds": [], + "timeFrom": null, + "timeRegions": [], + "timeShift": null, + "title": "Execution Time per Request ", + "tooltip": { + "shared": true, + "sort": 0, + "value_type": "individual" + }, + "type": "graph", + "xaxis": { + "buckets": null, + "mode": "time", + "name": null, + "show": true, + "values": [] + }, + "yaxes": [ + { + "format": "ms", + "label": "", + "logBase": 1, + "max": null, + "min": null, + "show": true + }, + { + "format": "short", + "label": null, + "logBase": 1, + "max": null, + "min": null, + "show": true + } + ], + "yaxis": { + "align": false, + "alignLevel": null + } + }, + { + "aliasColors": {}, + "bars": true, + "dashLength": 10, + "dashes": false, + "datasource": "$datasource", + "fill": 1, + "gridPos": { + "h": 7, + "w": 12, + "x": 12, + "y": 43 + }, + "id": 138, + "legend": { + "alignAsTable": false, + "avg": false, + "current": false, + "max": false, + "min": false, 
+ "rightSide": false, + "show": true, + "sort": "current", + "sortDesc": true, + "total": false, + "values": false + }, + "lines": false, + "linewidth": 1, + "links": [], + "nullPointMode": "null", + "paceLength": 10, + "percentage": false, + "pointradius": 2, + "points": false, + "renderer": "flot", + "seriesOverrides": [], + "spaceLength": 10, + "stack": false, + "steppedLine": false, + "targets": [ + { + "expr": "sum(irate(wls_servlet_invocation_total_count{weblogic_domainUID=\"$domainName\", weblogic_clusterName=\"$clusterName\", weblogic_serverName=~\"${serverName:regex}\"}[5m])) by (app)", + "format": "time_series", + "intervalFactor": 1, + "legendFormat": "{{app}}", + "refId": "A" + } + ], + "thresholds": [], + "timeFrom": null, + "timeRegions": [], + "timeShift": null, + "title": "Request Rate ", + "tooltip": { + "shared": true, + "sort": 0, + "value_type": "individual" + }, + "type": "graph", + "xaxis": { + "buckets": null, + "mode": "time", + "name": null, + "show": true, + "values": [] + }, + "yaxes": [ + { + "decimals": 0, + "format": "short", + "label": "per second", + "logBase": 1, + "max": null, + "min": "0", + "show": true + }, + { + "format": "short", + "label": null, + "logBase": 1, + "max": null, + "min": null, + "show": true + } + ], + "yaxis": { + "align": false, + "alignLevel": null + } + } + ], + "title": "Web Applications", + "type": "row" + }, + { + "collapsed": true, + "gridPos": { + "h": 1, + "w": 24, + "x": 0, + "y": 25 + }, + "id": 43, + "panels": [ + { + "columns": [], + "datasource": "$datasource", + "fontSize": "100%", + "gridPos": { + "h": 4, + "w": 24, + "x": 0, + "y": 29 + }, + "hideTimeOverride": true, + "id": 111, + "links": [], + "pageSize": null, + "scroll": true, + "showHeader": true, + "sort": { + "col": 0, + "desc": true + }, + "styles": [ + { + "alias": "Time", + "dateFormat": "YYYY-MM-DD HH:mm:ss", + "pattern": "Time", + "type": "hidden" + }, + { + "alias": "Server", + "colorMode": null, + "colors": [ + "rgba(245, 54, 54, 
0.9)", + "rgba(237, 129, 40, 0.89)", + "rgba(50, 172, 45, 0.97)" + ], + "dateFormat": "YYYY-MM-DD HH:mm:ss", + "decimals": 2, + "mappingType": 1, + "pattern": "weblogic_serverName", + "thresholds": [], + "type": "string", + "unit": "short" + }, + { + "alias": "Name", + "colorMode": null, + "colors": [ + "rgba(245, 54, 54, 0.9)", + "rgba(237, 129, 40, 0.89)", + "rgba(50, 172, 45, 0.97)" + ], + "dateFormat": "YYYY-MM-DD HH:mm:ss", + "decimals": 2, + "mappingType": 1, + "pattern": "name", + "thresholds": [], + "type": "string", + "unit": "short" + }, + { + "alias": "Active Connections", + "colorMode": null, + "colors": [ + "rgba(245, 54, 54, 0.9)", + "rgba(237, 129, 40, 0.89)", + "rgba(50, 172, 45, 0.97)" + ], + "dateFormat": "YYYY-MM-DD HH:mm:ss", + "decimals": 0, + "mappingType": 1, + "pattern": "Value #A", + "thresholds": [], + "type": "number", + "unit": "short" + }, + { + "alias": "Current Capacity", + "colorMode": null, + "colors": [ + "rgba(245, 54, 54, 0.9)", + "rgba(237, 129, 40, 0.89)", + "rgba(50, 172, 45, 0.97)" + ], + "dateFormat": "YYYY-MM-DD HH:mm:ss", + "decimals": 0, + "mappingType": 1, + "pattern": "Value #C", + "thresholds": [], + "type": "number", + "unit": "short" + }, + { + "alias": "Total Connections", + "colorMode": null, + "colors": [ + "rgba(245, 54, 54, 0.9)", + "rgba(237, 129, 40, 0.89)", + "rgba(50, 172, 45, 0.97)" + ], + "dateFormat": "YYYY-MM-DD HH:mm:ss", + "decimals": 0, + "mappingType": 1, + "pattern": "Value #D", + "thresholds": [], + "type": "number", + "unit": "short" + }, + { + "alias": "Total Connections", + "colorMode": null, + "colors": [ + "rgba(245, 54, 54, 0.9)", + "rgba(237, 129, 40, 0.89)", + "rgba(50, 172, 45, 0.97)" + ], + "dateFormat": "YYYY-MM-DD HH:mm:ss", + "decimals": 0, + "mappingType": 1, + "pattern": "Value #D", + "thresholds": [], + "type": "number", + "unit": "short" + }, + { + "alias": "", + "colorMode": null, + "colors": [ + "rgba(245, 54, 54, 0.9)", + "rgba(237, 129, 40, 0.89)", + "rgba(50, 172, 45, 0.97)" + 
], + "decimals": 2, + "pattern": "/.*/", + "thresholds": [], + "type": "hidden", + "unit": "short" + } + ], + "targets": [ + { + "expr": "sum(wls_datasource_curr_capacity{weblogic_domainUID=\"$domainName\", weblogic_clusterName=\"$clusterName\", weblogic_serverName=~\"${serverName:regex}\"}) by (weblogic_serverName,name)", + "format": "table", + "instant": true, + "intervalFactor": 1, + "refId": "C" + }, + { + "expr": "sum(wls_datasource_active_connections_current_count{weblogic_domainUID=\"$domainName\", weblogic_clusterName=\"$clusterName\", weblogic_serverName=~\"${serverName:regex}\"}) by (weblogic_serverName,name)", + "format": "table", + "instant": true, + "intervalFactor": 1, + "refId": "A" + }, + { + "expr": "sum(wls_datasource_connections_total_count{weblogic_domainUID=\"$domainName\", weblogic_clusterName=\"$clusterName\", weblogic_serverName=~\"${serverName:regex}\"}) by (weblogic_serverName,name)", + "format": "table", + "instant": true, + "intervalFactor": 1, + "refId": "D" + } + ], + "timeFrom": null, + "timeShift": null, + "title": "Overview", + "transform": "table", + "type": "table" + }, + { + "aliasColors": {}, + "bars": false, + "dashLength": 10, + "dashes": false, + "datasource": "$datasource", + "fill": 1, + "gridPos": { + "h": 8, + "w": 13, + "x": 0, + "y": 33 + }, + "id": 50, + "legend": { + "alignAsTable": false, + "avg": false, + "current": false, + "max": false, + "min": false, + "rightSide": false, + "show": true, + "sort": "current", + "sortDesc": true, + "total": false, + "values": false + }, + "lines": true, + "linewidth": 1, + "links": [], + "nullPointMode": "null", + "paceLength": 10, + "percentage": false, + "pointradius": 2, + "points": false, + "renderer": "flot", + "seriesOverrides": [], + "spaceLength": 10, + "stack": false, + "steppedLine": false, + "targets": [ + { + "expr": "wls_datasource_active_connections_current_count{weblogic_domainUID=\"$domainName\", weblogic_clusterName=\"$clusterName\", 
weblogic_serverName=~\"${serverName:regex}\"}", + "format": "time_series", + "intervalFactor": 1, + "legendFormat": " {{name}} @ {{weblogic_serverName}}", + "refId": "A" + } + ], + "thresholds": [], + "timeFrom": null, + "timeRegions": [], + "timeShift": null, + "title": "Active Connections", + "tooltip": { + "shared": true, + "sort": 0, + "value_type": "individual" + }, + "type": "graph", + "xaxis": { + "buckets": null, + "mode": "time", + "name": null, + "show": true, + "values": [] + }, + "yaxes": [ + { + "decimals": 0, + "format": "short", + "label": null, + "logBase": 1, + "max": null, + "min": "0", + "show": true + }, + { + "format": "short", + "label": null, + "logBase": 1, + "max": null, + "min": null, + "show": true + } + ], + "yaxis": { + "align": false, + "alignLevel": null + } + }, + { + "aliasColors": {}, + "bars": true, + "dashLength": 10, + "dashes": false, + "datasource": "$datasource", + "fill": 1, + "gridPos": { + "h": 8, + "w": 11, + "x": 13, + "y": 33 + }, + "id": 71, + "legend": { + "alignAsTable": false, + "avg": false, + "current": false, + "max": false, + "min": false, + "rightSide": false, + "show": true, + "total": false, + "values": false + }, + "lines": true, + "linewidth": 1, + "links": [], + "nullPointMode": "null", + "paceLength": 10, + "percentage": false, + "pointradius": 2, + "points": false, + "renderer": "flot", + "seriesOverrides": [], + "spaceLength": 10, + "stack": false, + "steppedLine": false, + "targets": [ + { + "expr": "irate(wls_datasource_connections_total_count{weblogic_domainUID=\"$domainName\", weblogic_clusterName=\"$clusterName\", weblogic_serverName=~\"${serverName:regex}\"}[5m])", + "format": "time_series", + "intervalFactor": 1, + "legendFormat": " {{name}} @ {{weblogic_serverName}}", + "refId": "A" + } + ], + "thresholds": [], + "timeFrom": null, + "timeRegions": [], + "timeShift": null, + "title": "Connection Rate", + "tooltip": { + "shared": true, + "sort": 0, + "value_type": "individual" + }, + "type": 
"graph", + "xaxis": { + "buckets": null, + "mode": "time", + "name": null, + "show": true, + "values": [] + }, + "yaxes": [ + { + "decimals": 0, + "format": "short", + "label": "per second", + "logBase": 1, + "max": null, + "min": "0", + "show": true + }, + { + "format": "short", + "label": null, + "logBase": 1, + "max": null, + "min": null, + "show": true + } + ], + "yaxis": { + "align": false, + "alignLevel": null + } + }, + { + "aliasColors": {}, + "bars": false, + "dashLength": 10, + "dashes": false, + "datasource": "$datasource", + "fill": 1, + "gridPos": { + "h": 9, + "w": 11, + "x": 0, + "y": 41 + }, + "id": 46, + "legend": { + "alignAsTable": false, + "avg": false, + "current": false, + "max": false, + "min": false, + "show": true, + "total": false, + "values": false + }, + "lines": true, + "linewidth": 1, + "links": [], + "nullPointMode": "null", + "paceLength": 10, + "percentage": false, + "pointradius": 2, + "points": false, + "renderer": "flot", + "seriesOverrides": [], + "spaceLength": 10, + "stack": false, + "steppedLine": false, + "targets": [ + { + "expr": "wls_datasource_waiting_for_connection_current_count{weblogic_domainUID=\"$domainName\", weblogic_clusterName=\"$clusterName\", weblogic_serverName=~\"${serverName:regex}\"}", + "format": "time_series", + "intervalFactor": 1, + "legendFormat": " {{name}} @ {{weblogic_serverName}}", + "refId": "A" + } + ], + "thresholds": [], + "timeFrom": null, + "timeRegions": [], + "timeShift": null, + "title": "Pending Connection Requests", + "tooltip": { + "shared": true, + "sort": 0, + "value_type": "individual" + }, + "type": "graph", + "xaxis": { + "buckets": null, + "mode": "time", + "name": null, + "show": true, + "values": [] + }, + "yaxes": [ + { + "decimals": 0, + "format": "short", + "label": null, + "logBase": 1, + "max": null, + "min": "0", + "show": true + }, + { + "format": "short", + "label": null, + "logBase": 1, + "max": null, + "min": null, + "show": true + } + ], + "yaxis": { + "align": 
false, + "alignLevel": null + } + }, + { + "aliasColors": {}, + "bars": false, + "dashLength": 10, + "dashes": false, + "datasource": "$datasource", + "fill": 1, + "gridPos": { + "h": 9, + "w": 13, + "x": 11, + "y": 41 + }, + "id": 73, + "legend": { + "alignAsTable": false, + "avg": false, + "current": false, + "max": false, + "min": false, + "show": true, + "total": false, + "values": false + }, + "lines": true, + "linewidth": 1, + "links": [], + "nullPointMode": "null", + "paceLength": 10, + "percentage": false, + "pointradius": 2, + "points": false, + "renderer": "flot", + "seriesOverrides": [], + "spaceLength": 10, + "stack": false, + "steppedLine": false, + "targets": [ + { + "expr": "wls_datasource_connection_delay_time{weblogic_domainUID=\"$domainName\", weblogic_clusterName=\"$clusterName\", weblogic_serverName=~\"${serverName:regex}\"}", + "format": "time_series", + "intervalFactor": 1, + "legendFormat": " {{name}} @ {{weblogic_serverName}}", + "refId": "A" + } + ], + "thresholds": [], + "timeFrom": null, + "timeRegions": [], + "timeShift": null, + "title": "Average Connection Delay Time", + "tooltip": { + "shared": true, + "sort": 0, + "value_type": "individual" + }, + "type": "graph", + "xaxis": { + "buckets": null, + "mode": "time", + "name": null, + "show": true, + "values": [] + }, + "yaxes": [ + { + "decimals": 0, + "format": "ms", + "label": null, + "logBase": 1, + "max": null, + "min": "0", + "show": true + }, + { + "format": "short", + "label": null, + "logBase": 1, + "max": null, + "min": null, + "show": true + } + ], + "yaxis": { + "align": false, + "alignLevel": null + } + } + ], + "title": "Data Sources", + "type": "row" + }, + { + "collapsed": true, + "gridPos": { + "h": 1, + "w": 24, + "x": 0, + "y": 26 + }, + "id": 40, + "panels": [ + { + "aliasColors": {}, + "bars": false, + "dashLength": 10, + "dashes": false, + "datasource": "$datasource", + "fill": 1, + "gridPos": { + "h": 6, + "w": 12, + "x": 0, + "y": 30 + }, + "id": 145, + "legend": 
{ + "alignAsTable": false, + "avg": false, + "current": false, + "max": false, + "min": false, + "rightSide": false, + "show": true, + "total": false, + "values": false + }, + "lines": true, + "linewidth": 1, + "links": [], + "nullPointMode": "null", + "paceLength": 10, + "percentage": false, + "pointradius": 2, + "points": false, + "renderer": "flot", + "seriesOverrides": [], + "spaceLength": 10, + "stack": false, + "steppedLine": false, + "targets": [ + { + "expr": "sum(wls_jmsruntime_connections_current_count{weblogic_domainUID=\"$domainName\", weblogic_clusterName=\"$clusterName\", weblogic_serverName=~\"${serverName:regex}\"}) by (weblogic_serverName)", + "format": "time_series", + "intervalFactor": 1, + "legendFormat": "{{weblogic_serverName}}", + "refId": "A" + } + ], + "thresholds": [], + "timeFrom": null, + "timeRegions": [], + "timeShift": null, + "title": "JMS Connections", + "tooltip": { + "shared": true, + "sort": 0, + "value_type": "individual" + }, + "type": "graph", + "xaxis": { + "buckets": null, + "mode": "time", + "name": null, + "show": true, + "values": [] + }, + "yaxes": [ + { + "decimals": 0, + "format": "short", + "label": "", + "logBase": 1, + "max": null, + "min": "0", + "show": true + }, + { + "format": "short", + "label": null, + "logBase": 1, + "max": null, + "min": null, + "show": true + } + ], + "yaxis": { + "align": false, + "alignLevel": null + } + }, + { + "aliasColors": {}, + "bars": false, + "dashLength": 10, + "dashes": false, + "datasource": "$datasource", + "fill": 1, + "gridPos": { + "h": 6, + "w": 12, + "x": 12, + "y": 30 + }, + "id": 147, + "legend": { + "alignAsTable": false, + "avg": false, + "current": false, + "max": false, + "min": false, + "rightSide": false, + "show": true, + "total": false, + "values": false + }, + "lines": true, + "linewidth": 1, + "links": [], + "nullPointMode": "null", + "paceLength": 10, + "percentage": false, + "pointradius": 2, + "points": false, + "renderer": "flot", + "seriesOverrides": [], 
+ "spaceLength": 10, + "stack": false, + "steppedLine": false, + "targets": [ + { + "expr": "sum(irate(wls_jmsruntime_connections_total_count{weblogic_domainUID=\"$domainName\", weblogic_clusterName=\"$clusterName\", weblogic_serverName=~\"${serverName:regex}\"}[5m])) by (weblogic_serverName)", + "format": "time_series", + "intervalFactor": 1, + "legendFormat": "{{weblogic_serverName}}", + "refId": "A" + } + ], + "thresholds": [], + "timeFrom": null, + "timeRegions": [], + "timeShift": null, + "title": "JMS Connection Rate", + "tooltip": { + "shared": true, + "sort": 0, + "value_type": "individual" + }, + "type": "graph", + "xaxis": { + "buckets": null, + "mode": "time", + "name": null, + "show": true, + "values": [] + }, + "yaxes": [ + { + "decimals": null, + "format": "short", + "label": "", + "logBase": 1, + "max": null, + "min": "0", + "show": true + }, + { + "format": "short", + "label": null, + "logBase": 1, + "max": null, + "min": null, + "show": true + } + ], + "yaxis": { + "align": false, + "alignLevel": null + } + }, + { + "columns": [], + "datasource": "$datasource", + "fontSize": "100%", + "gridPos": { + "h": 4, + "w": 24, + "x": 0, + "y": 36 + }, + "hideTimeOverride": true, + "id": 113, + "links": [], + "pageSize": null, + "scroll": true, + "showHeader": true, + "sort": { + "col": 0, + "desc": true + }, + "styles": [ + { + "alias": "Time", + "dateFormat": "YYYY-MM-DD HH:mm:ss", + "pattern": "Time", + "type": "hidden" + }, + { + "alias": "Name", + "colorMode": null, + "colors": [ + "rgba(245, 54, 54, 0.9)", + "rgba(237, 129, 40, 0.89)", + "rgba(50, 172, 45, 0.97)" + ], + "dateFormat": "YYYY-MM-DD HH:mm:ss", + "decimals": 2, + "mappingType": 1, + "pattern": "jmsserver", + "thresholds": [], + "type": "string", + "unit": "short" + }, + { + "alias": "Current Dests", + "colorMode": null, + "colors": [ + "rgba(245, 54, 54, 0.9)", + "rgba(237, 129, 40, 0.89)", + "rgba(50, 172, 45, 0.97)" + ], + "dateFormat": "YYYY-MM-DD HH:mm:ss", + "decimals": 0, + 
"mappingType": 1, + "pattern": "Value #B", + "thresholds": [], + "type": "number", + "unit": "short" + }, + { + "alias": "Total Msgs", + "colorMode": null, + "colors": [ + "rgba(245, 54, 54, 0.9)", + "rgba(237, 129, 40, 0.89)", + "rgba(50, 172, 45, 0.97)" + ], + "dateFormat": "YYYY-MM-DD HH:mm:ss", + "decimals": 0, + "mappingType": 1, + "pattern": "Value #A", + "thresholds": [], + "type": "number", + "unit": "short" + }, + { + "alias": "Total Bytes", + "colorMode": null, + "colors": [ + "rgba(245, 54, 54, 0.9)", + "rgba(237, 129, 40, 0.89)", + "rgba(50, 172, 45, 0.97)" + ], + "dateFormat": "YYYY-MM-DD HH:mm:ss", + "decimals": 0, + "mappingType": 1, + "pattern": "Value #D", + "thresholds": [], + "type": "number", + "unit": "bytes" + }, + { + "alias": "Total Dests", + "colorMode": null, + "colors": [ + "rgba(245, 54, 54, 0.9)", + "rgba(237, 129, 40, 0.89)", + "rgba(50, 172, 45, 0.97)" + ], + "dateFormat": "YYYY-MM-DD HH:mm:ss", + "decimals": 0, + "mappingType": 1, + "pattern": "Value #E", + "thresholds": [], + "type": "number", + "unit": "short" + }, + { + "alias": "", + "colorMode": null, + "colors": [ + "rgba(245, 54, 54, 0.9)", + "rgba(237, 129, 40, 0.89)", + "rgba(50, 172, 45, 0.97)" + ], + "decimals": 2, + "pattern": "/.*/", + "thresholds": [], + "type": "hidden", + "unit": "short" + } + ], + "targets": [ + { + "expr": "sum(wls_jms_destinations_current_count{weblogic_domainUID=\"$domainName\", weblogic_clusterName=\"$clusterName\", weblogic_serverName=~\"${serverName:regex}\"}) by (jmsserver)", + "format": "table", + "instant": true, + "intervalFactor": 1, + "refId": "B" + }, + { + "expr": "sum(wls_jms_messages_received_count{weblogic_domainUID=\"$domainName\", weblogic_clusterName=\"$clusterName\", weblogic_serverName=~\"${serverName:regex}\"}) by (jmsserver)", + "format": "table", + "instant": true, + "intervalFactor": 1, + "refId": "A" + }, + { + "expr": "sum(wls_jms_bytes_received_count{weblogic_domainUID=\"$domainName\", 
weblogic_clusterName=\"$clusterName\", weblogic_serverName=~\"${serverName:regex}\"}) by (jmsserver)", + "format": "table", + "instant": true, + "intervalFactor": 1, + "refId": "D" + }, + { + "expr": "sum(wls_jms_destinations_total_count{weblogic_domainUID=\"$domainName\", weblogic_clusterName=\"$clusterName\", weblogic_serverName=~\"${serverName:regex}\"}) by (jmsserver)", + "format": "table", + "instant": true, + "intervalFactor": 1, + "refId": "E" + }, + { + "expr": "sum(wls_jms_destinations_current_count{weblogic_domainUID=\"$domainName\", weblogic_clusterName=\"$clusterName\", weblogic_serverName=~\"${serverName:regex}\"}) by (jmsserver)", + "format": "table", + "instant": true, + "intervalFactor": 1, + "refId": "F" + } + ], + "timeFrom": null, + "timeShift": null, + "title": "JMSServer Overview", + "transform": "table", + "type": "table" + }, + { + "aliasColors": {}, + "bars": false, + "dashLength": 10, + "dashes": false, + "datasource": "$datasource", + "fill": 1, + "gridPos": { + "h": 7, + "w": 12, + "x": 0, + "y": 40 + }, + "id": 54, + "legend": { + "alignAsTable": false, + "avg": false, + "current": true, + "max": false, + "min": false, + "rightSide": false, + "show": true, + "total": false, + "values": true + }, + "lines": true, + "linewidth": 1, + "links": [], + "nullPointMode": "null", + "paceLength": 10, + "percentage": false, + "pointradius": 2, + "points": false, + "renderer": "flot", + "seriesOverrides": [], + "spaceLength": 10, + "stack": false, + "steppedLine": false, + "targets": [ + { + "expr": "sum(wls_jms_messages_current_count{weblogic_domainUID=\"$domainName\", weblogic_clusterName=\"$clusterName\", weblogic_serverName=~\"${serverName:regex}\"}) by (jmsserver)", + "format": "time_series", + "intervalFactor": 1, + "legendFormat": "Current ({{jmsserver}})", + "refId": "A" + }, + { + "expr": "sum(wls_jms_messages_pending_count{weblogic_domainUID=\"$domainName\", weblogic_clusterName=\"$clusterName\", 
weblogic_serverName=~\"${serverName:regex}\"}) by (jmsserver)", + "format": "time_series", + "intervalFactor": 1, + "legendFormat": "Pending ({{jmsserver}})", + "refId": "B" + } + ], + "thresholds": [], + "timeFrom": null, + "timeRegions": [], + "timeShift": null, + "title": "Messages", + "tooltip": { + "shared": true, + "sort": 0, + "value_type": "individual" + }, + "type": "graph", + "xaxis": { + "buckets": null, + "mode": "time", + "name": null, + "show": true, + "values": [] + }, + "yaxes": [ + { + "decimals": 0, + "format": "short", + "label": "", + "logBase": 1, + "max": null, + "min": "0", + "show": true + }, + { + "format": "short", + "label": null, + "logBase": 1, + "max": null, + "min": null, + "show": true + } + ], + "yaxis": { + "align": false, + "alignLevel": null + } + }, + { + "aliasColors": {}, + "bars": false, + "dashLength": 10, + "dashes": false, + "datasource": "$datasource", + "fill": 1, + "gridPos": { + "h": 7, + "w": 12, + "x": 12, + "y": 40 + }, + "id": 56, + "legend": { + "alignAsTable": false, + "avg": false, + "current": true, + "max": false, + "min": false, + "show": true, + "total": false, + "values": true + }, + "lines": true, + "linewidth": 1, + "links": [], + "nullPointMode": "null", + "paceLength": 10, + "percentage": false, + "pointradius": 2, + "points": false, + "renderer": "flot", + "seriesOverrides": [], + "spaceLength": 10, + "stack": false, + "steppedLine": false, + "targets": [ + { + "expr": "sum(wls_jms_bytes_current_count{weblogic_domainUID=\"$domainName\", weblogic_clusterName=\"$clusterName\", weblogic_serverName=~\"${serverName:regex}\"}) by (jmsserver)", + "format": "time_series", + "intervalFactor": 1, + "legendFormat": "Current ({{jmsserver}})", + "refId": "A" + }, + { + "expr": "sum(wls_jms_bytes_pending_count{weblogic_domainUID=\"$domainName\", weblogic_clusterName=\"$clusterName\", weblogic_serverName=~\"${serverName:regex}\"}) by (jmsserver)", + "format": "time_series", + "intervalFactor": 1, + "legendFormat": 
"Pending ({{jmsserver}})", + "refId": "B" + } + ], + "thresholds": [], + "timeFrom": null, + "timeRegions": [], + "timeShift": null, + "title": "Bytes", + "tooltip": { + "shared": true, + "sort": 0, + "value_type": "individual" + }, + "type": "graph", + "xaxis": { + "buckets": null, + "mode": "time", + "name": null, + "show": true, + "values": [] + }, + "yaxes": [ + { + "decimals": 0, + "format": "short", + "label": "", + "logBase": 1, + "max": null, + "min": "0", + "show": true + }, + { + "format": "short", + "label": null, + "logBase": 1, + "max": null, + "min": null, + "show": true + } + ], + "yaxis": { + "align": false, + "alignLevel": null + } + }, + { + "aliasColors": {}, + "bars": true, + "dashLength": 10, + "dashes": false, + "datasource": "$datasource", + "fill": 1, + "gridPos": { + "h": 7, + "w": 12, + "x": 0, + "y": 47 + }, + "id": 58, + "legend": { + "avg": false, + "current": false, + "max": false, + "min": false, + "show": true, + "total": false, + "values": false + }, + "lines": true, + "linewidth": 1, + "links": [], + "nullPointMode": "null", + "paceLength": 10, + "percentage": false, + "pointradius": 2, + "points": false, + "renderer": "flot", + "seriesOverrides": [], + "spaceLength": 10, + "stack": false, + "steppedLine": false, + "targets": [ + { + "expr": "sum(irate(wls_jms_messages_received_count{weblogic_domainUID=\"$domainName\", weblogic_clusterName=\"$clusterName\", weblogic_serverName=~\"${serverName:regex}\"}[5m])) by (jmsserver)", + "format": "time_series", + "intervalFactor": 1, + "legendFormat": "{{jmsserver}}", + "refId": "A" + } + ], + "thresholds": [], + "timeFrom": null, + "timeRegions": [], + "timeShift": null, + "title": "Received Message Rate ", + "tooltip": { + "shared": true, + "sort": 0, + "value_type": "individual" + }, + "type": "graph", + "xaxis": { + "buckets": null, + "mode": "time", + "name": null, + "show": true, + "values": [] + }, + "yaxes": [ + { + "decimals": null, + "format": "short", + "label": "", + "logBase": 
1, + "max": null, + "min": "0", + "show": true + }, + { + "format": "short", + "label": null, + "logBase": 1, + "max": null, + "min": null, + "show": true + } + ], + "yaxis": { + "align": false, + "alignLevel": null + } + }, + { + "aliasColors": {}, + "bars": true, + "dashLength": 10, + "dashes": false, + "datasource": "$datasource", + "fill": 1, + "gridPos": { + "h": 7, + "w": 12, + "x": 12, + "y": 47 + }, + "id": 117, + "legend": { + "avg": false, + "current": false, + "max": false, + "min": false, + "show": true, + "total": false, + "values": false + }, + "lines": true, + "linewidth": 1, + "links": [], + "nullPointMode": "null", + "paceLength": 10, + "percentage": false, + "pointradius": 2, + "points": false, + "renderer": "flot", + "seriesOverrides": [], + "spaceLength": 10, + "stack": false, + "steppedLine": false, + "targets": [ + { + "expr": "sum(irate(wls_jms_bytes_received_count{weblogic_domainUID=\"$domainName\", weblogic_clusterName=\"$clusterName\", weblogic_serverName=~\"${serverName:regex}\"}[5m])) by (jmsserver)", + "format": "time_series", + "intervalFactor": 1, + "legendFormat": "{{jmsserver}}", + "refId": "A" + } + ], + "thresholds": [], + "timeFrom": null, + "timeRegions": [], + "timeShift": null, + "title": "Received Byte Rate", + "tooltip": { + "shared": true, + "sort": 0, + "value_type": "individual" + }, + "type": "graph", + "xaxis": { + "buckets": null, + "mode": "time", + "name": null, + "show": true, + "values": [] + }, + "yaxes": [ + { + "decimals": null, + "format": "short", + "label": "", + "logBase": 1, + "max": null, + "min": "0", + "show": true + }, + { + "format": "short", + "label": null, + "logBase": 1, + "max": null, + "min": null, + "show": true + } + ], + "yaxis": { + "align": false, + "alignLevel": null + } + }, + { + "columns": [], + "datasource": "$datasource", + "fontSize": "100%", + "gridPos": { + "h": 5, + "w": 24, + "x": 0, + "y": 54 + }, + "hideTimeOverride": true, + "id": 119, + "links": [], + "pageSize": null, + 
"scroll": true, + "showHeader": true, + "sort": { + "col": 3, + "desc": false + }, + "styles": [ + { + "alias": "Time", + "dateFormat": "YYYY-MM-DD HH:mm:ss", + "pattern": "Time", + "type": "hidden" + }, + { + "alias": "Destination", + "colorMode": null, + "colors": [ + "rgba(245, 54, 54, 0.9)", + "rgba(237, 129, 40, 0.89)", + "rgba(50, 172, 45, 0.97)" + ], + "dateFormat": "YYYY-MM-DD HH:mm:ss", + "decimals": 2, + "mappingType": 1, + "pattern": "destination", + "thresholds": [], + "type": "string", + "unit": "short" + }, + { + "alias": "Current Consumers", + "colorMode": null, + "colors": [ + "rgba(245, 54, 54, 0.9)", + "rgba(237, 129, 40, 0.89)", + "rgba(50, 172, 45, 0.97)" + ], + "dateFormat": "YYYY-MM-DD HH:mm:ss", + "decimals": 0, + "mappingType": 1, + "pattern": "Value #A", + "thresholds": [], + "type": "number", + "unit": "short" + }, + { + "alias": "Current Msgs", + "colorMode": null, + "colors": [ + "rgba(245, 54, 54, 0.9)", + "rgba(237, 129, 40, 0.89)", + "rgba(50, 172, 45, 0.97)" + ], + "dateFormat": "YYYY-MM-DD HH:mm:ss", + "decimals": 0, + "mappingType": 1, + "pattern": "Value #B", + "thresholds": [], + "type": "number", + "unit": "short" + }, + { + "alias": "Pending Msgs", + "colorMode": null, + "colors": [ + "rgba(245, 54, 54, 0.9)", + "rgba(237, 129, 40, 0.89)", + "rgba(50, 172, 45, 0.97)" + ], + "dateFormat": "YYYY-MM-DD HH:mm:ss", + "decimals": 0, + "mappingType": 1, + "pattern": "Value #C", + "thresholds": [], + "type": "number", + "unit": "short" + }, + { + "alias": "Current Bytes", + "colorMode": null, + "colors": [ + "rgba(245, 54, 54, 0.9)", + "rgba(237, 129, 40, 0.89)", + "rgba(50, 172, 45, 0.97)" + ], + "dateFormat": "YYYY-MM-DD HH:mm:ss", + "decimals": 0, + "mappingType": 1, + "pattern": "Value #D", + "thresholds": [], + "type": "number", + "unit": "bytes" + }, + { + "alias": "Pending Bytes", + "colorMode": null, + "colors": [ + "rgba(245, 54, 54, 0.9)", + "rgba(237, 129, 40, 0.89)", + "rgba(50, 172, 45, 0.97)" + ], + "dateFormat": 
"YYYY-MM-DD HH:mm:ss", + "decimals": 0, + "mappingType": 1, + "pattern": "Value #E", + "thresholds": [], + "type": "number", + "unit": "short" + }, + { + "alias": "Total Msgs", + "colorMode": null, + "colors": [ + "rgba(245, 54, 54, 0.9)", + "rgba(237, 129, 40, 0.89)", + "rgba(50, 172, 45, 0.97)" + ], + "dateFormat": "YYYY-MM-DD HH:mm:ss", + "decimals": 0, + "mappingType": 1, + "pattern": "Value #F", + "thresholds": [], + "type": "number", + "unit": "short" + }, + { + "alias": "Total Bytes", + "colorMode": null, + "colors": [ + "rgba(245, 54, 54, 0.9)", + "rgba(237, 129, 40, 0.89)", + "rgba(50, 172, 45, 0.97)" + ], + "dateFormat": "YYYY-MM-DD HH:mm:ss", + "decimals": 0, + "mappingType": 1, + "pattern": "Value #G", + "thresholds": [], + "type": "number", + "unit": "short" + }, + { + "alias": "", + "colorMode": null, + "colors": [ + "rgba(245, 54, 54, 0.9)", + "rgba(237, 129, 40, 0.89)", + "rgba(50, 172, 45, 0.97)" + ], + "decimals": 2, + "pattern": "/.*/", + "thresholds": [], + "type": "hidden", + "unit": "short" + } + ], + "targets": [ + { + "expr": "sum(wls_jms_dest_consumers_current_count{weblogic_domainUID=\"$domainName\", weblogic_clusterName=\"$clusterName\", weblogic_serverName=~\"${serverName:regex}\"}) by (destination)", + "format": "table", + "instant": true, + "intervalFactor": 1, + "refId": "A" + }, + { + "expr": "sum(wls_jms_dest_messages_current_count{weblogic_domainUID=\"$domainName\", weblogic_clusterName=\"$clusterName\", weblogic_serverName=~\"${serverName:regex}\"}) by (destination)", + "format": "table", + "instant": true, + "intervalFactor": 1, + "refId": "B" + }, + { + "expr": "sum(wls_jms_dest_messages_pending_count{weblogic_domainUID=\"$domainName\", weblogic_clusterName=\"$clusterName\", weblogic_serverName=~\"${serverName:regex}\"}) by (destination)", + "format": "table", + "instant": true, + "intervalFactor": 1, + "refId": "C" + }, + { + "expr": "sum(wls_jms_dest_bytes_current_count{weblogic_domainUID=\"$domainName\", 
weblogic_clusterName=\"$clusterName\", weblogic_serverName=~\"${serverName:regex}\"}) by (destination)", + "format": "table", + "instant": true, + "intervalFactor": 1, + "refId": "D" + }, + { + "expr": "sum(wls_jms_dest_bytes_pending_count{weblogic_domainUID=\"$domainName\", weblogic_clusterName=\"$clusterName\", weblogic_serverName=~\"${serverName:regex}\"}) by (destination)", + "format": "table", + "instant": true, + "intervalFactor": 1, + "refId": "E" + }, + { + "expr": "sum(wls_jms_dest_messages_received_count{weblogic_domainUID=\"$domainName\", weblogic_clusterName=\"$clusterName\", weblogic_serverName=~\"${serverName:regex}\"}) by (destination)", + "format": "table", + "instant": true, + "interval": "", + "intervalFactor": 1, + "refId": "F" + }, + { + "expr": "sum(wls_jms_dest_bytes_received_count{weblogic_domainUID=\"$domainName\", weblogic_clusterName=\"$clusterName\", weblogic_serverName=~\"${serverName:regex}\"}) by (destination)", + "format": "table", + "instant": true, + "intervalFactor": 1, + "refId": "G" + } + ], + "timeFrom": null, + "timeShift": null, + "title": "Destinations Overview", + "transform": "table", + "type": "table" + } + ], + "title": "JMS Services", + "type": "row" + } + ], + "refresh": false, + "schemaVersion": 16, + "style": "dark", + "tags": [], + "templating": { + "list": [ + { + "current": { + "text": "prometheus", + "value": "prometheus" + }, + "hide": 0, + "label": null, + "name": "datasource", + "options": [], + "query": "prometheus", + "refresh": 1, + "regex": "", + "type": "datasource" + }, + { + "allValue": null, + "current": {}, + "datasource": "$datasource", + "hide": 0, + "includeAll": false, + "label": "Domain", + "multi": false, + "name": "domainName", + "options": [], + "query": "label_values(weblogic_domainUID)", + "refresh": 1, + "regex": "", + "sort": 2, + "tagValuesQuery": "", + "tags": [], + "tagsQuery": "", + "type": "query", + "useTags": false + }, + { + "allValue": null, + "current": {}, + "datasource": 
"$datasource", + "hide": 0, + "includeAll": false, + "label": "Cluster", + "multi": false, + "name": "clusterName", + "options": [], + "query": "label_values(wls_jvm_uptime{weblogic_domainUID=\"$domainName\"},weblogic_clusterName)", + "refresh": 1, + "regex": "", + "sort": 2, + "tagValuesQuery": "", + "tags": [], + "tagsQuery": "", + "type": "query", + "useTags": false + }, + { + "allValue": null, + "current": {}, + "datasource": "$datasource", + "hide": 0, + "includeAll": true, + "label": "Server", + "multi": true, + "name": "serverName", + "options": [], + "query": "label_values(wls_jvm_uptime{weblogic_domainUID=\"$domainName\", weblogic_clusterName=\"$clusterName\"},weblogic_serverName)", + "refresh": 1, + "regex": "", + "sort": 2, + "tagValuesQuery": "", + "tags": [], + "tagsQuery": "", + "type": "query", + "useTags": false + }, + { + "allValue": null, + "current": { + "selected": false, + "tags": [], + "text": "5", + "value": "5" + }, + "hide": 0, + "includeAll": false, + "label": "Top N", + "multi": false, + "name": "topN", + "options": [ + { + "selected": false, + "text": "3", + "value": "3" + }, + { + "selected": true, + "text": "5", + "value": "5" + }, + { + "selected": false, + "text": "7", + "value": "7" + }, + { + "selected": false, + "text": "10", + "value": "10" + } + ], + "query": "3, 5, 7, 10", + "skipUrlSync": false, + "type": "custom" + } + ] + }, + "time": { + "from": "now-30m", + "to": "now" + }, + "timepicker": { + "hidden": false, + "refresh_intervals": [ + "5s", + "10s", + "30s", + "1m", + "5m", + "15m", + "30m", + "1h", + "2h", + "1d" + ], + "time_options": [ + "5m", + "15m", + "1h", + "6h", + "12h", + "24h", + "2d", + "7d", + "30d" + ] + }, + "timezone": "", + "title": "WebLogic Server Dashboard", + "uid": "5yUwzbZWz", + "version": 6 + } +} diff --git a/OracleIdentityGovernance/kubernetes/monitoring-service/config/weblogic.xml b/OracleIdentityGovernance/kubernetes/monitoring-service/config/weblogic.xml new file mode 100755 index 
000000000..c4e2df0c5 --- /dev/null +++ b/OracleIdentityGovernance/kubernetes/monitoring-service/config/weblogic.xml @@ -0,0 +1,18 @@ + + + + wls-exporter + + + com.google.gson.* + javax.inject.* + org.apache.commons.* + org.apache.http.* + org.glassfish.hk2.* + org.jvnet.hk2.* + org.jvnet.tiger_types.* + org.yaml.snakeyaml.* + + + + diff --git a/OracleIdentityGovernance/kubernetes/monitoring-service/delete-monitoring.sh b/OracleIdentityGovernance/kubernetes/monitoring-service/delete-monitoring.sh new file mode 100755 index 000000000..b676e9b40 --- /dev/null +++ b/OracleIdentityGovernance/kubernetes/monitoring-service/delete-monitoring.sh @@ -0,0 +1,122 @@ +#!/bin/bash +# Copyright (c) 2021, Oracle and/or its affiliates. +# Licensed under the Universal Permissive License v 1.0 as shown at https://oss.oracle.com/licenses/upl. +# +# delete-monitoring.sh + +# Initialize +script="${BASH_SOURCE[0]}" +scriptDir="$( cd "$( dirname "${script}" )" && pwd )" +OLD_PWD=`pwd` + + +# +## Function to exit and print an error message +## $1 - text of message +function fail { + printError $* + exit 1 +} + +# Function to remove a file if it exists +function removeFileIfExists { + echo "input is $1" + if [ -f $1 ]; then + rm -f $1 + fi +} + +function exitIfError { + if [ "$1" != "0" ]; then + echo "$2" + exit $1 + fi +} +# +# Function to parse a yaml file and generate the bash exports +# $1 - Input filename +# $2 - Output filename +function parseYaml { + local s='[[:space:]]*' w='[a-zA-Z0-9_]*' fs=$(echo @|tr @ '\034') + sed -ne "s|^\($s\):|\1|" \ + -e "s|^\($s\)\($w\)$s:$s[\"']\(.*\)[\"']$s\$|\1$fs\2$fs\3|p" \ + -e "s|^\($s\)\($w\)$s:$s\(.*\)$s\$|\1$fs\2$fs\3|p" $1 | + awk -F$fs '{ + if (length($3) > 0) { + # javaOptions may contain tokens that are not allowed in export command + # we need to handle it differently. 
+ if ($2=="javaOptions") { + printf("%s=%s\n", $2, $3); + } else { + printf("export %s=\"%s\"\n", $2, $3); + } + } + }' > $2 +} + +function usage { + echo usage: ${script} -i file [-v] [-h] + echo " -i Parameter inputs file, must be specified." + echo " -h Help" + exit $1 +} + + +function deleteKubePrometheusStack { + helm delete ${monitoringNamespace} --namespace ${monitoringNamespace} +} + +#Parse the inputs +while getopts "hi:" opt; do + case $opt in + i) valuesInputFile="${OPTARG}" + ;; + h) usage 0 + ;; + *) usage 1 + ;; + esac +done + +if [ -z ${valuesInputFile} ]; then + echo "${script}: -i must be specified." + missingRequiredOption="true" +fi + +if [ "${missingRequiredOption}" == "true" ]; then + usage 1 +fi + +if [ ! -f ${valuesInputFile} ]; then + echo "Unable to locate the input parameters file ${valuesInputFile}" + fail 'The error listed above must be resolved before the script can continue' +fi + +exportValuesFile=$(mktemp /tmp/export-values-XXXXXXXXX.sh) +parseYaml ${valuesInputFile} ${exportValuesFile} + + +source ${exportValuesFile} +rm ${exportValuesFile} + +# Setting up the WebLogic Monitoring Exporter + +echo "Undeploy WebLogic Monitoring Exporter started" +serviceMonitor=${scriptDir}/manifests/wls-exporter-ServiceMonitor.yaml +kubectl delete --ignore-not-found=true -f ${serviceMonitor} +script=${scriptDir}/scripts/undeploy-weblogic-monitoring-exporter.sh +sh ${script} +if [ "$?" != "0" ]; then + echo "ERROR: $script failed." + echo "Undeploy WebLogic Monitoring Exporter completed with errors. Review the logs and rerun" +else + echo "Undeploy WebLogic Monitoring Exporter completed." 
+fi + +if [ "${setupKubePrometheusStack}" = "true" ]; then + echo "Deleting Prometheus and grafana started" + deleteKubePrometheusStack + echo "Deleting Prometheus and grafana completed" +fi +cd $OLD_PWD + diff --git a/OracleIdentityGovernance/kubernetes/monitoring-service/manifests/prometheus-roleBinding-domain-namespace.yaml b/OracleIdentityGovernance/kubernetes/monitoring-service/manifests/prometheus-roleBinding-domain-namespace.yaml new file mode 100755 index 000000000..e71790de3 --- /dev/null +++ b/OracleIdentityGovernance/kubernetes/monitoring-service/manifests/prometheus-roleBinding-domain-namespace.yaml @@ -0,0 +1,20 @@ +# Copyright (c) 2021, Oracle and/or its affiliates. +# Licensed under the Universal Permissive License v 1.0 as shown at https://oss.oracle.com/licenses/upl. +# +apiVersion: rbac.authorization.k8s.io/v1 +items: +- apiVersion: rbac.authorization.k8s.io/v1 + kind: RoleBinding + metadata: + name: prometheus-k8s + namespace: oimcluster + roleRef: + apiGroup: rbac.authorization.k8s.io + kind: Role + name: prometheus-k8s + subjects: + - kind: ServiceAccount + name: prometheus-k8s + namespace: monitoring +kind: RoleBindingList + diff --git a/OracleIdentityGovernance/kubernetes/monitoring-service/manifests/prometheus-roleSpecific-domain-namespace.yaml b/OracleIdentityGovernance/kubernetes/monitoring-service/manifests/prometheus-roleSpecific-domain-namespace.yaml new file mode 100755 index 000000000..37fc2f8f0 --- /dev/null +++ b/OracleIdentityGovernance/kubernetes/monitoring-service/manifests/prometheus-roleSpecific-domain-namespace.yaml @@ -0,0 +1,23 @@ +# Copyright (c) 2021, Oracle and/or its affiliates. +# Licensed under the Universal Permissive License v 1.0 as shown at https://oss.oracle.com/licenses/upl. 
+# +apiVersion: rbac.authorization.k8s.io/v1 +items: +- apiVersion: rbac.authorization.k8s.io/v1 + kind: Role + metadata: + name: prometheus-k8s + namespace: oimcluster + rules: + - apiGroups: + - "" + resources: + - services + - endpoints + - pods + verbs: + - get + - list + - watch +kind: RoleList + diff --git a/OracleIdentityGovernance/kubernetes/monitoring-service/manifests/wls-exporter-ServiceMonitor.yaml b/OracleIdentityGovernance/kubernetes/monitoring-service/manifests/wls-exporter-ServiceMonitor.yaml new file mode 100755 index 000000000..833a05d62 --- /dev/null +++ b/OracleIdentityGovernance/kubernetes/monitoring-service/manifests/wls-exporter-ServiceMonitor.yaml @@ -0,0 +1,44 @@ +# Copyright (c) 2021, Oracle and/or its affiliates. +# Licensed under the Universal Permissive License v 1.0 as shown at https://oss.oracle.com/licenses/upl. +# +apiVersion: v1 +kind: Secret +metadata: + name: basic-auth + namespace: oimcluster +data: + password: V2VsY29tZTE= + user: d2VibG9naWM= +type: Opaque +--- +apiVersion: monitoring.coreos.com/v1 +kind: ServiceMonitor +metadata: + name: wls-exporter + namespace: oimcluster + labels: + k8s-app: wls-exporter + release: monitoring +spec: + namespaceSelector: + matchNames: + - oimcluster + selector: + matchLabels: + weblogic.domainName: oimcluster + endpoints: + - basicAuth: + password: + name: basic-auth + key: password + username: + name: basic-auth + key: user + port: default + relabelings: + - action: labelmap + regex: __meta_kubernetes_service_label_(.+) + interval: 10s + honorLabels: true + path: /wls-exporter/metrics + diff --git a/OracleIdentityGovernance/kubernetes/monitoring-service/manifests/wls-exporter-ServiceMonitor.yaml.template b/OracleIdentityGovernance/kubernetes/monitoring-service/manifests/wls-exporter-ServiceMonitor.yaml.template new file mode 100755 index 000000000..ba6d56881 --- /dev/null +++ b/OracleIdentityGovernance/kubernetes/monitoring-service/manifests/wls-exporter-ServiceMonitor.yaml.template @@ 
-0,0 +1,44 @@ +# Copyright (c) 2021, Oracle and/or its affiliates. +# Licensed under the Universal Permissive License v 1.0 as shown at https://oss.oracle.com/licenses/upl. +# +apiVersion: v1 +kind: Secret +metadata: + name: basic-auth + namespace: oimcluster +data: + password: %PASSWORD% + user: %USERNAME% +type: Opaque +--- +apiVersion: monitoring.coreos.com/v1 +kind: ServiceMonitor +metadata: + name: wls-exporter + namespace: oimcluster + labels: + k8s-app: wls-exporter + release: monitoring +spec: + namespaceSelector: + matchNames: + - oimcluster + selector: + matchLabels: + weblogic.domainName: oimcluster + endpoints: + - basicAuth: + password: + name: basic-auth + key: password + username: + name: basic-auth + key: user + port: default + relabelings: + - action: labelmap + regex: __meta_kubernetes_service_label_(.+) + interval: 10s + honorLabels: true + path: /wls-exporter/metrics + diff --git a/OracleIdentityGovernance/kubernetes/monitoring-service/monitoring-inputs.yaml b/OracleIdentityGovernance/kubernetes/monitoring-service/monitoring-inputs.yaml new file mode 100755 index 000000000..6dab6efca --- /dev/null +++ b/OracleIdentityGovernance/kubernetes/monitoring-service/monitoring-inputs.yaml @@ -0,0 +1,65 @@ +# Copyright (c) 2021, Oracle and/or its affiliates. +# Licensed under the Universal Permissive License v 1.0 as shown at https://oss.oracle.com/licenses/upl. +# +# The version of this inputs file. Do not modify. +version: create-oimcluster-monitoring-inputs-v1 + +# Unique ID identifying your domain. +# This ID must not contain an underscore ("_"), and must be lowercase and unique across all domains in a Kubernetes cluster. 
+domainUID: oimcluster + +# Name of the domain namespace +domainNamespace: oimcluster + +# Boolean value indicating whether to install kube-prometheus-stack +setupKubePrometheusStack: true + +# Additional parameters for helm install kube-prometheus-stack +# Refer https://github.com/prometheus-community/helm-charts/blob/main/charts/kube-prometheus-stack/values.yaml for additional parameters +# Sample : +# additionalParamForKubePrometheusStack: --set nodeExporter.enabled=false --set prometheusOperator.tls.enabled=false --set prometheusOperator.admissionWebhooks.enabled=false +additionalParamForKubePrometheusStack: + +# Name of the monitoring namespace +monitoringNamespace: monitoring + +# Name of the Admin Server +adminServerName: AdminServer +# +# Port number for admin server +adminServerPort: 7001 + +# Cluster name +soaClusterName: soa_cluster + +# Port number for managed server +soaManagedServerPort: 8001 + +# WebLogic Monitoring Exporter to Cluster +wlsMonitoringExporterTosoaCluster: true + +# Cluster name +oimClusterName: oim_cluster + +# Port number for managed server +oimManagedServerPort: 14000 + +# WebLogic Monitoring Exporter to Cluster +wlsMonitoringExporterTooimCluster: true + + +# Boolean to indicate if the adminNodePort will be exposed +exposeMonitoringNodePort: true + +# NodePort to expose Prometheus +prometheusNodePort: 32101 + +# NodePort to expose Grafana +grafanaNodePort: 32100 + +# NodePort to expose Alertmanager +alertmanagerNodePort: 32102 + +# Name of the Kubernetes secret for the Admin Server's username and password +weblogicCredentialsSecretName: oimcluster-domain-credentials + diff --git a/OracleIdentityGovernance/kubernetes/monitoring-service/scripts/deploy-weblogic-monitoring-exporter.py b/OracleIdentityGovernance/kubernetes/monitoring-service/scripts/deploy-weblogic-monitoring-exporter.py new file mode 100755 index 000000000..97d0d8b6b --- /dev/null +++ 
b/OracleIdentityGovernance/kubernetes/monitoring-service/scripts/deploy-weblogic-monitoring-exporter.py @@ -0,0 +1,105 @@ +# Copyright (c) 2021, Oracle and/or its affiliates. +# Licensed under the Universal Permissive License v 1.0 as shown at https://oss.oracle.com/licenses/upl. +# +import sys +#======================================================= +# Function for fresh plain deployment +#======================================================= +def newDeploy(appName,target): + try: + print 'Deploying .........' + deploy(appName,'/u01/oracle/wls-exporter-deploy/'+appName+'.war', target, upload="true",remote="true") + startApplication(appName) + except Exception, ex: + print ex.toString() + +#======================================================== +# Main program here... +# Target you can change as per your need +#======================================================== + +def usage(): + argsList = ' -domainName -adminServerName -adminURL -username -password ' + argsList=argsList + ' -soaClusterName ' + ' -wlsMonitoringExporterTosoaCluster ' + argsList=argsList + ' -oimClusterName ' + ' -wlsMonitoringExporterTooimCluster ' + print sys.argv[0] + argsList + sys.exit(0) + +if len(sys.argv) < 1: + usage() + +# domainName will be passed by command line parameter -domainName. 
+domainName = "oimcluster" + +# adminServerName will be passed by command line parameter -adminServerName +adminServerName = "AdminServer" + +# adminURL will be passed by command line parameter -adminURL +adminURL = "oimcluster-adminserver:7001" + +# soaClusterName will be passed by command line parameter -soaClusterName +soaClusterName = "soa_cluster" + +# wlsMonitoringExporterTosoaCluster will be passed by command line parameter -wlsMonitoringExporterTosoaCluster +wlsMonitoringExporterTosoaCluster = "true" + + +# oimClusterName will be passed by command line parameter -oimClusterName +oimClusterName = "oim_cluster" + +# wlsMonitoringExporterTooimCluster will be passed by command line parameter -wlsMonitoringExporterTooimCluster +wlsMonitoringExporterTooimCluster = "true" + +# username will be passed by command line parameter -username +username = "weblogic" + +# password will be passed by command line parameter -password +password = "Welcome1" + +i=1 +while i < len(sys.argv): + if sys.argv[i] == '-domainName': + domainName = sys.argv[i+1] + i += 2 + elif sys.argv[i] == '-adminServerName': + adminServerName = sys.argv[i+1] + i += 2 + elif sys.argv[i] == '-adminURL': + adminURL = sys.argv[i+1] + i += 2 + elif sys.argv[i] == '-username': + username = sys.argv[i+1] + i += 2 + elif sys.argv[i] == '-password': + password = sys.argv[i+1] + i += 2 + elif sys.argv[i] == '-soaClusterName': + soaClusterName = sys.argv[i+1] + i += 2 + elif sys.argv[i] == '-wlsMonitoringExporterTosoaCluster': + wlsMonitoringExporterTosoaCluster = sys.argv[i+1] + i += 2 + elif sys.argv[i] == '-oimClusterName': + oimClusterName = sys.argv[i+1] + i += 2 + elif sys.argv[i] == '-wlsMonitoringExporterTooimCluster': + wlsMonitoringExporterTooimCluster = sys.argv[i+1] + i += 2 + else: + print 'Unexpected argument switch at position ' + str(i) + ': ' + str(sys.argv[i]) + usage() + sys.exit(1) + +# Deployment +connect(username, password, 't3://' + adminURL) +cd('AppDeployments') 
+newDeploy('wls-exporter-adminserver',adminServerName) +if 'true' == wlsMonitoringExporterTosoaCluster: + newDeploy('wls-exporter-soa',soaClusterName) + +if 'true' == wlsMonitoringExporterTooimCluster: + newDeploy('wls-exporter-oim',oimClusterName) + +disconnect() +exit() + diff --git a/OracleIdentityGovernance/kubernetes/monitoring-service/scripts/deploy-weblogic-monitoring-exporter.sh b/OracleIdentityGovernance/kubernetes/monitoring-service/scripts/deploy-weblogic-monitoring-exporter.sh new file mode 100755 index 000000000..765bf1b67 --- /dev/null +++ b/OracleIdentityGovernance/kubernetes/monitoring-service/scripts/deploy-weblogic-monitoring-exporter.sh @@ -0,0 +1,37 @@ +#!/bin/bash +# Copyright (c) 2021, Oracle and/or its affiliates. +# Licensed under the Universal Permissive License v 1.0 as shown at https://oss.oracle.com/licenses/upl. +# +# Initialize +script="${BASH_SOURCE[0]}" +scriptDir="$( cd "$( dirname "${script}" )" && pwd )" +warDir=$PWD +source ${scriptDir}/utils.sh + +# Setting default values +initialize +# Function to lowercase a value and make it a legal DNS1123 name +# $1 - value to convert to lowercase +function toDNS1123Legal { + local val=`echo $1 | tr "[:upper:]" "[:lower:]"` + val=${val//"_"/"-"} + echo "$val" +} + +# username and password from Kubernetes secret +username=`kubectl get secrets ${weblogicCredentialsSecretName} -n ${domainNamespace} -o=jsonpath='{.data.username}'|base64 --decode` +password=`kubectl get secrets ${weblogicCredentialsSecretName} -n ${domainNamespace} -o=jsonpath='{.data.password}'|base64 --decode` + +adminServerPodName="${domainUID}-$(toDNS1123Legal ${adminServerName})" + +InputParameterList=" -domainName ${domainUID} -adminServerName ${adminServerName} -adminURL ${adminServerPodName}:${adminServerPort} -username ${username} -password ${password}" +InputParameterList="${InputParameterList} -soaClusterName ${soaClusterName} -wlsMonitoringExporterTosoaCluster ${wlsMonitoringExporterTosoaCluster}" 
+InputParameterList="${InputParameterList} -oimClusterName ${oimClusterName} -wlsMonitoringExporterTooimCluster ${wlsMonitoringExporterTooimCluster}" + +echo "Deploying WebLogic Monitoring Exporter with domainNamespace[$domainNamespace], domainUID[$domainUID], adminServerPodName[$adminServerPodName]" +. $scriptDir/get-wls-exporter.sh +kubectl cp $scriptDir/wls-exporter-deploy ${domainNamespace}/${adminServerPodName}:/u01/oracle +kubectl cp $scriptDir/deploy-weblogic-monitoring-exporter.py ${domainNamespace}/${adminServerPodName}:/u01/oracle/wls-exporter-deploy +EXEC_DEPLOY="kubectl exec -it -n ${domainNamespace} ${adminServerPodName} -- /u01/oracle/oracle_common/common/bin/wlst.sh /u01/oracle/wls-exporter-deploy/deploy-weblogic-monitoring-exporter.py ${InputParameterList}" +eval ${EXEC_DEPLOY} + diff --git a/OracleIdentityGovernance/kubernetes/monitoring-service/scripts/get-wls-exporter.sh b/OracleIdentityGovernance/kubernetes/monitoring-service/scripts/get-wls-exporter.sh new file mode 100755 index 000000000..d4db517f0 --- /dev/null +++ b/OracleIdentityGovernance/kubernetes/monitoring-service/scripts/get-wls-exporter.sh @@ -0,0 +1,46 @@ +#!/bin/bash +# Copyright (c) 2021, Oracle and/or its affiliates. +# Licensed under the Universal Permissive License v 1.0 as shown at https://oss.oracle.com/licenses/upl. 
+# +# Initialize +script="${BASH_SOURCE[0]}" +scriptDir="$( cd "$( dirname "${script}" )" && pwd )" +source ${scriptDir}/utils.sh +warDir=$scriptDir/../bin +mkdir -p $warDir +curl -L -o $warDir/wls-exporter.war https://github.com/oracle/weblogic-monitoring-exporter/releases/download/v2.0.0/wls-exporter.war +mkdir -p $scriptDir/wls-exporter-deploy +echo "created $scriptDir/wls-exporter-deploy dir" + +function update_wls_exporter_war { + servername=$1 + port=$2 + tmp_dir=$(mktemp -d -t ci-XXXXXXXXXX) + echo "created $tmp_dir" + mkdir -p $tmp_dir/WEB-INF + cp $scriptDir/../config/config.yml.template $tmp_dir/config.yml + cp $scriptDir/../config/weblogic.xml $tmp_dir/WEB-INF/weblogic.xml + cp $warDir/wls-exporter.war $tmp_dir/wls-exporter.war + + sed -i -e "s:%PORT%:${port}:g" $tmp_dir/config.yml + pushd $tmp_dir + echo "in temp dir" + zip wls-exporter.war WEB-INF/weblogic.xml + zip wls-exporter.war config.yml + + cp wls-exporter.war ${scriptDir}/wls-exporter-deploy/wls-exporter-${servername}.war + popd +} + +initialize + +update_wls_exporter_war adminserver ${adminServerPort} +if [[ ${wlsMonitoringExporterTosoaCluster} == "true" ]]; +then + update_wls_exporter_war soa ${soaManagedServerPort} +fi +if [[ ${wlsMonitoringExporterTooimCluster} == "true" ]]; +then + update_wls_exporter_war oim ${oimManagedServerPort} +fi + diff --git a/OracleIdentityGovernance/kubernetes/monitoring-service/scripts/undeploy-weblogic-monitoring-exporter.py b/OracleIdentityGovernance/kubernetes/monitoring-service/scripts/undeploy-weblogic-monitoring-exporter.py new file mode 100755 index 000000000..377545063 --- /dev/null +++ b/OracleIdentityGovernance/kubernetes/monitoring-service/scripts/undeploy-weblogic-monitoring-exporter.py @@ -0,0 +1,103 @@ +# Copyright (c) 2021, Oracle and/or its affiliates. +# Licensed under the Universal Permissive License v 1.0 as shown at https://oss.oracle.com/licenses/upl. 
+# +import sys +#======================================================= +# Function for undeployment +#======================================================= +def unDeploy(appName,target): + print 'Undeploying .........' + try: + stopApplication(appName) + undeploy(appName, target) + except Exception, ex: + print ex.toString() + +#======================================================== +# Main program here... +# Target you can change as per your need +#======================================================== +def usage(): + argsList = ' -domainName -adminServerName -adminURL -username -password ' + argsList=argsList + ' -soaClusterName ' + ' -wlsMonitoringExporterTosoaCluster ' + argsList=argsList + ' -oimClusterName ' + ' -wlsMonitoringExporterTooimCluster ' + print sys.argv[0] + argsList + sys.exit(0) + +if len(sys.argv) < 1: + usage() + +# domainName will be passed by command line parameter -domainName. +domainName = "oimcluster" + +# adminServerName will be passed by command line parameter -adminServerName +adminServerName = "AdminServer" + +# adminURL will be passed by command line parameter -adminURL +adminURL = "oimcluster-adminserver:7001" + +# soaClusterName will be passed by command line parameter -soaClusterName +soaClusterName = "soa_cluster" + +# wlsMonitoringExporterTosoaCluster will be passed by command line parameter -wlsMonitoringExporterTosoaCluster +wlsMonitoringExporterTosoaCluster = "true" +# oimClusterName will be passed by command line parameter -oimClusterName +oimClusterName = "oim_cluster" + +# wlsMonitoringExporterTooimCluster will be passed by command line parameter -wlsMonitoringExporterTooimCluster +wlsMonitoringExporterTooimCluster = "true" + +# username will be passed by command line parameter -username +username = "weblogic" + +# password will be passed by command line parameter -password +password = "Welcome1" + + +i=1 +while i < len(sys.argv): + if sys.argv[i] == '-domainName': + domainName = sys.argv[i+1] + i += 2 + elif 
sys.argv[i] == '-adminServerName': + adminServerName = sys.argv[i+1] + i += 2 + elif sys.argv[i] == '-adminURL': + adminURL = sys.argv[i+1] + i += 2 + elif sys.argv[i] == '-username': + username = sys.argv[i+1] + i += 2 + elif sys.argv[i] == '-password': + password = sys.argv[i+1] + i += 2 + elif sys.argv[i] == '-soaClusterName': + soaClusterName = sys.argv[i+1] + i += 2 + elif sys.argv[i] == '-wlsMonitoringExporterTosoaCluster': + wlsMonitoringExporterTosoaCluster = sys.argv[i+1] + i += 2 + elif sys.argv[i] == '-oimClusterName': + oimClusterName = sys.argv[i+1] + i += 2 + elif sys.argv[i] == '-wlsMonitoringExporterTooimCluster': + wlsMonitoringExporterTooimCluster = sys.argv[i+1] + i += 2 + + else: + print 'Unexpected argument switch at position ' + str(i) + ': ' + str(sys.argv[i]) + usage() + sys.exit(1) + +# Undeploy +connect(username, password, 't3://' + adminURL) +unDeploy('wls-exporter-adminserver',adminServerName) +if 'true' == wlsMonitoringExporterTosoaCluster: + unDeploy('wls-exporter-soa',soaClusterName) + +if 'true' == wlsMonitoringExporterTooimCluster: + unDeploy('wls-exporter-oim',oimClusterName) + +disconnect() +exit() + diff --git a/OracleIdentityGovernance/kubernetes/monitoring-service/scripts/undeploy-weblogic-monitoring-exporter.sh b/OracleIdentityGovernance/kubernetes/monitoring-service/scripts/undeploy-weblogic-monitoring-exporter.sh new file mode 100755 index 000000000..a11338a8c --- /dev/null +++ b/OracleIdentityGovernance/kubernetes/monitoring-service/scripts/undeploy-weblogic-monitoring-exporter.sh @@ -0,0 +1,39 @@ +#!/bin/bash +# Copyright (c) 2021, Oracle and/or its affiliates. +# Licensed under the Universal Permissive License v 1.0 as shown at https://oss.oracle.com/licenses/upl. 
+# +# Initialize +script="${BASH_SOURCE[0]}" +scriptDir="$( cd "$( dirname "${script}" )" && pwd )" +source ${scriptDir}/utils.sh + +# Function to lowercase a value and make it a legal DNS1123 name +# $1 - value to convert to lowercase +function toDNS1123Legal { + local val=`echo $1 | tr "[:upper:]" "[:lower:]"` + val=${val//"_"/"-"} + echo "$val" +} + +initialize + +# username and password from Kubernetes secret +username=`kubectl get secrets ${weblogicCredentialsSecretName} -n ${domainNamespace} -o=jsonpath='{.data.username}'|base64 --decode` +password=`kubectl get secrets ${weblogicCredentialsSecretName} -n ${domainNamespace} -o=jsonpath='{.data.password}'|base64 --decode` + +adminServerPodName="${domainUID}-$(toDNS1123Legal ${adminServerName})" + +InputParameterList="-domainName ${domainUID} -adminServerName ${adminServerName} -adminURL ${adminServerPodName}:${adminServerPort} -username ${username} -password ${password}" +InputParameterList="${InputParameterList} -soaClusterName ${soaClusterName} -wlsMonitoringExporterTosoaCluster ${wlsMonitoringExporterTosoaCluster}" +InputParameterList="${InputParameterList} -oimClusterName ${oimClusterName} -wlsMonitoringExporterTooimCluster ${wlsMonitoringExporterTooimCluster}" + +# Copy weblogic monitoring exporter jars for deployment +echo "Undeploying WebLogic Monitoring Exporter: domainNamespace[$domainNamespace], domainUID[$domainUID], adminServerPodName[$adminServerPodName]" + +kubectl cp $scriptDir/undeploy-weblogic-monitoring-exporter.py ${domainNamespace}/${adminServerPodName}:/u01/oracle/undeploy-weblogic-monitoring-exporter.py +EXEC_UNDEPLOY="kubectl exec -it -n ${domainNamespace} ${adminServerPodName} -- /u01/oracle/oracle_common/common/bin/wlst.sh /u01/oracle/undeploy-weblogic-monitoring-exporter.py ${InputParameterList}" +eval ${EXEC_UNDEPLOY} + +# Cleanup the local wars +rm -rf ${scriptDir}/wls-exporter-deploy + diff --git a/OracleIdentityGovernance/kubernetes/monitoring-service/scripts/utils.sh 
b/OracleIdentityGovernance/kubernetes/monitoring-service/scripts/utils.sh new file mode 100755 index 000000000..d9c998e98 --- /dev/null +++ b/OracleIdentityGovernance/kubernetes/monitoring-service/scripts/utils.sh @@ -0,0 +1,61 @@ +#!/bin/bash +# Copyright (c) 2021, Oracle and/or its affiliates. +# Licensed under the Universal Permissive License v 1.0 as shown at https://oss.oracle.com/licenses/upl. +# + +function initialize { + if [ -z ${domainNamespace} ]; then + echo "domainNamespace is empty, setting to default oimcluster" + domainNamespace="oimcluster" + fi + + if [ -z ${domainUID} ]; then + echo "domainUID is empty, setting to default oimcluster" + domainUID="oimcluster" + fi + + if [ -z ${weblogicCredentialsSecretName} ]; then + echo "weblogicCredentialsSecretName is empty, setting to default \"oimcluster-domain-credentials\"" + weblogicCredentialsSecretName="oimcluster-domain-credentials" + fi + + if [ -z ${adminServerName} ]; then + echo "adminServerName is empty, setting to default \"AdminServer\"" + adminServerName="AdminServer" + fi + + if [ -z ${adminServerPort} ]; then + echo "adminServerPort is empty, setting to default \"7001\"" + adminServerPort="7001" + fi + + if [ -z ${soaClusterName} ]; then + echo "soaClusterName is empty, setting to default \"soa_cluster\"" + soaClusterName="soa_cluster" + fi + + if [ -z ${soaManagedServerPort} ]; then + echo "soaManagedServerPort is empty, setting to default \"8001\"" + soaManagedServerPort="8001" + fi + + if [ -z ${wlsMonitoringExporterTosoaCluster} ]; then + echo "wlsMonitoringExporterTosoaCluster is empty, setting to default \"true\"" + wlsMonitoringExporterTosoaCluster="true" + fi + if [ -z ${oimClusterName} ]; then + echo "oimClusterName is empty, setting to default \"oim_cluster\"" + oimClusterName="oim_cluster" + fi + + if [ -z ${oimManagedServerPort} ]; then + echo "oimManagedServerPort is empty, setting to default \"14000\"" + oimManagedServerPort="14000" + fi + + if [ -z 
${wlsMonitoringExporterTooimCluster} ]; then + echo "wlsMonitoringExporterTooimCluster is empty, setting to default \"true\"" + wlsMonitoringExporterTooimCluster="true" + fi +} + diff --git a/OracleIdentityGovernance/kubernetes/monitoring-service/setup-monitoring.sh b/OracleIdentityGovernance/kubernetes/monitoring-service/setup-monitoring.sh new file mode 100755 index 000000000..f6d6f5f0c --- /dev/null +++ b/OracleIdentityGovernance/kubernetes/monitoring-service/setup-monitoring.sh @@ -0,0 +1,194 @@ +#!/bin/bash +# Copyright (c) 2021, Oracle and/or its affiliates. +# Licensed under the Universal Permissive License v 1.0 as shown at https://oss.oracle.com/licenses/upl. +# +# setup-monitoring.sh + +# Initialize +script="${BASH_SOURCE[0]}" +scriptDir="$( cd "$( dirname "${script}" )" && pwd )" +OLD_PWD=`pwd` + + + +# +# Function to exit and print an error message +# $1 - text of message +function fail { + printError $* + exit 1 +} + +# Function to print an error message +function printError { + echo [ERROR] $* +} + + +# +# Function to remove a file if it exists +# +function removeFileIfExists { + echo "input is $1" + if [ -f $1 ]; then + rm -f $1 + fi +} + +function exitIfError { + if [ "$1" != "0" ]; then + echo "$2" + exit $1 + fi +} + +# +# Function to parse a yaml file and generate the bash exports +# $1 - Input filename +# $2 - Output filename +function parseYaml { + local s='[[:space:]]*' w='[a-zA-Z0-9_]*' fs=$(echo @|tr @ '\034') + sed -ne "s|^\($s\):|\1|" \ + -e "s|^\($s\)\($w\)$s:$s[\"']\(.*\)[\"']$s\$|\1$fs\2$fs\3|p" \ + -e "s|^\($s\)\($w\)$s:$s\(.*\)$s\$|\1$fs\2$fs\3|p" $1 | + awk -F$fs '{ + if (length($3) > 0) { + # javaOptions may contain tokens that are not allowed in export command + # we need to handle it differently. 
+ if ($2=="javaOptions") { + printf("%s=%s\n", $2, $3); + } else { + printf("export %s=\"%s\"\n", $2, $3); + } + } + }' > $2 +} + +function usage { + echo usage: ${script} -i file [-v] [-h] + echo " -i Parameter inputs file, must be specified." + echo " -h Help" + exit $1 +} + +function installKubePrometheusStack { + helm repo add prometheus-community https://prometheus-community.github.io/helm-charts + helm repo update + echo "Setup prometheus-community/kube-prometheus-stack in progress" + if [ ${exposeMonitoringNodePort} == "true" ]; then + + helm install ${monitoringNamespace} prometheus-community/kube-prometheus-stack \ + --namespace ${monitoringNamespace} \ + --set prometheus.service.type=NodePort --set prometheus.service.nodePort=${prometheusNodePort} \ + --set alertmanager.service.type=NodePort --set alertmanager.service.nodePort=${alertmanagerNodePort} \ + --set grafana.adminPassword=admin --set grafana.service.type=NodePort --set grafana.service.nodePort=${grafanaNodePort} \ + --version "16.5.0" ${additionalParamForKubePrometheusStack} \ + --atomic --wait + else + helm install ${monitoringNamespace} prometheus-community/kube-prometheus-stack \ + --namespace ${monitoringNamespace} \ + --set grafana.adminPassword=admin \ + --version "16.5.0" ${additionalParamForKubePrometheusStack} \ + --atomic --wait + fi + exitIfError $? "ERROR: prometheus-community/kube-prometheus-stack install failed." +} + +#Parse the inputs +while getopts "hi:" opt; do + case $opt in + i) valuesInputFile="${OPTARG}" + ;; + h) usage 0 + ;; + *) usage 1 + ;; + esac +done + +if [ -z ${valuesInputFile} ]; then + echo "${script}: -i must be specified." + missingRequiredOption="true" +fi + +if [ "${missingRequiredOption}" == "true" ]; then + usage 1 +fi + +if [ ! 
-f ${valuesInputFile} ]; then + echo "Unable to locate the input parameters file ${valuesInputFile}" + fail 'The error listed above must be resolved before the script can continue' +fi + + +exportValuesFile=$(mktemp /tmp/export-values-XXXXXXXXX.sh) +parseYaml ${valuesInputFile} ${exportValuesFile} + + +source ${exportValuesFile} +rm ${exportValuesFile} + + +if [ "${setupKubePrometheusStack}" = "true" ]; then + if test "$(kubectl get namespace ${monitoringNamespace} --ignore-not-found | wc -l)" = 0; then + echo "The namespace ${monitoringNamespace} for install prometheus-community/kube-prometheus-stack does not exist. Creating the namespace ${monitoringNamespace}" + kubectl create namespace ${monitoringNamespace} + fi + echo -e "Monitoring setup in ${monitoringNamespace} in progress\n" + + # Create the namespace and CRDs, and then wait for them to be available before creating the remaining resources + kubectl label nodes --all kubernetes.io/os=linux --overwrite=true + + echo "Setup prometheus-community/kube-prometheus-stack started" + installKubePrometheusStack + cd $OLD_PWD + + echo "Setup prometheus-community/kube-prometheus-stack completed" +fi + +username=`kubectl get secrets ${weblogicCredentialsSecretName} -n ${domainNamespace} -o=jsonpath='{.data.username}'|base64 --decode` +password=`kubectl get secrets ${weblogicCredentialsSecretName} -n ${domainNamespace} -o=jsonpath='{.data.password}'|base64 --decode` + +# Setting up the WebLogic Monitoring Exporter +echo "Deploy WebLogic Monitoring Exporter started" +script=${scriptDir}/scripts/deploy-weblogic-monitoring-exporter.sh +sh ${script} +exitIfError $? "ERROR: $script failed." 
+echo "Deploy WebLogic Monitoring Exporter completed" + + +# Deploy servicemonitors +serviceMonitor=${scriptDir}/manifests/wls-exporter-ServiceMonitor.yaml +cp "${serviceMonitor}.template" "${serviceMonitor}" +sed -i -e "s/release: monitoring/release: ${monitoringNamespace}/g" ${serviceMonitor} +sed -i -e "s/user: %USERNAME%/user: `echo -n $username|base64 -w0`/g" ${serviceMonitor} +sed -i -e "s/password: %PASSWORD%/password: `echo -n $password|base64 -w0`/g" ${serviceMonitor} +sed -i -e "s/namespace:.*/namespace: ${domainNamespace}/g" ${serviceMonitor} +sed -i -e "s/weblogic.domainName:.*/weblogic.domainName: ${domainUID}/g" ${serviceMonitor} +sed -i -e "$!N;s/matchNames:\n -.*/matchNames:\n - ${domainNamespace}/g;P;D" ${serviceMonitor} + +kubectl apply -f ${serviceMonitor} + + +if [ "${setupKubePrometheusStack}" = "true" ]; then + # Deploying WebLogic Server Grafana Dashboard + echo "Deploying WebLogic Server Grafana Dashboard...." + grafanaEndpointIP=$(kubectl get endpoints ${monitoringNamespace}-grafana -n ${monitoringNamespace} -o=jsonpath="{.subsets[].addresses[].ip}") + grafanaEndpointPort=$(kubectl get endpoints ${monitoringNamespace}-grafana -n ${monitoringNamespace} -o=jsonpath="{.subsets[].ports[].port}") + grafanaEndpoint="${grafanaEndpointIP}:${grafanaEndpointPort}" + curl --noproxy "*" -X POST -H "Content-Type: application/json" -d @config/weblogic-server-dashboard.json http://admin:admin@${grafanaEndpoint}/api/dashboards/db + echo "" + echo "Deployed WebLogic Server Grafana Dashboard successfully" + echo "" + if [ ${exposeMonitoringNodePort} == "true" ]; then + echo "Grafana is available at NodePort: ${grafanaNodePort}" + echo "Prometheus is available at NodePort: ${prometheusNodePort}" + echo "Altermanager is available at NodePort: ${alertmanagerNodePort}" + echo "==============================================================" + fi +else + echo "Please import config/weblogic-server-dashboard.json manually into Grafana" +fi + +echo "" + diff --git 
a/OracleIdentityGovernance/kubernetes/rest/README.md b/OracleIdentityGovernance/kubernetes/rest/README.md new file mode 100755 index 000000000..f0e09b088 --- /dev/null +++ b/OracleIdentityGovernance/kubernetes/rest/README.md @@ -0,0 +1,38 @@ +# Sample to create certificates and keys for the operator + +When a user enables the operator's external REST API (by setting +`externalRestEnabled` to `true` when installing the operator Helm chart), the user needs +to provide the certificate and private key for api's SSL identity too (by creating a +`tls secret` before the installation of the operator helm chart). + +This sample script generates a self-signed certificate and private key that can be used +for the operator's external REST api when experimenting with the operator. They should +not be used in a production environment. + +The syntax of the script is: +```shell +$ kubernetes/samples/scripts/rest/generate-external-rest-identity.sh -n [-s ] +``` + +Where `` lists the subject alternative names to put into the generated self-signed +certificate for the external WebLogic Operator REST HTTPS interface, should match +the namespace where the operator will be installed, and optionally the secret name, which defaults +to `weblogic-operator-external-rest-identity`. Each must be prefaced +by `DNS:` (for a name) or `IP:` (for an address), for example: +``` +DNS:myhost,DNS:localhost,IP:127.0.0.1 +``` + +You should include the addresses of all masters and load balancers in this list. The certificate +cannot be conveniently changed after installation of the operator. + +The script creates the secret in the weblogic-operator namespace with the self-signed +certificate and private key + +Example usage: +```shell +$ generate-external-rest-identity.sh IP:127.0.0.1 -n weblogic-operator > my_values.yaml +$ echo "externalRestEnabled: true" >> my_values.yaml + ... 
+$ helm install my_operator kubernetes/charts/weblogic-operator --namespace my_operator-ns --values my_values.yaml --wait +``` diff --git a/OracleIdentityGovernance/kubernetes/rest/generate-external-rest-identity.sh b/OracleIdentityGovernance/kubernetes/rest/generate-external-rest-identity.sh new file mode 100755 index 000000000..e645d3925 --- /dev/null +++ b/OracleIdentityGovernance/kubernetes/rest/generate-external-rest-identity.sh @@ -0,0 +1,200 @@ +#!/usr/bin/env bash +# Copyright (c) 2017, 2021, Oracle and/or its affiliates. +# Licensed under the Universal Permissive License v 1.0 as shown at https://oss.oracle.com/licenses/upl. +# +# When the customer enables the operator's external REST api (by setting +# externalRestEnabled to true when installing the operator helm chart), the customer needs +# to provide the certificate and private key for api's SSL identity too (by creating a +# tls secret before the installation of the operator helm chart). +# +# This sample script generates a self-signed certificate and private key that can be used +# for the operator's external REST api when experimenting with the operator. They should +# not be used in a production environment. +# +# The sytax of the script is: +# +# kubernetes/samples/scripts/rest/generate-external-rest-identity.sh -a -n +# +# Where lists the subject alternative names to put into the generated self-signed +# certificate for the external WebLogic Operator REST https interface, for example: +# +# DNS:myhost,DNS:localhost,IP:127.0.0.1 +# +# You should include the addresses of all masters and load balancers in this list. The certificate +# cannot be conveniently changed after installation of the operator. +# +# The script creates the secret in the weblogic-operator namespace with the self-signed +# certificate and private key +# +# Example usage: +# generate-external-rest-identity.sh -a IP:127.0.0.1 -n weblogic-operator > my_values.yaml +# echo "externalRestEnabled: true" >> my_values.yaml +# ... 
+# helm install my_operator kubernetes/charts/weblogic-operator --namespace my_operator-ns --values my_values.yaml --wait +usage(){ +cat < -n +Options: +-a SANS Required, the SANs for the certificate +-n NAMESPACE Required, the namespace where the secret will be created. +-s SECRET_NAME Optional, the name of the kubernetes secret. Default is: weblogic-operator-external-rest-identity. +-h, --help Display this help text. +EOF +exit 1 +} + +if [ ! -x "$(command -v keytool)" ]; then + echo "Can't find keytool. Please add it to the path." + exit 1 +fi + +if [ ! -x "$(command -v openssl)" ]; then + echo "Can't find openssl. Please add it to the path." + exit 1 +fi + +if [ ! -x "$(command -v base64)" ]; then + echo "Can't find base64. Please add it to the path." + exit 1 +fi + +TEMP_DIR=`mktemp -d` +if [ $? -ne 0 ]; then + echo "$0: Can't create temp directory." + exit 1 +fi + +if [ -z $TEMP_DIR ]; then + echo "Can't create temp directory." + exit 1 +fi + +function cleanup { + rm -r $TEMP_DIR + if [[ $SUCCEEDED != "true" ]]; then + exit 1 + fi +} + +set -e +#set -x + +trap "cleanup" EXIT + +SECRET_NAME="weblogic-operator-external-rest-identity" + +while [ $# -gt 0 ] + do + key="$1" + case $key in + -a) + shift # past argument + if [ $# -eq 0 ] || [ ${1:0:1} == "-" ]; then echo "SANs is required and is missing"; usage; fi + SANS=$1 + shift # past value + ;; + -n) + shift # past argument + if [ $# -eq 0 ] || [ ${1:0:1} == "-" ]; then echo "Namespace is required and is missing"; usage; fi + NAMESPACE=$1 + shift # past value + ;; + -s) + shift # past argument + if [ $# -eq 0 ] || [ ${1:0:1} == "-" ]; then echo "Invalid secret name $1"; usage; fi + SECRET_NAME=$1 + shift # past value + ;; + -h) + shift # past argument + ;; + *) + SANS=$1 + shift # past argument + ;; + esac +done + +if [ -z "$SANS" ] +then + 1>&2 + echo "SANs is required and is missing" + usage +fi + +if [ -z "$NAMESPACE" ] +then + 1>&2 + echo "Namespace is required and is missing" + usage +fi + 
+DAYS_VALID="3650" +TEMP_PW="temp_password" +OP_PREFIX="weblogic-operator" +OP_ALIAS="${OP_PREFIX}-alias" +OP_JKS="${TEMP_DIR}/${OP_PREFIX}.jks" +OP_PKCS12="${TEMP_DIR}/${OP_PREFIX}.p12" +OP_CSR="${TEMP_DIR}/${OP_PREFIX}.csr" +OP_CERT_PEM="${TEMP_DIR}/${OP_PREFIX}.cert.pem" +OP_KEY_PEM="${TEMP_DIR}/${OP_PREFIX}.key.pem" + +# generate a keypair for the operator's REST service, putting it in a keystore +keytool \ + -genkey \ + -keystore ${OP_JKS} \ + -alias ${OP_ALIAS} \ + -storepass ${TEMP_PW} \ + -keypass ${TEMP_PW} \ + -keysize 2048 \ + -keyalg RSA \ + -validity ${DAYS_VALID} \ + -dname "CN=weblogic-operator" \ + -ext KU=digitalSignature,nonRepudiation,keyEncipherment,dataEncipherment,keyAgreement \ + -ext SAN="${SANS}" \ +2> /dev/null + +# extract the cert to a pem file +keytool \ + -exportcert \ + -keystore ${OP_JKS} \ + -storepass ${TEMP_PW} \ + -alias ${OP_ALIAS} \ + -rfc \ +> ${OP_CERT_PEM} 2> /dev/null + +# convert the keystore to a pkcs12 file +keytool \ + -importkeystore \ + -srckeystore ${OP_JKS} \ + -srcstorepass ${TEMP_PW} \ + -destkeystore ${OP_PKCS12} \ + -srcstorepass ${TEMP_PW} \ + -deststorepass ${TEMP_PW} \ + -deststoretype PKCS12 \ +2> /dev/null + +# extract the private key from the pkcs12 file to a pem file +openssl \ + pkcs12 \ + -in ${OP_PKCS12} \ + -passin pass:${TEMP_PW} \ + -nodes \ + -nocerts \ + -out ${OP_KEY_PEM} \ +2> /dev/null + +set +e +# Check if namespace exist +kubectl get namespace $NAMESPACE >/dev/null 2>/dev/null +if [ $? -eq 1 ]; then + echo "Namespace $NAMESPACE does not exist" + exit 1 +fi +kubectl get secret $SECRET_NAME -n $NAMESPACE >/dev/null 2>/dev/null +if [ $? 
-eq 1 ]; then + kubectl create secret tls "$SECRET_NAME" --cert=${OP_CERT_PEM} --key=${OP_KEY_PEM} -n $NAMESPACE >/dev/null +fi +echo "externalRestIdentitySecret: $SECRET_NAME" + +SUCCEEDED=true diff --git a/OracleIdentityGovernance/kubernetes/scaling/scalingAction.sh b/OracleIdentityGovernance/kubernetes/scaling/scalingAction.sh new file mode 100755 index 000000000..0da098e68 --- /dev/null +++ b/OracleIdentityGovernance/kubernetes/scaling/scalingAction.sh @@ -0,0 +1,504 @@ +#!/bin/bash +# Copyright (c) 2017, 2021, Oracle and/or its affiliates. +# Licensed under the Universal Permissive License v 1.0 as shown at https://oss.oracle.com/licenses/upl. + +# script parameters +scaling_action="" +wls_domain_uid="" +wls_cluster_name="" +wls_domain_namespace="default" +operator_service_name="internal-weblogic-operator-svc" +operator_namespace="weblogic-operator" +operator_service_account="weblogic-operator" +scaling_size=1 +access_token="" +no_op="" +kubernetes_master="https://${KUBERNETES_SERVICE_HOST}:${KUBERNETES_SERVICE_PORT}" +log_file_name="scalingAction.log" + +# timestamp +# purpose: echo timestamp in the form yyyy-mm-ddThh:mm:ss.nnnnnnZ +# example: 2018-10-01T14:00:00.000001Z +function timestamp() { + local timestamp="`date --utc '+%Y-%m-%dT%H:%M:%S.%NZ' 2>&1`" + if [ ! 
"${timestamp/illegal/xyz}" = "${timestamp}" ]; then + # old shell versions don't support %N or --utc + timestamp="`date -u '+%Y-%m-%dT%H:%M:%S.000000Z' 2>&1`" + fi + echo "${timestamp}" +} + +function trace() { + echo "@[$(timestamp)][$wls_domain_namespace][$wls_domain_uid][$wls_cluster_name][INFO]" "$@" >> ${log_file_name} +} + +function print_usage() { + echo "Usage: scalingAction.sh --action=[scaleUp | scaleDown] --domain_uid= --cluster_name= [--kubernetes_master=https://${KUBERNETES_SERVICE_HOST}:${KUBERNETES_SERVICE_PORT}] [--access_token=] [--wls_domain_namespace=default] [--operator_namespace=weblogic-operator] [--operator_service_name=weblogic-operator] [--scaling_size=1] [--no_op]" + echo " where" + echo " action - scaleUp or scaleDown" + echo " domain_uid - WebLogic Domain Unique Identifier" + echo " cluster_name - WebLogic Cluster Name" + echo " kubernetes_master - Kubernetes master URL, default=https://${KUBERNETES_SERVICE_HOST}:${KUBERNETES_SERVICE_PORT}" + echo " access_token - Service Account Bearer token for authentication and authorization for access to REST Resources" + echo " wls_domain_namespace - Kubernetes name space WebLogic Domain is defined in, default=default" + echo " operator_service_name - WebLogic Operator Service name, default=internal-weblogic-operator-svc" + echo " operator_service_account - Kubernetes Service Account for WebLogic Operator, default=weblogic-operator" + echo " operator_namespace - WebLogic Operator Namespace, default=weblogic-operator" + echo " scaling_size - number of WebLogic server instances by which to scale up or down, default=1" + echo " no_op - if specified, returns without doing anything. 
For use by unit test to include methods in the script" + exit 1 +} + +# Retrieve WebLogic Operator Service Account Token for Authorization +function initialize_access_token() { + if [ -z "$access_token" ] + then + access_token=`cat /var/run/secrets/kubernetes.io/serviceaccount/token` + fi +} + +function logScalingParameters() { + trace "scaling_action: $scaling_action" + trace "wls_domain_uid: $wls_domain_uid" + trace "wls_cluster_name: $wls_cluster_name" + trace "wls_domain_namespace: $wls_domain_namespace" + trace "operator_service_name: $operator_service_name" + trace "operator_service_account: $operator_service_account" + trace "operator_namespace: $operator_namespace" + trace "scaling_size: $scaling_size" +} + +function jq_available() { + if [ -x "$(command -v jq)" ] && [ -z "$DONT_USE_JQ" ]; then + return; + fi + false +} + +# Query WebLogic Operator Service Port +function get_operator_internal_rest_port() { + local STATUS=$(curl \ + -v \ + --cacert /var/run/secrets/kubernetes.io/serviceaccount/ca.crt \ + -H "Authorization: Bearer $(cat /var/run/secrets/kubernetes.io/serviceaccount/token)" \ + -X GET $kubernetes_master/api/v1/namespaces/$operator_namespace/services/$operator_service_name/status) + if [ $? 
-ne 0 ] + then + trace "Failed to retrieve status of $operator_service_name in name space: $operator_namespace" + trace "STATUS: $STATUS" + exit 1 + fi + + local port + if jq_available; then + local extractPortCmd="(.spec.ports[] | select (.name == \"rest\") | .port)" + port=$(echo "${STATUS}" | jq "${extractPortCmd}" 2>> ${log_file_name}) + else +cat > cmds-$$.py << INPUT +import sys, json +for i in json.load(sys.stdin)["spec"]["ports"]: + if i["name"] == "rest": + print(i["port"]) +INPUT +port=$(echo "${STATUS}" | python cmds-$$.py 2>> ${log_file_name}) + fi + echo "$port" +} + +# Retrieve the api version of the deployed Custom Resource Domain +function get_domain_api_version() { + # Retrieve Custom Resource Definition for WebLogic domain + local APIS=$(curl \ + -v \ + --cacert /var/run/secrets/kubernetes.io/serviceaccount/ca.crt \ + -H "Authorization: Bearer $(cat /var/run/secrets/kubernetes.io/serviceaccount/token)" \ + -X GET \ + $kubernetes_master/apis) + if [ $? -ne 0 ] + then + trace "Failed to retrieve list of APIs from Kubernetes cluster" + trace "APIS: $APIS" + exit 1 + fi + +# Find domain version + local domain_api_version + if jq_available; then + local extractVersionCmd="(.groups[] | select (.name == \"weblogic.oracle\") | .preferredVersion.version)" + domain_api_version=$(echo "${APIS}" | jq -r "${extractVersionCmd}" 2>> ${log_file_name}) + else +cat > cmds-$$.py << INPUT +import sys, json +for i in json.load(sys.stdin)["groups"]: + if i["name"] == "weblogic.oracle": + print(i["preferredVersion"]["version"]) +INPUT +domain_api_version=`echo ${APIS} | python cmds-$$.py 2>> ${log_file_name}` + fi + echo "$domain_api_version" +} + +# Retrieve Custom Resource Domain +function get_custom_resource_domain() { + local DOMAIN=$(curl \ + -v \ + --cacert /var/run/secrets/kubernetes.io/serviceaccount/ca.crt \ + -H "Authorization: Bearer $(cat /var/run/secrets/kubernetes.io/serviceaccount/token)" \ + 
$kubernetes_master/apis/weblogic.oracle/$domain_api_version/namespaces/$wls_domain_namespace/domains/$wls_domain_uid) + if [ $? -ne 0 ]; then + trace "Failed to retrieve WebLogic Domain Custom Resource Definition" + exit 1 + fi + echo "$DOMAIN" +} + +# Verify if cluster is defined in clusters of the Custom Resource Domain +# args: +# $1 Custom Resource Domain +function is_defined_in_clusters() { + local DOMAIN="$1" + local in_cluster_startup="False" + + if jq_available; then + local inClusterStartupCmd="(.spec.clusters[] | select (.clusterName == \"${wls_cluster_name}\"))" + local clusterDefinedInCRD=$(echo "${DOMAIN}" | jq "${inClusterStartupCmd}" 2>> ${log_file_name}) + if [ "${clusterDefinedInCRD}" != "" ]; then + in_cluster_startup="True" + fi + else +cat > cmds-$$.py << INPUT +import sys, json +outer_loop_must_break = False +for j in json.load(sys.stdin)["spec"]["clusters"]: + if j["clusterName"] == "$wls_cluster_name": + outer_loop_must_break = True + print (True) + break +if outer_loop_must_break == False: + print (False) +INPUT +in_cluster_startup=`echo ${DOMAIN} | python cmds-$$.py 2>> ${log_file_name}` + fi + echo "$in_cluster_startup" +} + +# Gets the current replica count of the cluster +# args: +# $1 Custom Resource Domain +function get_num_ms_in_cluster() { + local DOMAIN="$1" + local num_ms + if jq_available; then + local numManagedServersCmd="(.spec.clusters[] | select (.clusterName == \"${wls_cluster_name}\") | .replicas)" + num_ms=$(echo "${DOMAIN}" | jq "${numManagedServersCmd}" 2>> ${log_file_name}) + else +cat > cmds-$$.py << INPUT +import sys, json +for j in json.load(sys.stdin)["spec"]["clusters"]: + if j["clusterName"] == "$wls_cluster_name": + print (j["replicas"]) +INPUT + num_ms=`echo ${DOMAIN} | python cmds-$$.py 2>> ${log_file_name}` + fi + + if [ "${num_ms}" == "null" ] || [ "${num_ms}" == '' ] ; then + num_ms=0 + fi + + echo "$num_ms" +} + +# Gets the replica count at the Domain level +# args: +# $1 Custom Resource Domain +function 
get_num_ms_domain_scope() { + local DOMAIN="$1" + local num_ms + if jq_available; then + num_ms=$(echo "${DOMAIN}" | jq -r '.spec.replicas' 2>> ${log_file_name}) + else +cat > cmds-$$.py << INPUT +import sys, json +print (json.load(sys.stdin)["spec"]["replicas"]) +INPUT + num_ms=`echo ${DOMAIN} | python cmds-$$.py 2>> ${log_file_name}` + fi + + if [ "${num_ms}" == "null" ] || [ "${num_ms}" == '' ] ; then + # if not defined then default to 0 + num_ms=0 + fi + + echo "$num_ms" +} + +# +# Function to get minimum replica count for cluster +# $1 - Domain resource in json format +# $2 - Name of the cluster +# $3 - Return value containing minimum replica count +# +function get_min_replicas { + local domainJson=$1 + local clusterName=$2 + local __result=$3 + + eval $__result=0 + if jq_available; then + minReplicaCmd="(.status.clusters[] | select (.clusterName == \"${clusterName}\")) \ + | .minimumReplicas" + minReplicas=$(echo ${domainJson} | jq "${minReplicaCmd}" 2>> ${log_file_name}) + else +cat > cmds-$$.py << INPUT +import sys, json +for j in json.load(sys.stdin)["status"]["clusters"]: + if j["clusterName"] == "$clusterName": + print (j["minimumReplicas"]) +INPUT + minReplicas=`echo ${DOMAIN} | python cmds-$$.py 2>> ${log_file_name}` + fi + eval $__result=${minReplicas} +} + +# Get the current replica count for the WLS cluster if defined in the CRD's Cluster +# configuration. If WLS cluster is not defined in the CRD then return the Domain +# scoped replica value, if present. Returns replica count = 0 if no replica count found. 
+# args: +# $1 "True" if WLS cluster configuration defined in CRD, "False" otherwise +# $2 Custom Resource Domain +function get_replica_count() { + local in_cluster_startup="$1" + local DOMAIN="$2" + local num_ms + if [ "$in_cluster_startup" == "True" ] + then + trace "$wls_cluster_name defined in clusters" + num_ms=$(get_num_ms_in_cluster "$DOMAIN") + else + trace "$wls_cluster_name NOT defined in clusters" + num_ms=$(get_num_ms_domain_scope "$DOMAIN") + fi + + get_min_replicas "${DOMAIN}" "${wls_cluster_name}" minReplicas + if [[ "${num_ms}" -lt "${minReplicas}" ]]; then + # Reset managed server count to minimum replicas + num_ms=${minReplicas} + fi + + echo "$num_ms" +} + +# Determine the number of managed servers to scale +# args: +# $1 scaling action (scaleUp or scaleDown) +# $2 current replica count +# $3 scaling increment value +function calculate_new_ms_count() { + local scaling_action="$1" + local current_replica_count="$2" + local scaling_size="$3" + local new_ms + if [ "$scaling_action" == "scaleUp" ]; + then + # Scale up by specified scaling size + # shellcheck disable=SC2004 + new_ms=$(($current_replica_count + $scaling_size)) + else + # Scale down by specified scaling size + new_ms=$(($current_replica_count - $scaling_size)) + fi + echo "$new_ms" +} + +# Verify if requested managed server scaling count is less than the configured +# minimum replica count for the cluster. 
+# args: +# $1 Managed server count +# $2 Custom Resource Domain +# $3 Cluster name +function verify_minimum_ms_count_for_cluster() { + local new_ms="$1" + local domainJson="$2" + local clusterName="$3" + # check if replica count is less than minimum replicas + get_min_replicas "${domainJson}" "${clusterName}" minReplicas + if [ "${new_ms}" -lt "${minReplicas}" ]; then + trace "Scaling request to new managed server count $new_ms is less than configured minimum \ + replica count $minReplicas" + exit 1 + fi +} + +# Create the REST endpoint CA certificate in PEM format +# args: +# $1 certificate file name to create +function create_ssl_certificate_file() { + local pem_filename="$1" + if [ ${INTERNAL_OPERATOR_CERT} ]; + then + echo ${INTERNAL_OPERATOR_CERT} | base64 --decode > $pem_filename + else + trace "Operator Cert File not found" + exit 1 + fi +} + +# Create request body for scaling request +# args: +# $1 replica count +function get_request_body() { +local new_ms="$1" +local request_body=$(cat <=1.19-0" .Capabilities.KubeVersion.GitVersion -}} +{{- if eq .Values.type "NGINX" }} +{{- if or (eq .Values.sslType "NONSSL") (eq .Values.sslType "SSL") }} +--- +apiVersion: networking.k8s.io/v1 +kind: Ingress +metadata: + name: {{ .Values.wlsDomain.domainUID }}-nginx + namespace: {{ .Release.Namespace }} + annotations: + kubernetes.io/ingress.class: 'nginx' + nginx.ingress.kubernetes.io/affinity: 'cookie' + nginx.ingress.kubernetes.io/session-cookie-name: 'sticky' + nginx.ingress.kubernetes.io/affinity-mode: 'persistent' +{{- if eq .Values.sslType "SSL" }} + nginx.ingress.kubernetes.io/configuration-snippet: | + more_clear_input_headers "WL-Proxy-Client-IP" "WL-Proxy-SSL"; + more_set_input_headers "X-Forwarded-Proto: https"; + more_set_input_headers "WL-Proxy-SSL: true"; + nginx.ingress.kubernetes.io/ingress.allow-http: 'false' +{{- end }} +spec: + rules: + - host: '{{ .Values.nginx.hostname }}' + http: + paths: + - path: /console + pathType: ImplementationSpecific + 
backend: + service: + name: '{{ .Values.wlsDomain.domainUID }}-{{ .Values.wlsDomain.adminServerName | lower | replace "_" "-" }}' + port: + number: {{ .Values.wlsDomain.adminServerPort }} + - path: /em + pathType: ImplementationSpecific + backend: + service: + name: '{{ .Values.wlsDomain.domainUID }}-{{ .Values.wlsDomain.adminServerName | lower | replace "_" "-" }}' + port: + number: {{ .Values.wlsDomain.adminServerPort }} + - path: /weblogic/ready + pathType: ImplementationSpecific + backend: + service: + name: '{{ .Values.wlsDomain.domainUID }}-{{ .Values.wlsDomain.adminServerName | lower | replace "_" "-" }}' + port: + number: {{ .Values.wlsDomain.adminServerPort }} + {{- if or (eq .Values.domainType "soa") }} + - path: / + pathType: ImplementationSpecific + backend: + service: + name: '{{ .Values.wlsDomain.domainUID }}-cluster-{{ .Values.wlsDomain.soaClusterName | lower | replace "_" "-" }}' + port: + number: {{ .Values.wlsDomain.soaManagedServerPort }} + {{- end }} + {{- if or (eq .Values.domainType "osb") }} + - path: / + pathType: ImplementationSpecific + backend: + service: + name: '{{ .Values.wlsDomain.domainUID }}-cluster-{{ .Values.wlsDomain.osbClusterName | lower | replace "_" "-" }}' + port: + number: {{ .Values.wlsDomain.osbManagedServerPort }} + {{- end }} + {{- if or (eq .Values.domainType "soa") (eq .Values.domainType "soaosb") (eq .Values.domainType "soab2b") (eq .Values.domainType "soaosbb2b") }} + - path: /soa-infra + pathType: ImplementationSpecific + backend: + service: + name: '{{ .Values.wlsDomain.domainUID }}-cluster-{{ .Values.wlsDomain.soaClusterName | lower | replace "_" "-" }}' + port: + number: {{ .Values.wlsDomain.soaManagedServerPort }} + {{- end }} + {{- if or (eq .Values.domainType "soa") (eq .Values.domainType "soaosb") (eq .Values.domainType "soab2b") (eq .Values.domainType "soaosbb2b") }} + - path: /soa/composer + pathType: ImplementationSpecific + backend: + service: + name: '{{ .Values.wlsDomain.domainUID }}-cluster-{{ 
.Values.wlsDomain.soaClusterName | lower | replace "_" "-" }}' + port: + number: {{ .Values.wlsDomain.soaManagedServerPort }} + {{- end }} + {{- if or (eq .Values.domainType "soa") (eq .Values.domainType "soaosb") (eq .Values.domainType "soab2b") (eq .Values.domainType "soaosbb2b") }} + - path: /integration/worklistapp + pathType: ImplementationSpecific + backend: + service: + name: '{{ .Values.wlsDomain.domainUID }}-cluster-{{ .Values.wlsDomain.soaClusterName | lower | replace "_" "-" }}' + port: + number: {{ .Values.wlsDomain.soaManagedServerPort }} + {{- end }} + {{- if or (eq .Values.domainType "osb") (eq .Values.domainType "soaosb") (eq .Values.domainType "soaosbb2b") }} + - path: /servicebus + pathType: ImplementationSpecific + backend: + service: + name: '{{ .Values.wlsDomain.domainUID }}-{{ .Values.wlsDomain.adminServerName | lower | replace "_" "-" }}' + port: + number: {{ .Values.wlsDomain.adminServerPort }} + {{- end }} + {{- if or (eq .Values.domainType "osb") (eq .Values.domainType "soaosb") (eq .Values.domainType "soaosbb2b") }} + - path: /lwpfconsole + pathType: ImplementationSpecific + backend: + service: + name: '{{ .Values.wlsDomain.domainUID }}-{{ .Values.wlsDomain.adminServerName | lower | replace "_" "-" }}' + port: + number: {{ .Values.wlsDomain.adminServerPort }} + {{- end }} + {{- if or (eq .Values.domainType "osb") (eq .Values.domainType "soaosb") (eq .Values.domainType "soaosbb2b") }} + - path: /xbusrouting + pathType: ImplementationSpecific + backend: + service: + name: '{{ .Values.wlsDomain.domainUID }}-{{ .Values.wlsDomain.adminServerName | lower | replace "_" "-" }}' + port: + number: {{ .Values.wlsDomain.adminServerPort }} + {{- end }} + {{- if or (eq .Values.domainType "osb") (eq .Values.domainType "soaosb") (eq .Values.domainType "soaosbb2b") }} + - path: /xbustransform + pathType: ImplementationSpecific + backend: + service: + name: '{{ .Values.wlsDomain.domainUID }}-{{ .Values.wlsDomain.adminServerName | lower | replace "_" "-" 
}}' + port: + number: {{ .Values.wlsDomain.adminServerPort }} + {{- end }} + {{- if or (eq .Values.domainType "soa") (eq .Values.domainType "soaosb") (eq .Values.domainType "soab2b") (eq .Values.domainType "soaosbb2b") }} + - path: /ess + pathType: ImplementationSpecific + backend: + service: + name: '{{ .Values.wlsDomain.domainUID }}-cluster-{{ .Values.wlsDomain.soaClusterName | lower | replace "_" "-" }}' + port: + number: {{ .Values.wlsDomain.soaManagedServerPort }} + {{- end }} + {{- if or (eq .Values.domainType "soa") (eq .Values.domainType "soaosb") (eq .Values.domainType "soab2b") (eq .Values.domainType "soaosbb2b") }} + - path: /EssHealthCheck + pathType: ImplementationSpecific + backend: + service: + name: '{{ .Values.wlsDomain.domainUID }}-cluster-{{ .Values.wlsDomain.soaClusterName | lower | replace "_" "-" }}' + port: + number: {{ .Values.wlsDomain.soaManagedServerPort }} + {{- end }} + {{- if or (eq .Values.domainType "soab2b") (eq .Values.domainType "soaosbb2b") }} + - path: /b2bconsole + pathType: ImplementationSpecific + backend: + service: + name: '{{ .Values.wlsDomain.domainUID }}-cluster-{{ .Values.wlsDomain.soaClusterName | lower | replace "_" "-" }}' + port: + number: {{ .Values.wlsDomain.soaManagedServerPort }} + {{- end }} +{{- if eq .Values.sslType "SSL" }} + tls: + - hosts: + - '{{ .Values.nginx.hostname }}' + secretName: domain1-tls-cert +{{- end }} +{{- else }} +--- +apiVersion: v1 +kind: Service +metadata: + name: {{ .Values.wlsDomain.domainUID }}-{{ .Values.wlsDomain.adminServerName | lower }}-nginx-ssl + namespace: {{ .Release.Namespace }} +spec: + ports: + - port: {{ .Values.wlsDomain.adminServerSSLPort }} + protocol: TCP + targetPort: {{ .Values.wlsDomain.adminServerSSLPort }} + selector: + weblogic.domainUID: {{ .Values.wlsDomain.domainUID }} + weblogic.serverName: {{ .Values.wlsDomain.adminServerName }} + type: ClusterIP + +--- +apiVersion: networking.k8s.io/v1 +kind: Ingress +metadata: + name: {{ .Values.wlsDomain.domainUID 
}}-nginx-e2essl-admin + namespace: {{ .Release.Namespace }} + annotations: + kubernetes.io/ingress.class: 'nginx' + nginx.ingress.kubernetes.io/affinity: 'cookie' + nginx.ingress.kubernetes.io/session-cookie-name: 'sticky' + nginx.ingress.kubernetes.io/ssl-passthrough: 'true' +spec: + tls: + - hosts: + - '{{ .Values.hostName.admin }}' + secretName: domain1-tls-cert + rules: + - host: '{{ .Values.hostName.admin }}' + http: + paths: + - path: + pathType: ImplementationSpecific + backend: + service: + name: '{{ .Values.wlsDomain.domainUID }}-{{ .Values.wlsDomain.adminServerName | lower | replace "_" "-" }}-nginx-ssl' + port: + number: {{ .Values.wlsDomain.adminServerSSLPort }} +{{- if or (eq .Values.domainType "soa") (eq .Values.domainType "soaosb") (eq .Values.domainType "soab2b") (eq .Values.domainType "soaosbb2b") }} +--- +apiVersion: networking.k8s.io/v1 +kind: Ingress +metadata: + name: {{ .Values.wlsDomain.domainUID }}-nginx-e2essl-soa + namespace: {{ .Release.Namespace }} + annotations: + kubernetes.io/ingress.class: 'nginx' + nginx.ingress.kubernetes.io/affinity: 'cookie' + nginx.ingress.kubernetes.io/session-cookie-name: 'sticky' + nginx.ingress.kubernetes.io/ssl-passthrough: 'true' +spec: + tls: + - hosts: + - '{{ .Values.hostName.soa }}' + secretName: domain1-tls-cert + rules: + - host: '{{ .Values.hostName.soa }}' + http: + paths: + - path: / + pathType: ImplementationSpecific + backend: + service: + name: '{{ .Values.wlsDomain.domainUID }}-cluster-{{ .Values.wlsDomain.soaClusterName | lower | replace "_" "-" }}' + port: + number: {{ .Values.wlsDomain.soaManagedServerSSLPort }} +{{- end }} +{{- if or (eq .Values.domainType "osb") (eq .Values.domainType "soaosb") (eq .Values.domainType "soaosbb2b") }} +--- +apiVersion: networking.k8s.io/v1 +kind: Ingress +metadata: + name: {{ .Values.wlsDomain.domainUID }}-nginx-e2essl-osb + namespace: {{ .Release.Namespace }} + annotations: + kubernetes.io/ingress.class: 'nginx' + nginx.ingress.kubernetes.io/affinity: 
'cookie' + nginx.ingress.kubernetes.io/session-cookie-name: 'sticky' + nginx.ingress.kubernetes.io/ssl-passthrough: 'true' +spec: + tls: + - hosts: + - '{{ .Values.hostName.osb }}' + secretName: domain1-tls-cert + rules: + - host: '{{ .Values.hostName.osb }}' + http: + paths: + - path: / + pathType: ImplementationSpecific + backend: + service: + name: '{{ .Values.wlsDomain.domainUID }}-cluster-{{ .Values.wlsDomain.osbClusterName | lower | replace "_" "-" }}' + port: + number: {{ .Values.wlsDomain.osbManagedServerSSLPort }} +{{- end }} + +{{- end }} +{{- end }} +{{- end }} + diff --git a/OracleSOASuite/kubernetes/charts/ingress-per-domain/templates/nginx-ingress.yaml b/OracleSOASuite/kubernetes/charts/ingress-per-domain/templates/nginx-ingress.yaml index 86b9ce74a..bb74202c2 100755 --- a/OracleSOASuite/kubernetes/charts/ingress-per-domain/templates/nginx-ingress.yaml +++ b/OracleSOASuite/kubernetes/charts/ingress-per-domain/templates/nginx-ingress.yaml @@ -1,6 +1,7 @@ # Copyright (c) 2020, 2021, Oracle and/or its affiliates. # Licensed under the Universal Permissive License v 1.0 as shown at https://oss.oracle.com/licenses/upl. 
# +{{- if semverCompare "<1.19-0" .Capabilities.KubeVersion.GitVersion -}} {{- if eq .Values.type "NGINX" }} {{- if or (eq .Values.sslType "NONSSL") (eq .Values.sslType "SSL") }} --- @@ -13,12 +14,12 @@ metadata: kubernetes.io/ingress.class: 'nginx' nginx.ingress.kubernetes.io/affinity: 'cookie' nginx.ingress.kubernetes.io/session-cookie-name: 'sticky' - nginx.ingress.kubernetes.io/affinity-mode: persistent -{{- if eq .Values.sslType "SSL" }} + nginx.ingress.kubernetes.io/affinity-mode: 'persistent' +{{- if eq .Values.sslType "SSL" }} nginx.ingress.kubernetes.io/configuration-snippet: | - more_clear_input_headers "WL-Proxy-Client-IP" "WL-Proxy-SSL"; - more_set_input_headers "X-Forwarded-Proto: https"; - more_set_input_headers "WL-Proxy-SSL: true"; + more_clear_input_headers "WL-Proxy-Client-IP" "WL-Proxy-SSL"; + more_set_input_headers "X-Forwarded-Proto: https"; + more_set_input_headers "WL-Proxy-SSL: true"; nginx.ingress.kubernetes.io/ingress.allow-http: 'false' {{- end }} spec: @@ -140,10 +141,10 @@ metadata: name: {{ .Values.wlsDomain.domainUID }}-nginx-e2essl-admin namespace: {{ .Release.Namespace }} annotations: - kubernetes.io/ingress.class: nginx + kubernetes.io/ingress.class: 'nginx' nginx.ingress.kubernetes.io/affinity: 'cookie' nginx.ingress.kubernetes.io/session-cookie-name: 'sticky' - nginx.ingress.kubernetes.io/ssl-passthrough: "true" + nginx.ingress.kubernetes.io/ssl-passthrough: 'true' spec: tls: - hosts: @@ -153,11 +154,10 @@ spec: - host: '{{ .Values.hostName.admin }}' http: paths: - - path: + - path: backend: serviceName: '{{ .Values.wlsDomain.domainUID }}-{{ .Values.wlsDomain.adminServerName | lower | replace "_" "-" }}-nginx-ssl' servicePort: {{ .Values.wlsDomain.adminServerSSLPort }} - {{- if or (eq .Values.domainType "soa") (eq .Values.domainType "soaosb") (eq .Values.domainType "soab2b") (eq .Values.domainType "soaosbb2b") }} --- apiVersion: extensions/v1beta1 @@ -166,10 +166,10 @@ metadata: name: {{ .Values.wlsDomain.domainUID 
}}-nginx-e2essl-soa namespace: {{ .Release.Namespace }} annotations: - kubernetes.io/ingress.class: nginx + kubernetes.io/ingress.class: 'nginx' nginx.ingress.kubernetes.io/affinity: 'cookie' nginx.ingress.kubernetes.io/session-cookie-name: 'sticky' - nginx.ingress.kubernetes.io/ssl-passthrough: "true" + nginx.ingress.kubernetes.io/ssl-passthrough: 'true' spec: tls: - hosts: @@ -183,7 +183,7 @@ spec: backend: serviceName: '{{ .Values.wlsDomain.domainUID }}-cluster-{{ .Values.wlsDomain.soaClusterName | lower | replace "_" "-" }}' servicePort: {{ .Values.wlsDomain.soaManagedServerSSLPort }} -{{- end}} +{{- end }} {{- if or (eq .Values.domainType "osb") (eq .Values.domainType "soaosb") (eq .Values.domainType "soaosbb2b") }} --- apiVersion: extensions/v1beta1 @@ -192,10 +192,10 @@ metadata: name: {{ .Values.wlsDomain.domainUID }}-nginx-e2essl-osb namespace: {{ .Release.Namespace }} annotations: - kubernetes.io/ingress.class: nginx + kubernetes.io/ingress.class: 'nginx' nginx.ingress.kubernetes.io/affinity: 'cookie' nginx.ingress.kubernetes.io/session-cookie-name: 'sticky' - nginx.ingress.kubernetes.io/ssl-passthrough: "true" + nginx.ingress.kubernetes.io/ssl-passthrough: 'true' spec: tls: - hosts: @@ -204,14 +204,14 @@ spec: rules: - host: '{{ .Values.hostName.osb }}' http: - paths: + paths: - path: / backend: serviceName: '{{ .Values.wlsDomain.domainUID }}-cluster-{{ .Values.wlsDomain.osbClusterName | lower | replace "_" "-" }}' servicePort: {{ .Values.wlsDomain.osbManagedServerSSLPort }} - {{- end }} +{{- end }} {{- end }} - +{{- end }} {{- end }} diff --git a/OracleSOASuite/kubernetes/charts/ingress-per-domain/templates/traefik-ingress-k8s1.19.yaml b/OracleSOASuite/kubernetes/charts/ingress-per-domain/templates/traefik-ingress-k8s1.19.yaml new file mode 100755 index 000000000..d1ad97033 --- /dev/null +++ b/OracleSOASuite/kubernetes/charts/ingress-per-domain/templates/traefik-ingress-k8s1.19.yaml @@ -0,0 +1,219 @@ +# Copyright (c) 2020, 2021, Oracle and/or its 
affiliates. +# Licensed under the Universal Permissive License v 1.0 as shown at https://oss.oracle.com/licenses/upl. +# +{{- if semverCompare ">=1.19-0" .Capabilities.KubeVersion.GitVersion -}} +{{- if eq .Values.type "TRAEFIK" }} +{{- if or (eq .Values.sslType "NONSSL") (eq .Values.sslType "SSL") }} +--- +apiVersion: networking.k8s.io/v1 +kind: Ingress +metadata: + name: {{ .Values.wlsDomain.domainUID }}-traefik + namespace: {{ .Release.Namespace }} + labels: + weblogic.resourceVersion: domain-v2 + annotations: + kubernetes.io/ingress.class: 'traefik' +{{- if eq .Values.sslType "SSL" }} + traefik.ingress.kubernetes.io/router.entrypoints: 'websecure' + traefik.ingress.kubernetes.io/router.tls: 'true' + traefik.ingress.kubernetes.io/router.middlewares: 'soans-wls-proxy-ssl@kubernetescrd' +{{- end }} +spec: + rules: + - host: '{{ .Values.traefik.hostname }}' + http: + paths: + - path: /console + pathType: ImplementationSpecific + backend: + service: + name: '{{ .Values.wlsDomain.domainUID }}-{{ .Values.wlsDomain.adminServerName | lower | replace "_" "-" }}' + port: + number: {{ .Values.wlsDomain.adminServerPort }} + - path: /em + pathType: ImplementationSpecific + backend: + service: + name: '{{ .Values.wlsDomain.domainUID }}-{{ .Values.wlsDomain.adminServerName | lower | replace "_" "-" }}' + port: + number: {{ .Values.wlsDomain.adminServerPort }} + - path: /weblogic/ready + pathType: ImplementationSpecific + backend: + service: + name: '{{ .Values.wlsDomain.domainUID }}-{{ .Values.wlsDomain.adminServerName | lower | replace "_" "-" }}' + port: + number: {{ .Values.wlsDomain.adminServerPort }} + {{- if or (eq .Values.domainType "soa") }} + - path: / + pathType: ImplementationSpecific + backend: + service: + name: '{{ .Values.wlsDomain.domainUID }}-cluster-{{ .Values.wlsDomain.soaClusterName | lower | replace "_" "-" }}' + port: + number: {{ .Values.wlsDomain.soaManagedServerPort }} + {{- end }} + {{- if or (eq .Values.domainType "osb") }} + - path: / + pathType: 
ImplementationSpecific + backend: + service: + name: '{{ .Values.wlsDomain.domainUID }}-cluster-{{ .Values.wlsDomain.osbClusterName | lower | replace "_" "-" }}' + port: + number: {{ .Values.wlsDomain.osbManagedServerPort }} + {{- end }} + {{- if or (eq .Values.domainType "soa") (eq .Values.domainType "soaosb") (eq .Values.domainType "soab2b") (eq .Values.domainType "soaosbb2b") }} + - path: /soa-infra + pathType: ImplementationSpecific + backend: + service: + name: '{{ .Values.wlsDomain.domainUID }}-cluster-{{ .Values.wlsDomain.soaClusterName | lower | replace "_" "-" }}' + port: + number: {{ .Values.wlsDomain.soaManagedServerPort }} + {{- end }} + {{- if or (eq .Values.domainType "soa") (eq .Values.domainType "soaosb") (eq .Values.domainType "soab2b") (eq .Values.domainType "soaosbb2b") }} + - path: /soa/composer + pathType: ImplementationSpecific + backend: + service: + name: '{{ .Values.wlsDomain.domainUID }}-cluster-{{ .Values.wlsDomain.soaClusterName | lower | replace "_" "-" }}' + port: + number: {{ .Values.wlsDomain.soaManagedServerPort }} + {{- end }} + {{- if or (eq .Values.domainType "soa") (eq .Values.domainType "soaosb") (eq .Values.domainType "soab2b") (eq .Values.domainType "soaosbb2b") }} + - path: /integration/worklistapp + pathType: ImplementationSpecific + backend: + service: + name: '{{ .Values.wlsDomain.domainUID }}-cluster-{{ .Values.wlsDomain.soaClusterName | lower | replace "_" "-" }}' + port: + number: {{ .Values.wlsDomain.soaManagedServerPort }} + {{- end }} + {{- if or (eq .Values.domainType "osb") (eq .Values.domainType "soaosb") (eq .Values.domainType "soaosbb2b") }} + - path: /servicebus + pathType: ImplementationSpecific + backend: + service: + name: '{{ .Values.wlsDomain.domainUID }}-{{ .Values.wlsDomain.adminServerName | lower | replace "_" "-" }}' + port: + number: {{ .Values.wlsDomain.adminServerPort }} + {{- end }} + {{- if or (eq .Values.domainType "osb") (eq .Values.domainType "soaosb") (eq .Values.domainType "soaosbb2b") }} + 
- path: /lwpfconsole + pathType: ImplementationSpecific + backend: + service: + name: '{{ .Values.wlsDomain.domainUID }}-{{ .Values.wlsDomain.adminServerName | lower | replace "_" "-" }}' + port: + number: {{ .Values.wlsDomain.adminServerPort }} + {{- end }} + {{- if or (eq .Values.domainType "osb") (eq .Values.domainType "soaosb") (eq .Values.domainType "soaosbb2b") }} + - path: /xbusrouting + pathType: ImplementationSpecific + backend: + service: + name: '{{ .Values.wlsDomain.domainUID }}-{{ .Values.wlsDomain.adminServerName | lower | replace "_" "-" }}' + port: + number: {{ .Values.wlsDomain.adminServerPort }} + {{- end }} + {{- if or (eq .Values.domainType "osb") (eq .Values.domainType "soaosb") (eq .Values.domainType "soaosbb2b") }} + - path: /xbustransform + pathType: ImplementationSpecific + backend: + service: + name: '{{ .Values.wlsDomain.domainUID }}-{{ .Values.wlsDomain.adminServerName | lower | replace "_" "-" }}' + port: + number: {{ .Values.wlsDomain.adminServerPort }} + {{- end }} + {{- if or (eq .Values.domainType "soa") (eq .Values.domainType "soaosb") (eq .Values.domainType "soab2b") (eq .Values.domainType "soaosbb2b") }} + - path: /ess + pathType: ImplementationSpecific + backend: + service: + name: '{{ .Values.wlsDomain.domainUID }}-cluster-{{ .Values.wlsDomain.soaClusterName | lower | replace "_" "-" }}' + port: + number: {{ .Values.wlsDomain.soaManagedServerPort }} + {{- end }} + {{- if or (eq .Values.domainType "soa") (eq .Values.domainType "soaosb") (eq .Values.domainType "soab2b") (eq .Values.domainType "soaosbb2b") }} + - path: /EssHealthCheck + pathType: ImplementationSpecific + backend: + service: + name: '{{ .Values.wlsDomain.domainUID }}-cluster-{{ .Values.wlsDomain.soaClusterName | lower | replace "_" "-" }}' + port: + number: {{ .Values.wlsDomain.soaManagedServerPort }} + {{- end }} + {{- if or (eq .Values.domainType "soab2b") (eq .Values.domainType "soaosbb2b") }} + - path: /b2bconsole + pathType: ImplementationSpecific + backend: + 
service: + name: '{{ .Values.wlsDomain.domainUID }}-cluster-{{ .Values.wlsDomain.soaClusterName | lower | replace "_" "-" }}' + port: + number: {{ .Values.wlsDomain.soaManagedServerPort }} + {{- end }} +{{- if eq .Values.sslType "SSL" }} + tls: + - hosts: + - '{{ .Values.traefik.hostname }}' + secretName: soainfra-tls-cert +{{- end }} +--- +#Create Traefik Middleware custom resource for SSL Termination +{{- if eq .Values.sslType "SSL" }} +apiVersion: traefik.containo.us/v1alpha1 +kind: Middleware +metadata: + name: wls-proxy-ssl + namespace: {{ .Release.Namespace }} +spec: + headers: + customRequestHeaders: + X-Custom-Request-Header: "" + X-Forwarded-For: "" + WL-Proxy-Client-IP: "" + WL-Proxy-SSL: "" + WL-Proxy-SSL: "true" + sslRedirect: true +{{- end }} +{{- else }} +--- +apiVersion: traefik.containo.us/v1alpha1 +kind: IngressRouteTCP +metadata: + name: {{ .Values.wlsDomain.domainUID }}-traefik + namespace: {{ .Release.Namespace }} +spec: + entryPoints: + - websecure + routes: + - match: HostSNI(`{{ .Values.hostName.admin }}`) + services: + - name: '{{ .Values.wlsDomain.domainUID }}-{{ .Values.wlsDomain.adminServerName | lower | replace "_" "-" }}' + port: {{ .Values.wlsDomain.adminServerSSLPort }} + weight: 3 + TerminationDelay: 400 + {{- if or (eq .Values.domainType "soa") (eq .Values.domainType "soaosb") (eq .Values.domainType "soab2b") (eq .Values.domainType "soaosbb2b") }} + - match: HostSNI(`{{ .Values.hostName.soa }}`) + services: + - name: '{{ .Values.wlsDomain.domainUID }}-cluster-{{ .Values.wlsDomain.soaClusterName | lower | replace "_" "-" }}' + port: {{ .Values.wlsDomain.soaManagedServerSSLPort }} + weight: 3 + TerminationDelay: 400 + {{- end }} + {{- if or (eq .Values.domainType "osb") (eq .Values.domainType "soaosb") (eq .Values.domainType "soaosbb2b") }} + - match: HostSNI(`{{ .Values.hostName.osb }}`) + services: + - name: '{{ .Values.wlsDomain.domainUID }}-cluster-{{ .Values.wlsDomain.osbClusterName | lower | replace "_" "-" }}' + port: {{ 
.Values.wlsDomain.osbManagedServerSSLPort }} + weight: 3 + TerminationDelay: 400 + {{- end }} + tls: + passthrough: true +{{- end }} + +{{- end }} +{{- end }} + diff --git a/OracleSOASuite/kubernetes/charts/ingress-per-domain/templates/traefik-ingress.yaml b/OracleSOASuite/kubernetes/charts/ingress-per-domain/templates/traefik-ingress.yaml index 5e8d9a025..42773a1e7 100755 --- a/OracleSOASuite/kubernetes/charts/ingress-per-domain/templates/traefik-ingress.yaml +++ b/OracleSOASuite/kubernetes/charts/ingress-per-domain/templates/traefik-ingress.yaml @@ -1,6 +1,7 @@ # Copyright (c) 2020, 2021, Oracle and/or its affiliates. # Licensed under the Universal Permissive License v 1.0 as shown at https://oss.oracle.com/licenses/upl. # +{{- if semverCompare "<1.19-0" .Capabilities.KubeVersion.GitVersion -}} {{- if eq .Values.type "TRAEFIK" }} {{- if or (eq .Values.sslType "NONSSL") (eq .Values.sslType "SSL") }} --- @@ -152,21 +153,22 @@ spec: - match: HostSNI(`{{ .Values.hostName.soa }}`) services: - name: '{{ .Values.wlsDomain.domainUID }}-cluster-{{ .Values.wlsDomain.soaClusterName | lower | replace "_" "-" }}' - port: {{ .Values.wlsDomain.soaManagedServerSSLPort }} + port: {{ .Values.wlsDomain.soaManagedServerSSLPort }} weight: 3 TerminationDelay: 400 - {{- end}} - {{- if or (eq .Values.domainType "osb") (eq .Values.domainType "soaosb") (eq .Values.domainType "soaosbb2b") }} + {{- end }} + {{- if or (eq .Values.domainType "osb") (eq .Values.domainType "soaosb") (eq .Values.domainType "soaosbb2b") }} - match: HostSNI(`{{ .Values.hostName.osb }}`) services: - name: '{{ .Values.wlsDomain.domainUID }}-cluster-{{ .Values.wlsDomain.osbClusterName | lower | replace "_" "-" }}' - port: {{ .Values.wlsDomain.osbManagedServerSSLPort }} + port: {{ .Values.wlsDomain.osbManagedServerSSLPort }} weight: 3 TerminationDelay: 400 - {{- end}} + {{- end }} tls: passthrough: true {{- end }} {{- end }} +{{- end }} diff --git 
a/OracleSOASuite/kubernetes/charts/ingress-per-domain/templates/voyager-ingress.yaml b/OracleSOASuite/kubernetes/charts/ingress-per-domain/templates/voyager-ingress.yaml deleted file mode 100755 index fd06d0ccb..000000000 --- a/OracleSOASuite/kubernetes/charts/ingress-per-domain/templates/voyager-ingress.yaml +++ /dev/null @@ -1,231 +0,0 @@ -# Copyright (c) 2020, 2021, Oracle and/or its affiliates. -# Licensed under the Universal Permissive License v 1.0 as shown at https://oss.oracle.com/licenses/upl. -# -{{- if eq .Values.type "VOYAGER" }} -{{- if or (eq .Values.sslType "NONSSL") (eq .Values.sslType "SSL") }} ---- -apiVersion: voyager.appscode.com/v1beta1 -kind: Ingress -metadata: - name: {{ .Values.wlsDomain.domainUID }}-voyager - namespace: {{ .Release.Namespace }} - annotations: - ingress.appscode.com/type: 'NodePort' - ingress.appscode.com/stats: 'true' - ingress.appscode.com/affinity: 'cookie' - ingress.appscode.com/session-cookie-name: 'sticky' - ingress.appscode.com/default-timeout: '{"connect": "1800s", "server": "1800s"}' -spec: -{{- if eq .Values.sslType "SSL" }} - frontendRules: - - port: 443 - rules: - - http-request del-header WL-Proxy-Client-IP - - http-request del-header WL-Proxy-SSL - - http-request set-header WL-Proxy-SSL true - tls: - - secretName: domain1-tls-cert - hosts: - - '*' -{{- end }} - rules: - - host: '*' - http: - {{- if eq .Values.sslType "SSL" }} - nodePort: '{{ .Values.voyager.webSSLPort }}' - {{- else }} - nodePort: '{{ .Values.voyager.webPort }}' - {{- end }} - paths: - - path: /console - backend: - serviceName: '{{ .Values.wlsDomain.domainUID }}-{{ .Values.wlsDomain.adminServerName | lower | replace "_" "-" }}' - servicePort: {{ .Values.wlsDomain.adminServerPort }} - - path: /em - backend: - serviceName: '{{ .Values.wlsDomain.domainUID }}-{{ .Values.wlsDomain.adminServerName | lower | replace "_" "-" }}' - servicePort: {{ .Values.wlsDomain.adminServerPort }} - - path: /weblogic/ready - backend: - serviceName: '{{ 
.Values.wlsDomain.domainUID }}-{{ .Values.wlsDomain.adminServerName | lower | replace "_" "-" }}' - servicePort: {{ .Values.wlsDomain.adminServerPort }} - {{- if or (eq .Values.domainType "soa") }} - - path: / - backend: - serviceName: '{{ .Values.wlsDomain.domainUID }}-cluster-{{ .Values.wlsDomain.soaClusterName | lower | replace "_" "-" }}' - servicePort: {{ .Values.wlsDomain.soaManagedServerPort }} - {{- end }} - {{- if or (eq .Values.domainType "osb") }} - - path: / - backend: - serviceName: '{{ .Values.wlsDomain.domainUID }}-cluster-{{ .Values.wlsDomain.osbClusterName | lower | replace "_" "-" }}' - servicePort: {{ .Values.wlsDomain.osbManagedServerPort }} - {{- end }} - {{- if or (eq .Values.domainType "soa") (eq .Values.domainType "soaosb") (eq .Values.domainType "soab2b") (eq .Values.domainType "soaosbb2b") }} - - path: /soa-infra - backend: - serviceName: '{{ .Values.wlsDomain.domainUID }}-cluster-{{ .Values.wlsDomain.soaClusterName | lower | replace "_" "-" }}' - servicePort: {{ .Values.wlsDomain.soaManagedServerPort }} - {{- end }} - {{- if or (eq .Values.domainType "soa") (eq .Values.domainType "soaosb") (eq .Values.domainType "soab2b") (eq .Values.domainType "soaosbb2b") }} - - path: /soa/composer - backend: - serviceName: '{{ .Values.wlsDomain.domainUID }}-cluster-{{ .Values.wlsDomain.soaClusterName | lower | replace "_" "-" }}' - servicePort: {{ .Values.wlsDomain.soaManagedServerPort }} - {{- end }} - {{- if or (eq .Values.domainType "soa") (eq .Values.domainType "soaosb") (eq .Values.domainType "soab2b") (eq .Values.domainType "soaosbb2b") }} - - path: /integration/worklistapp - backend: - serviceName: '{{ .Values.wlsDomain.domainUID }}-cluster-{{ .Values.wlsDomain.soaClusterName | lower | replace "_" "-" }}' - servicePort: {{ .Values.wlsDomain.soaManagedServerPort }} - {{- end }} - {{- if or (eq .Values.domainType "osb") (eq .Values.domainType "soaosb") (eq .Values.domainType "soaosbb2b") }} - - path: /servicebus - backend: - serviceName: '{{ 
.Values.wlsDomain.domainUID }}-{{ .Values.wlsDomain.adminServerName | lower | replace "_" "-" }}' - servicePort: {{ .Values.wlsDomain.adminServerPort }} - {{- end }} - {{- if or (eq .Values.domainType "osb") (eq .Values.domainType "soaosb") (eq .Values.domainType "soaosbb2b") }} - - path: /lwpfconsole - backend: - serviceName: '{{ .Values.wlsDomain.domainUID }}-{{ .Values.wlsDomain.adminServerName | lower | replace "_" "-" }}' - servicePort: {{ .Values.wlsDomain.adminServerPort }} - {{- end }} - {{- if or (eq .Values.domainType "osb") (eq .Values.domainType "soaosb") (eq .Values.domainType "soaosbb2b") }} - - path: /xbusrouting - backend: - serviceName: '{{ .Values.wlsDomain.domainUID }}-{{ .Values.wlsDomain.adminServerName | lower | replace "_" "-" }}' - servicePort: {{ .Values.wlsDomain.adminServerPort }} - {{- end }} - {{- if or (eq .Values.domainType "osb") (eq .Values.domainType "soaosb") (eq .Values.domainType "soaosbb2b") }} - - path: /xbustransform - backend: - serviceName: '{{ .Values.wlsDomain.domainUID }}-{{ .Values.wlsDomain.adminServerName | lower | replace "_" "-" }}' - servicePort: {{ .Values.wlsDomain.adminServerPort }} - {{- end }} - {{- if or (eq .Values.domainType "soa") (eq .Values.domainType "soaosb") (eq .Values.domainType "soab2b") (eq .Values.domainType "soaosbb2b") }} - - path: /ess - backend: - serviceName: '{{ .Values.wlsDomain.domainUID }}-cluster-{{ .Values.wlsDomain.soaClusterName | lower | replace "_" "-" }}' - servicePort: {{ .Values.wlsDomain.soaManagedServerPort }} - {{- end }} - {{- if or (eq .Values.domainType "soa") (eq .Values.domainType "soaosb") (eq .Values.domainType "soab2b") (eq .Values.domainType "soaosbb2b") }} - - path: /EssHealthCheck - backend: - serviceName: '{{ .Values.wlsDomain.domainUID }}-cluster-{{ .Values.wlsDomain.soaClusterName | lower | replace "_" "-" }}' - servicePort: {{ .Values.wlsDomain.soaManagedServerPort }} - {{- end }} - {{- if or (eq .Values.domainType "soab2b") (eq .Values.domainType "soaosbb2b") 
}} - - path: /b2bconsole - backend: - serviceName: '{{ .Values.wlsDomain.domainUID }}-cluster-{{ .Values.wlsDomain.soaClusterName | lower | replace "_" "-" }}' - servicePort: {{ .Values.wlsDomain.soaManagedServerPort }} - {{- end }} ---- -apiVersion: v1 -kind: Service -metadata: - name: {{ .Values.wlsDomain.domainUID }}-voyager-stats - namespace: {{ .Release.Namespace }} -spec: - type: NodePort - ports: - - name: client - protocol: TCP - port: 56789 - targetPort: 56789 - nodePort: {{ .Values.voyager.statsPort }} - selector: - origin: voyager - origin-name: {{ .Values.wlsDomain.domainUID }}-voyager -{{- else }} ---- -apiVersion: voyager.appscode.com/v1beta1 -kind: Ingress -metadata: - name: {{ .Values.wlsDomain.domainUID }}-voyager-e2essl-admin - namespace: {{ .Release.Namespace }} - annotations: - ingress.appscode.com/type: 'NodePort' - ingress.appscode.com/stats: 'true' - ingress.appscode.com/affinity: 'cookie' - ingress.appscode.com/session-cookie-name: 'sticky' - ingress.appscode.com/ssl-passthrough: "true" -spec: - tls: - - secretName: domain1-tls-cert - hosts: - - '*' - rules: - - host: '*' - http: - nodePort: '{{ .Values.voyager.adminSSLPort }}' - paths: - - path: / - backend: - serviceName: '{{ .Values.wlsDomain.domainUID }}-{{ .Values.wlsDomain.adminServerName | lower | replace "_" "-" }}' - servicePort: {{ .Values.wlsDomain.adminServerSSLPort }} - -{{- if or (eq .Values.domainType "soa") (eq .Values.domainType "soaosb") (eq .Values.domainType "soab2b") (eq .Values.domainType "soaosbb2b") }} - ---- -apiVersion: voyager.appscode.com/v1beta1 -kind: Ingress -metadata: - name: {{ .Values.wlsDomain.domainUID }}-voyager-e2essl-soa - namespace: {{ .Release.Namespace }} - annotations: - ingress.appscode.com/type: 'NodePort' - ingress.appscode.com/stats: 'true' - ingress.appscode.com/affinity: 'cookie' - ingress.appscode.com/session-cookie-name: 'sticky' - ingress.appscode.com/ssl-passthrough: "true" -spec: - tls: - - secretName: domain1-tls-cert - hosts: - - '*' - 
rules: - - host: '*' - http: - nodePort: '{{ .Values.voyager.soaSSLPort }}' - paths: - - path: / - backend: - serviceName: '{{ .Values.wlsDomain.domainUID }}-cluster-{{ .Values.wlsDomain.soaClusterName | lower | replace "_" "-" }}' - servicePort: {{ .Values.wlsDomain.soaManagedServerSSLPort }} -{{- end}} -{{- if or (eq .Values.domainType "osb") (eq .Values.domainType "soaosb") (eq .Values.domainType "soaosbb2b") }} ---- -apiVersion: voyager.appscode.com/v1beta1 -kind: Ingress -metadata: - name: {{ .Values.wlsDomain.domainUID }}-voyager-e2essl-osb - namespace: {{ .Release.Namespace }} - annotations: - ingress.appscode.com/type: 'NodePort' - ingress.appscode.com/stats: 'true' - ingress.appscode.com/affinity: 'cookie' - ingress.appscode.com/session-cookie-name: 'sticky' - ingress.appscode.com/ssl-passthrough: "true" -spec: - tls: - - secretName: domain1-tls-cert - hosts: - - '*' - rules: - - host: '*' - http: - nodePort: '{{ .Values.voyager.osbSSLPort }}' - paths: - - path: / - backend: - serviceName: '{{ .Values.wlsDomain.domainUID }}-cluster-{{ .Values.wlsDomain.osbClusterName | lower | replace "_" "-" }}' - servicePort: {{ .Values.wlsDomain.osbManagedServerSSLPort }} -{{- end }} - -{{- end }} -{{- end }} - - - diff --git a/OracleSOASuite/kubernetes/charts/ingress-per-domain/values.yaml b/OracleSOASuite/kubernetes/charts/ingress-per-domain/values.yaml index 85d85c94d..d364dc81a 100755 --- a/OracleSOASuite/kubernetes/charts/ingress-per-domain/values.yaml +++ b/OracleSOASuite/kubernetes/charts/ingress-per-domain/values.yaml @@ -6,21 +6,14 @@ # This is a YAML-formatted file. # Declare variables to be passed into your templates. # -# Load balancer type. Supported values are: TRAEFIK, VOYAGER +# Load balancer type. 
Supported values are: TRAEFIK, VOYAGER, NGINX type: TRAEFIK -#type: VOYAGER -#type: NGINX -# Type of Configuration Supported Values are : NONSSL,SSL and E2ESSL +# Type of Configuration Supported Values are : NONSSL, SSL and E2ESSL sslType: NONSSL -#sslType: SSL -#sslType: E2ESSL # domainType Supported values are soa,osb and soaosb. -# Make sure only one of these is uncommented. domainType: soa -#domainType: osb -#domainType: soaosb #WLS domain as backend to the load balancer wlsDomain: diff --git a/OracleSOASuite/kubernetes/charts/traefik/values.yaml b/OracleSOASuite/kubernetes/charts/traefik/values.yaml old mode 100644 new mode 100755 diff --git a/OracleSOASuite/kubernetes/charts/weblogic-operator/.helmignore b/OracleSOASuite/kubernetes/charts/weblogic-operator/.helmignore old mode 100644 new mode 100755 diff --git a/OracleSOASuite/kubernetes/charts/weblogic-operator/Chart.yaml b/OracleSOASuite/kubernetes/charts/weblogic-operator/Chart.yaml old mode 100644 new mode 100755 index 88c3511df..b5cac770e --- a/OracleSOASuite/kubernetes/charts/weblogic-operator/Chart.yaml +++ b/OracleSOASuite/kubernetes/charts/weblogic-operator/Chart.yaml @@ -6,5 +6,5 @@ name: weblogic-operator description: Helm chart for configuring the WebLogic operator. 
type: application -version: 3.2.1 -appVersion: 3.2.1 +version: 3.3.0 +appVersion: 3.3.0 diff --git a/OracleSOASuite/kubernetes/charts/weblogic-operator/templates/_domain-namespaces.tpl b/OracleSOASuite/kubernetes/charts/weblogic-operator/templates/_domain-namespaces.tpl old mode 100644 new mode 100755 diff --git a/OracleSOASuite/kubernetes/charts/weblogic-operator/templates/_operator-clusterrole-domain-admin.tpl b/OracleSOASuite/kubernetes/charts/weblogic-operator/templates/_operator-clusterrole-domain-admin.tpl old mode 100644 new mode 100755 diff --git a/OracleSOASuite/kubernetes/charts/weblogic-operator/templates/_operator-clusterrole-general.tpl b/OracleSOASuite/kubernetes/charts/weblogic-operator/templates/_operator-clusterrole-general.tpl old mode 100644 new mode 100755 diff --git a/OracleSOASuite/kubernetes/charts/weblogic-operator/templates/_operator-clusterrole-namespace.tpl b/OracleSOASuite/kubernetes/charts/weblogic-operator/templates/_operator-clusterrole-namespace.tpl old mode 100644 new mode 100755 diff --git a/OracleSOASuite/kubernetes/charts/weblogic-operator/templates/_operator-clusterrole-nonresource.tpl b/OracleSOASuite/kubernetes/charts/weblogic-operator/templates/_operator-clusterrole-nonresource.tpl old mode 100644 new mode 100755 diff --git a/OracleSOASuite/kubernetes/charts/weblogic-operator/templates/_operator-clusterrole-operator-admin.tpl b/OracleSOASuite/kubernetes/charts/weblogic-operator/templates/_operator-clusterrole-operator-admin.tpl old mode 100644 new mode 100755 diff --git a/OracleSOASuite/kubernetes/charts/weblogic-operator/templates/_operator-clusterrolebinding-auth-delegator.tpl b/OracleSOASuite/kubernetes/charts/weblogic-operator/templates/_operator-clusterrolebinding-auth-delegator.tpl old mode 100644 new mode 100755 diff --git a/OracleSOASuite/kubernetes/charts/weblogic-operator/templates/_operator-clusterrolebinding-discovery.tpl 
b/OracleSOASuite/kubernetes/charts/weblogic-operator/templates/_operator-clusterrolebinding-discovery.tpl old mode 100644 new mode 100755 diff --git a/OracleSOASuite/kubernetes/charts/weblogic-operator/templates/_operator-clusterrolebinding-general.tpl b/OracleSOASuite/kubernetes/charts/weblogic-operator/templates/_operator-clusterrolebinding-general.tpl old mode 100644 new mode 100755 diff --git a/OracleSOASuite/kubernetes/charts/weblogic-operator/templates/_operator-clusterrolebinding-nonresource.tpl b/OracleSOASuite/kubernetes/charts/weblogic-operator/templates/_operator-clusterrolebinding-nonresource.tpl old mode 100644 new mode 100755 diff --git a/OracleSOASuite/kubernetes/charts/weblogic-operator/templates/_operator-cm.tpl b/OracleSOASuite/kubernetes/charts/weblogic-operator/templates/_operator-cm.tpl old mode 100644 new mode 100755 index f66fc6808..dd6594de2 --- a/OracleSOASuite/kubernetes/charts/weblogic-operator/templates/_operator-cm.tpl +++ b/OracleSOASuite/kubernetes/charts/weblogic-operator/templates/_operator-cm.tpl @@ -34,6 +34,9 @@ data: {{- if .dns1123Fields }} dns1123Fields: {{ .dns1123Fields | quote }} {{- end }} + {{- if .featureGates }} + featureGates: {{ .featureGates | quote }} + {{- end }} {{- if .introspectorJobNameSuffix }} introspectorJobNameSuffix: {{ .introspectorJobNameSuffix | quote }} {{- end }} diff --git a/OracleSOASuite/kubernetes/charts/weblogic-operator/templates/_operator-dep.tpl b/OracleSOASuite/kubernetes/charts/weblogic-operator/templates/_operator-dep.tpl old mode 100644 new mode 100755 index da331937f..3fadac7dc --- a/OracleSOASuite/kubernetes/charts/weblogic-operator/templates/_operator-dep.tpl +++ b/OracleSOASuite/kubernetes/charts/weblogic-operator/templates/_operator-dep.tpl @@ -19,9 +19,18 @@ spec: replicas: 1 template: metadata: - labels: + {{- with .annotations }} + annotations: + {{- end }} + {{- range $key, $value := .annotations }} + {{ $key }}: {{ $value | quote }} + {{- end }} + labels: weblogic.operatorName: 
{{ .Release.Namespace | quote }} app: "weblogic-operator" + {{- range $key, $value := .labels }} + {{ $key }}: {{ $value | quote }} + {{- end }} spec: serviceAccountName: {{ .serviceAccount | quote }} {{- with .nodeSelector }} @@ -59,8 +68,6 @@ spec: value: {{ .javaLoggingFileSizeLimit | default 20000000 | quote }} - name: "JAVA_LOGGING_COUNT" value: {{ .javaLoggingFileCount | default 10 | quote }} - - name: ISTIO_ENABLED - value: {{ .istioEnabled | quote }} {{- if .remoteDebugNodePortEnabled }} - name: "REMOTE_DEBUG_PORT" value: {{ .internalDebugHttpPort | quote }} @@ -103,15 +110,15 @@ spec: livenessProbe: exec: command: - - "bash" - - "/operator/livenessProbe.sh" + - "bash" + - "/operator/livenessProbe.sh" initialDelaySeconds: 20 periodSeconds: 5 readinessProbe: exec: command: - - "bash" - - "/operator/readinessProbe.sh" + - "bash" + - "/operator/readinessProbe.sh" initialDelaySeconds: 2 periodSeconds: 10 {{- end }} diff --git a/OracleSOASuite/kubernetes/charts/weblogic-operator/templates/_operator-external-svc.tpl b/OracleSOASuite/kubernetes/charts/weblogic-operator/templates/_operator-external-svc.tpl old mode 100644 new mode 100755 diff --git a/OracleSOASuite/kubernetes/charts/weblogic-operator/templates/_operator-internal-svc.tpl b/OracleSOASuite/kubernetes/charts/weblogic-operator/templates/_operator-internal-svc.tpl old mode 100644 new mode 100755 diff --git a/OracleSOASuite/kubernetes/charts/weblogic-operator/templates/_operator-role.tpl b/OracleSOASuite/kubernetes/charts/weblogic-operator/templates/_operator-role.tpl old mode 100644 new mode 100755 index cb05180c9..e0c386b98 --- a/OracleSOASuite/kubernetes/charts/weblogic-operator/templates/_operator-role.tpl +++ b/OracleSOASuite/kubernetes/charts/weblogic-operator/templates/_operator-role.tpl @@ -12,9 +12,6 @@ metadata: weblogic.operatorName: {{ .Release.Namespace | quote }} rules: - apiGroups: [""] - resources: ["secrets", "configmaps"] - verbs: ["get", "list", "watch"] -- apiGroups: [""] - resources: 
["events"] + resources: ["events", "secrets", "configmaps"] verbs: ["get", "list", "watch", "create", "update", "patch", "delete", "deletecollection"] {{- end }} diff --git a/OracleSOASuite/kubernetes/charts/weblogic-operator/templates/_operator-rolebinding-namespace.tpl b/OracleSOASuite/kubernetes/charts/weblogic-operator/templates/_operator-rolebinding-namespace.tpl old mode 100644 new mode 100755 diff --git a/OracleSOASuite/kubernetes/charts/weblogic-operator/templates/_operator-rolebinding.tpl b/OracleSOASuite/kubernetes/charts/weblogic-operator/templates/_operator-rolebinding.tpl old mode 100644 new mode 100755 diff --git a/OracleSOASuite/kubernetes/charts/weblogic-operator/templates/_operator-secret.tpl b/OracleSOASuite/kubernetes/charts/weblogic-operator/templates/_operator-secret.tpl old mode 100644 new mode 100755 diff --git a/OracleSOASuite/kubernetes/charts/weblogic-operator/templates/_operator.tpl b/OracleSOASuite/kubernetes/charts/weblogic-operator/templates/_operator.tpl old mode 100644 new mode 100755 diff --git a/OracleSOASuite/kubernetes/charts/weblogic-operator/templates/_utils.tpl b/OracleSOASuite/kubernetes/charts/weblogic-operator/templates/_utils.tpl old mode 100644 new mode 100755 diff --git a/OracleSOASuite/kubernetes/charts/weblogic-operator/templates/_validate-inputs.tpl b/OracleSOASuite/kubernetes/charts/weblogic-operator/templates/_validate-inputs.tpl old mode 100644 new mode 100755 diff --git a/OracleSOASuite/kubernetes/charts/weblogic-operator/templates/main.yaml b/OracleSOASuite/kubernetes/charts/weblogic-operator/templates/main.yaml old mode 100644 new mode 100755 diff --git a/OracleSOASuite/kubernetes/charts/weblogic-operator/values.yaml b/OracleSOASuite/kubernetes/charts/weblogic-operator/values.yaml old mode 100644 new mode 100755 index dfaabe88e..dac9a5382 --- a/OracleSOASuite/kubernetes/charts/weblogic-operator/values.yaml +++ b/OracleSOASuite/kubernetes/charts/weblogic-operator/values.yaml @@ -63,7 +63,7 @@ domainNamespaces: 
enableClusterRoleBinding: false # image specifies the container image containing the operator. -image: "ghcr.io/oracle/weblogic-kubernetes-operator:3.2.1" +image: "ghcr.io/oracle/weblogic-kubernetes-operator:3.3.0" # imagePullPolicy specifies the image pull policy for the operator's container image. imagePullPolicy: IfNotPresent @@ -99,9 +99,6 @@ externalRestHttpsPort: 31001 # kubernetes/samples/scripts/rest/generate-external-rest-identity.sh # externalRestIdentitySecret: -# istioEnabled specifies whether or not the Domain is deployed under an Istio service mesh. -istioEnabled: false - # elkIntegrationEnabled specifies whether or not ELK integration is enabled. elkIntegrationEnabled: false @@ -117,12 +114,19 @@ elasticSearchHost: "elasticsearch.default.svc.cluster.local" # This parameter is ignored if 'elkIntegrationEnabled' is false. elasticSearchPort: 9200 +# featureGates specifies a set of key=value pairs separated by commas that describe whether a given +# operator feature is enabled. You enable a feature by including a key=value pair where the key is the +# feature name and the value is "true". This will allow the operator team to release features that +# are not yet ready to be enabled by default, but that are ready for testing by customers. Once a feature is +# stable then it will be enabled by default and can not be disabled using this configuration. +# featureGates: "...,AuxiliaryImage=true" + # javaLoggingLevel specifies the Java logging level for the operator. This affects the operator pod's # log output and the contents of log files in the container's /logs/ directory. # Valid values are: "SEVERE", "WARNING", "INFO", "CONFIG", "FINE", "FINER", and "FINEST". javaLoggingLevel: "INFO" -# javaLoggingFileSizeLimit specifies the maximum size in bytes of the Java logging files in the operator container's +# javaLoggingFileSizeLimit specifies the maximum size in bytes for an individual Java logging file in the operator container's # /logs/ directory. 
javaLoggingFileSizeLimit: 20000000 @@ -130,6 +134,14 @@ javaLoggingFileSizeLimit: 20000000 # directory as the files are rotated. javaLoggingFileCount: 10 +# labels specifies a set of key-value labels that will be added to each pod running the operator. +# See https://kubernetes.io/docs/concepts/overview/working-with-objects/labels/ +#labels: + +# annotations specifies a set of key-value annotations that will be added to each pod running the operator. +# See https://kubernetes.io/docs/concepts/overview/working-with-objects/annotations/ +#annotations: + # nodeSelector specifies a matching rule that the Kubernetes scheduler will use when selecting the node # where the operator will run. If the nodeSelector value is specified, then this content will be added to # the operator's deployment. See https://kubernetes.io/docs/concepts/scheduling-eviction/assign-pod-node/#nodeselector diff --git a/OracleSOASuite/kubernetes/common/createFMWJRFDomain.py b/OracleSOASuite/kubernetes/common/createFMWJRFDomain.py new file mode 100755 index 000000000..bde936ca5 --- /dev/null +++ b/OracleSOASuite/kubernetes/common/createFMWJRFDomain.py @@ -0,0 +1,332 @@ +# Copyright (c) 2014, 2021, Oracle and/or its affiliates. +# Licensed under the Universal Permissive License v 1.0 as shown at https://oss.oracle.com/licenses/upl. 
+ +import os +import sys + +import com.oracle.cie.domain.script.jython.WLSTException as WLSTException + +class Infra12213Provisioner: + + MACHINES = { + 'machine1' : { + 'NMType': 'SSL', + 'ListenAddress': 'localhost', + 'ListenPort': 5658 + } + } + + JRF_12213_TEMPLATES = { + 'baseTemplate' : '@@ORACLE_HOME@@/wlserver/common/templates/wls/wls.jar', + 'extensionTemplates' : [ + '@@ORACLE_HOME@@/oracle_common/common/templates/wls/oracle.jrf_template.jar', + '@@ORACLE_HOME@@/oracle_common/common/templates/wls/oracle.jrf.ws.async_template.jar', + '@@ORACLE_HOME@@/oracle_common/common/templates/wls/oracle.wsmpm_template.jar', + '@@ORACLE_HOME@@/oracle_common/common/templates/wls/oracle.ums_template.jar', + '@@ORACLE_HOME@@/em/common/templates/wls/oracle.em_wls_template.jar' + ], + 'serverGroupsToTarget' : [ 'JRF-MAN-SVR', 'WSMPM-MAN-SVR' ] + } + + def __init__(self, oracleHome, javaHome, domainParentDir, adminListenPort, adminName, managedNameBase, managedServerPort, prodMode, managedCount, clusterName): + self.oracleHome = self.validateDirectory(oracleHome) + self.javaHome = self.validateDirectory(javaHome) + self.domainParentDir = self.validateDirectory(domainParentDir, create=True) + return + + def createInfraDomain(self, domainName, user, password, db, dbPrefix, dbPassword, adminListenPort, adminName, + managedNameBase, managedServerPort, prodMode, managedCount, clusterName, + exposeAdminT3Channel=None, t3ChannelPublicAddress=None, t3ChannelPort=None): + domainHome = self.createBaseDomain(domainName, user, password, adminListenPort, adminName, managedNameBase, + managedServerPort, prodMode, managedCount, clusterName + ) + self.extendDomain(domainHome, db, dbPrefix, dbPassword, exposeAdminT3Channel, t3ChannelPublicAddress, + t3ChannelPort) + + def createBaseDomain(self, domainName, user, password, adminListenPort, adminName, managedNameBase, managedServerPort, prodMode, managedCount, clusterName): + baseTemplate = 
self.replaceTokens(self.JRF_12213_TEMPLATES['baseTemplate']) + + readTemplate(baseTemplate) + setOption('DomainName', domainName) + setOption('JavaHome', self.javaHome) + if (prodMode == 'true'): + setOption('ServerStartMode', 'prod') + else: + setOption('ServerStartMode', 'dev') + set('Name', domainName) + + admin_port = int(adminListenPort) + ms_port = int(managedServerPort) + ms_count = int(managedCount) + + # Create Admin Server + # ======================= + print 'Creating Admin Server...' + cd('/Servers/AdminServer') + #set('ListenAddress', '%s-%s' % (domain_uid, admin_server_name_svc)) + set('ListenPort', admin_port) + set('Name', adminName) + + # Define the user password for weblogic + # ===================================== + cd('/Security/' + domainName + '/User/weblogic') + set('Name', user) + set('Password', password) + + # Create a cluster + # ====================== + print 'Creating cluster...' + cd('/') + cl=create(clusterName, 'Cluster') + + # Create managed servers + for index in range(0, ms_count): + cd('/') + msIndex = index+1 + cd('/') + name = '%s%s' % (managedNameBase, msIndex) + create(name, 'Server') + cd('/Servers/%s/' % name ) + print('managed server name is %s' % name); + set('ListenPort', ms_port) + set('NumOfRetriesBeforeMSIMode', 0) + set('RetryIntervalBeforeMSIMode', 1) + set('Cluster', clusterName) + + # Create Node Manager + # ======================= + print 'Creating Node Managers...' + for machine in self.MACHINES: + cd('/') + create(machine, 'Machine') + cd('Machine/' + machine) + create(machine, 'NodeManager') + cd('NodeManager/' + machine) + for param in self.MACHINES[machine]: + set(param, self.MACHINES[machine][param]) + + + setOption('OverwriteDomain', 'true') + domainHome = self.domainParentDir + '/' + domainName + print 'Will create Base domain at ' + domainHome + + print 'Writing base domain...' 
+ writeDomain(domainHome) + closeTemplate() + print 'Base domain created at ' + domainHome + return domainHome + + + def extendDomain(self, domainHome, db, dbPrefix, dbPassword, exposeAdminT3Channel, t3ChannelPublicAddress, + t3ChannelPort): + print 'Extending domain at ' + domainHome + print 'Database ' + db + readDomain(domainHome) + setOption('AppDir', self.domainParentDir + '/applications') + + print 'ExposeAdminT3Channel %s with %s:%s ' % (exposeAdminT3Channel, t3ChannelPublicAddress, t3ChannelPort) + if 'true' == exposeAdminT3Channel: + self.enable_admin_channel(t3ChannelPublicAddress, t3ChannelPort) + + print 'Applying JRF templates...' + for extensionTemplate in self.JRF_12213_TEMPLATES['extensionTemplates']: + addTemplate(self.replaceTokens(extensionTemplate)) + + print 'Extension Templates added' + + print 'Configuring the Service Table DataSource...' + fmwDb = 'jdbc:oracle:thin:@' + db + print 'fmwDatabase ' + fmwDb + cd('/JDBCSystemResource/LocalSvcTblDataSource/JdbcResource/LocalSvcTblDataSource') + cd('JDBCDriverParams/NO_NAME_0') + set('DriverName', 'oracle.jdbc.OracleDriver') + set('URL', fmwDb) + set('PasswordEncrypted', dbPassword) + + stbUser = dbPrefix + '_STB' + cd('Properties/NO_NAME_0/Property/user') + set('Value', stbUser) + + print 'Getting Database Defaults...' + getDatabaseDefaults() + + print 'Targeting Server Groups...' + managedName= '%s%s' % (managedNameBase, 1) + print "Set CoherenceClusterSystemResource to defaultCoherenceCluster for server:" + managedName + serverGroupsToTarget = list(self.JRF_12213_TEMPLATES['serverGroupsToTarget']) + cd('/') + setServerGroups(managedName, serverGroupsToTarget) + print "Set CoherenceClusterSystemResource to defaultCoherenceCluster for server:" + managedName + cd('/Servers/' + managedName) + set('CoherenceClusterSystemResource', 'defaultCoherenceCluster') + + print 'Targeting Cluster ...' 
+ cd('/') + print "Set CoherenceClusterSystemResource to defaultCoherenceCluster for cluster:" + clusterName + cd('/Cluster/' + clusterName) + set('CoherenceClusterSystemResource', 'defaultCoherenceCluster') + print "Set WLS clusters as target of defaultCoherenceCluster:" + clusterName + cd('/CoherenceClusterSystemResource/defaultCoherenceCluster') + set('Target', clusterName) + + print 'Preparing to update domain...' + updateDomain() + print 'Domain updated successfully' + closeDomain() + return + + + ########################################################################### + # Helper Methods # + ########################################################################### + + def validateDirectory(self, dirName, create=False): + directory = os.path.realpath(dirName) + if not os.path.exists(directory): + if create: + os.makedirs(directory) + else: + message = 'Directory ' + directory + ' does not exist' + raise WLSTException(message) + elif not os.path.isdir(directory): + message = 'Directory ' + directory + ' is not a directory' + raise WLSTException(message) + return self.fixupPath(directory) + + + def fixupPath(self, path): + result = path + if path is not None: + result = path.replace('\\', '/') + return result + + + def replaceTokens(self, path): + result = path + if path is not None: + result = path.replace('@@ORACLE_HOME@@', oracleHome) + return result + + def enable_admin_channel(self, admin_channel_address, admin_channel_port): + if admin_channel_address == None or admin_channel_port == 'None': + return + cd('/') + admin_server_name = get('AdminServerName') + print('setting admin server t3channel for ' + admin_server_name) + cd('/Servers/' + admin_server_name) + create('T3Channel', 'NetworkAccessPoint') + cd('/Servers/' + admin_server_name + '/NetworkAccessPoint/T3Channel') + set('ListenPort', int(admin_channel_port)) + set('PublicPort', int(admin_channel_port)) + set('PublicAddress', admin_channel_address) + +############################# +# Entry point 
to the script # +############################# + +def usage(): + print sys.argv[0] + ' -oh -jh -parent -name ' + \ + '-user -password ' + \ + '-rcuDb -rcuPrefix -rcuSchemaPwd ' \ + '-adminListenPort -adminName ' \ + '-managedNameBase -managedServerPort -prodMode ' \ + '-managedServerCount -clusterName ' \ + '-exposeAdminT3Channel -t3ChannelPublicAddress
 ' \ + '-t3ChannelPort ' + sys.exit(0) + +# Uncomment for Debug only +#print str(sys.argv[0]) + " called with the following sys.argv array:" +#for index, arg in enumerate(sys.argv): +# print "sys.argv[" + str(index) + "] = " + str(sys.argv[index]) + +if len(sys.argv) < 16: + usage() + +#oracleHome will be passed by command line parameter -oh. +oracleHome = None +#javaHome will be passed by command line parameter -jh. +javaHome = None +#domainParentDir will be passed by command line parameter -parent. +domainParentDir = None +#domainUser is hard-coded to weblogic. You can change to other name of your choice. Command line parameter -user. +domainUser = 'weblogic' +#domainPassword will be passed by Command line parameter -password. +domainPassword = None +#rcuDb will be passed by command line parameter -rcuDb. +rcuDb = None +#change rcuSchemaPrefix to your infra schema prefix. Command line parameter -rcuPrefix. +rcuSchemaPrefix = 'DEV12' +#change rcuSchemaPassword to your infra schema password. Command line parameter -rcuSchemaPwd. 
+rcuSchemaPassword = None +exposeAdminT3Channel = None +t3ChannelPort = None +t3ChannelPublicAddress = None +i = 1 +while i < len(sys.argv): + if sys.argv[i] == '-oh': + oracleHome = sys.argv[i + 1] + i += 2 + elif sys.argv[i] == '-jh': + javaHome = sys.argv[i + 1] + i += 2 + elif sys.argv[i] == '-parent': + domainParentDir = sys.argv[i + 1] + i += 2 + elif sys.argv[i] == '-name': + domainName = sys.argv[i + 1] + i += 2 + elif sys.argv[i] == '-user': + domainUser = sys.argv[i + 1] + i += 2 + elif sys.argv[i] == '-password': + domainPassword = sys.argv[i + 1] + i += 2 + elif sys.argv[i] == '-rcuDb': + rcuDb = sys.argv[i + 1] + i += 2 + elif sys.argv[i] == '-rcuPrefix': + rcuSchemaPrefix = sys.argv[i + 1] + i += 2 + elif sys.argv[i] == '-rcuSchemaPwd': + rcuSchemaPassword = sys.argv[i + 1] + i += 2 + elif sys.argv[i] == '-adminListenPort': + adminListenPort = sys.argv[i + 1] + i += 2 + elif sys.argv[i] == '-adminName': + adminName = sys.argv[i + 1] + i += 2 + elif sys.argv[i] == '-managedNameBase': + managedNameBase = sys.argv[i + 1] + i += 2 + elif sys.argv[i] == '-managedServerPort': + managedServerPort = sys.argv[i + 1] + i += 2 + elif sys.argv[i] == '-prodMode': + prodMode = sys.argv[i + 1] + i += 2 + elif sys.argv[i] == '-managedServerCount': + managedCount = sys.argv[i + 1] + i += 2 + elif sys.argv[i] == '-clusterName': + clusterName = sys.argv[i + 1] + i += 2 + elif sys.argv[i] == '-t3ChannelPublicAddress': + t3ChannelPublicAddress = sys.argv[i + 1] + i += 2 + elif sys.argv[i] == '-t3ChannelPort': + t3ChannelPort = sys.argv[i + 1] + i += 2 + elif sys.argv[i] == '-exposeAdminT3Channel': + exposeAdminT3Channel = sys.argv[i + 1] + i += 2 + else: + print 'Unexpected argument switch at position ' + str(i) + ': ' + str(sys.argv[i]) + usage() + sys.exit(1) + +provisioner = Infra12213Provisioner(oracleHome, javaHome, domainParentDir, adminListenPort, adminName, managedNameBase, managedServerPort, prodMode, managedCount, clusterName) 
+provisioner.createInfraDomain(domainName, domainUser, domainPassword, rcuDb, rcuSchemaPrefix, rcuSchemaPassword, + adminListenPort, adminName, managedNameBase, managedServerPort, prodMode, managedCount, + clusterName, exposeAdminT3Channel, t3ChannelPublicAddress, t3ChannelPort) \ No newline at end of file diff --git a/OracleSOASuite/kubernetes/common/createFMWRestrictedJRFDomain.py b/OracleSOASuite/kubernetes/common/createFMWRestrictedJRFDomain.py new file mode 100755 index 000000000..acfe5da80 --- /dev/null +++ b/OracleSOASuite/kubernetes/common/createFMWRestrictedJRFDomain.py @@ -0,0 +1,291 @@ +# Copyright (c) 2014, 2021, Oracle and/or its affiliates. +# Licensed under the Universal Permissive License v 1.0 as shown at https://oss.oracle.com/licenses/upl. + +import os +import sys + +import com.oracle.cie.domain.script.jython.WLSTException as WLSTException + +class Infra12213Provisioner: + + MACHINES = { + 'machine1' : { + 'NMType': 'SSL', + 'ListenAddress': 'localhost', + 'ListenPort': 5658 + } + } + + JRF_12213_TEMPLATES = { + 'baseTemplate' : '@@ORACLE_HOME@@/wlserver/common/templates/wls/wls.jar', + 'extensionTemplates' : [ + '@@ORACLE_HOME@@/oracle_common/common/templates/wls/oracle.jrf_restricted_template.jar', + '@@ORACLE_HOME@@/em/common/templates/wls/oracle.em_wls_restricted_template.jar' + ], + 'serverGroupsToTarget' : [ 'JRF-MAN-SVR', 'WSMPM-MAN-SVR' ] + } + + def __init__(self, oracleHome, javaHome, domainParentDir, adminListenPort, adminName, managedNameBase, managedServerPort, prodMode, managedCount, clusterName): + self.oracleHome = self.validateDirectory(oracleHome) + self.javaHome = self.validateDirectory(javaHome) + self.domainParentDir = self.validateDirectory(domainParentDir, create=True) + return + + def createInfraDomain(self, domainName, user, password, adminListenPort, adminName, + managedNameBase, managedServerPort, prodMode, managedCount, clusterName, + exposeAdminT3Channel=None, t3ChannelPublicAddress=None, t3ChannelPort=None): + 
domainHome = self.createBaseDomain(domainName, user, password, adminListenPort, adminName, managedNameBase, + managedServerPort, prodMode, managedCount, clusterName + ) + self.extendDomain(domainHome, exposeAdminT3Channel, t3ChannelPublicAddress, t3ChannelPort) + + def createBaseDomain(self, domainName, user, password, adminListenPort, adminName, managedNameBase, managedServerPort, prodMode, managedCount, clusterName): + baseTemplate = self.replaceTokens(self.JRF_12213_TEMPLATES['baseTemplate']) + + readTemplate(baseTemplate) + setOption('DomainName', domainName) + setOption('JavaHome', self.javaHome) + if (prodMode == 'true'): + setOption('ServerStartMode', 'prod') + else: + setOption('ServerStartMode', 'dev') + set('Name', domainName) + + admin_port = int(adminListenPort) + ms_port = int(managedServerPort) + ms_count = int(managedCount) + + # Create Admin Server + # ======================= + print 'Creating Admin Server...' + cd('/Servers/AdminServer') + #set('ListenAddress', '%s-%s' % (domain_uid, admin_server_name_svc)) + set('ListenPort', admin_port) + set('Name', adminName) + + # Define the user password for weblogic + # ===================================== + cd('/Security/' + domainName + '/User/weblogic') + set('Name', user) + set('Password', password) + + # Create a cluster + # ====================== + print 'Creating cluster...' + cd('/') + cl=create(clusterName, 'Cluster') + + # Create managed servers + for index in range(0, ms_count): + cd('/') + msIndex = index+1 + cd('/') + name = '%s%s' % (managedNameBase, msIndex) + create(name, 'Server') + cd('/Servers/%s/' % name ) + print('managed server name is %s' % name); + set('ListenPort', ms_port) + set('NumOfRetriesBeforeMSIMode', 0) + set('RetryIntervalBeforeMSIMode', 1) + set('Cluster', clusterName) + + # Create Node Manager + # ======================= + print 'Creating Node Managers...' 
+ for machine in self.MACHINES: + cd('/') + create(machine, 'Machine') + cd('Machine/' + machine) + create(machine, 'NodeManager') + cd('NodeManager/' + machine) + for param in self.MACHINES[machine]: + set(param, self.MACHINES[machine][param]) + + + setOption('OverwriteDomain', 'true') + domainHome = self.domainParentDir + '/' + domainName + print 'Will create Base domain at ' + domainHome + + print 'Writing base domain...' + writeDomain(domainHome) + closeTemplate() + print 'Base domain created at ' + domainHome + return domainHome + + + def extendDomain(self, domainHome, exposeAdminT3Channel, t3ChannelPublicAddress, + t3ChannelPort): + print 'Extending domain at ' + domainHome + readDomain(domainHome) + setOption('AppDir', self.domainParentDir + '/applications') + + print 'ExposeAdminT3Channel %s with %s:%s ' % (exposeAdminT3Channel, t3ChannelPublicAddress, t3ChannelPort) + if 'true' == exposeAdminT3Channel: + self.enable_admin_channel(t3ChannelPublicAddress, t3ChannelPort) + + print 'Applying JRF templates...' + for extensionTemplate in self.JRF_12213_TEMPLATES['extensionTemplates']: + addTemplate(self.replaceTokens(extensionTemplate)) + + print 'Extension Templates added' + + print 'Preparing to update domain...' 
+ updateDomain() + print 'Domain updated successfully' + closeDomain() + return + + + ########################################################################### + # Helper Methods # + ########################################################################### + + def validateDirectory(self, dirName, create=False): + directory = os.path.realpath(dirName) + if not os.path.exists(directory): + if create: + os.makedirs(directory) + else: + message = 'Directory ' + directory + ' does not exist' + raise WLSTException(message) + elif not os.path.isdir(directory): + message = 'Directory ' + directory + ' is not a directory' + raise WLSTException(message) + return self.fixupPath(directory) + + + def fixupPath(self, path): + result = path + if path is not None: + result = path.replace('\\', '/') + return result + + + def replaceTokens(self, path): + result = path + if path is not None: + result = path.replace('@@ORACLE_HOME@@', oracleHome) + return result + + def enable_admin_channel(self, admin_channel_address, admin_channel_port): + if admin_channel_address == None or admin_channel_port == 'None': + return + cd('/') + admin_server_name = get('AdminServerName') + print('setting admin server t3channel for ' + admin_server_name) + cd('/Servers/' + admin_server_name) + create('T3Channel', 'NetworkAccessPoint') + cd('/Servers/' + admin_server_name + '/NetworkAccessPoint/T3Channel') + set('ListenPort', int(admin_channel_port)) + set('PublicPort', int(admin_channel_port)) + set('PublicAddress', admin_channel_address) + +############################# +# Entry point to the script # +############################# + +def usage(): + print sys.argv[0] + ' -oh -jh -parent -name ' + \ + '-user -password ' + \ + '-rcuDb -rcuPrefix -rcuSchemaPwd ' \ + '-adminListenPort -adminName ' \ + '-managedNameBase -managedServerPort -prodMode ' \ + '-managedServerCount -clusterName ' \ + '-exposeAdminT3Channel -t3ChannelPublicAddress
 ' \ + '-t3ChannelPort ' + sys.exit(0) + +# Uncomment for Debug only +#print str(sys.argv[0]) + " called with the following sys.argv array:" +#for index, arg in enumerate(sys.argv): +# print "sys.argv[" + str(index) + "] = " + str(sys.argv[index]) + +if len(sys.argv) < 16: + usage() + +#oracleHome will be passed by command line parameter -oh. +oracleHome = None +#javaHome will be passed by command line parameter -jh. +javaHome = None +#domainParentDir will be passed by command line parameter -parent. +domainParentDir = None +#domainUser is hard-coded to weblogic. You can change to other name of your choice. Command line parameter -user. +domainUser = 'weblogic' +#domainPassword will be passed by Command line parameter -password. +domainPassword = None +#rcuDb will be passed by command line parameter -rcuDb. +rcuDb = None +#change rcuSchemaPrefix to your infra schema prefix. Command line parameter -rcuPrefix. +rcuSchemaPrefix = 'DEV12' +#change rcuSchemaPassword to your infra schema password. Command line parameter -rcuSchemaPwd. 
+rcuSchemaPassword = None +exposeAdminT3Channel = None +t3ChannelPort = None +t3ChannelPublicAddress = None +i = 1 +while i < len(sys.argv): + if sys.argv[i] == '-oh': + oracleHome = sys.argv[i + 1] + i += 2 + elif sys.argv[i] == '-jh': + javaHome = sys.argv[i + 1] + i += 2 + elif sys.argv[i] == '-parent': + domainParentDir = sys.argv[i + 1] + i += 2 + elif sys.argv[i] == '-name': + domainName = sys.argv[i + 1] + i += 2 + elif sys.argv[i] == '-user': + domainUser = sys.argv[i + 1] + i += 2 + elif sys.argv[i] == '-password': + domainPassword = sys.argv[i + 1] + i += 2 + elif sys.argv[i] == '-rcuDb': + rcuDb = sys.argv[i + 1] + i += 2 + elif sys.argv[i] == '-rcuPrefix': + rcuSchemaPrefix = sys.argv[i + 1] + i += 2 + elif sys.argv[i] == '-rcuSchemaPwd': + rcuSchemaPassword = sys.argv[i + 1] + i += 2 + elif sys.argv[i] == '-adminListenPort': + adminListenPort = sys.argv[i + 1] + i += 2 + elif sys.argv[i] == '-adminName': + adminName = sys.argv[i + 1] + i += 2 + elif sys.argv[i] == '-managedNameBase': + managedNameBase = sys.argv[i + 1] + i += 2 + elif sys.argv[i] == '-managedServerPort': + managedServerPort = sys.argv[i + 1] + i += 2 + elif sys.argv[i] == '-prodMode': + prodMode = sys.argv[i + 1] + i += 2 + elif sys.argv[i] == '-managedServerCount': + managedCount = sys.argv[i + 1] + i += 2 + elif sys.argv[i] == '-clusterName': + clusterName = sys.argv[i + 1] + i += 2 + elif sys.argv[i] == '-t3ChannelPublicAddress': + t3ChannelPublicAddress = sys.argv[i + 1] + i += 2 + elif sys.argv[i] == '-t3ChannelPort': + t3ChannelPort = sys.argv[i + 1] + i += 2 + elif sys.argv[i] == '-exposeAdminT3Channel': + exposeAdminT3Channel = sys.argv[i + 1] + i += 2 + else: + print 'Unexpected argument switch at position ' + str(i) + ': ' + str(sys.argv[i]) + usage() + sys.exit(1) + +provisioner = Infra12213Provisioner(oracleHome, javaHome, domainParentDir, adminListenPort, adminName, managedNameBase, managedServerPort, prodMode, managedCount, clusterName) 
+provisioner.createInfraDomain(domainName, domainUser, domainPassword, adminListenPort, adminName, managedNameBase, managedServerPort, prodMode, managedCount, + clusterName, exposeAdminT3Channel, t3ChannelPublicAddress, t3ChannelPort) diff --git a/OracleSOASuite/kubernetes/common/domain-template.yaml b/OracleSOASuite/kubernetes/common/domain-template.yaml old mode 100644 new mode 100755 diff --git a/OracleSOASuite/kubernetes/common/jrf-domain-template.yaml b/OracleSOASuite/kubernetes/common/jrf-domain-template.yaml old mode 100644 new mode 100755 diff --git a/OracleSOASuite/kubernetes/common/utility.sh b/OracleSOASuite/kubernetes/common/utility.sh old mode 100644 new mode 100755 index d87d88575..979207be2 --- a/OracleSOASuite/kubernetes/common/utility.sh +++ b/OracleSOASuite/kubernetes/common/utility.sh @@ -38,8 +38,6 @@ function checkInputFiles { valuesInputFile=${temp[1]} valuesInputFile1=${temp[0]} fi - else - echo "Found only 1 input file" fi } @@ -128,6 +126,7 @@ function parseCommonInputs { # We exclude javaOptions from the exportValuesFile grep -v "javaOptions" ${exportValuesFile} > ${tmpFile} source ${tmpFile} + rm ${exportValuesFile} ${tmpFile} } @@ -273,23 +272,23 @@ function getKubernetesClusterIP { function buildServerPodResources { if [ -n "${serverPodMemoryRequest}" ]; then - local memoryRequest=" memory\: \"${serverPodMemoryRequest}\"\n" + local memoryRequest=" memory\: \"${serverPodMemoryRequest}\"\n" fi if [ -n "${serverPodCpuRequest}" ]; then - local cpuRequest=" cpu\: \"${serverPodCpuRequest}\"\n" + local cpuRequest=" cpu\: \"${serverPodCpuRequest}\"\n" fi if [ -n "${memoryRequest}" ] || [ -n "${cpuRequest}" ]; then - local requests=" requests\: \n$memoryRequest $cpuRequest" + local requests=" requests\: \n$memoryRequest $cpuRequest" fi if [ -n "${serverPodMemoryLimit}" ]; then - local memoryLimit=" memory\: \"${serverPodMemoryLimit}\"\n" + local memoryLimit=" memory\: \"${serverPodMemoryLimit}\"\n" fi if [ -n "${serverPodCpuLimit}" ]; then - 
local cpuLimit=" cpu\: \"${serverPodCpuLimit}\"\n" + local cpuLimit=" cpu\: \"${serverPodCpuLimit}\"\n" fi if [ -n "${memoryLimit}" ] || [ -n "${cpuLimit}" ]; then - local limits=" limits\: \n$memoryLimit $cpuLimit" + local limits=" limits\: \n$memoryLimit $cpuLimit" fi if [ -n "${requests}" ] || [ -n "${limits}" ]; then @@ -406,17 +405,14 @@ function createFiles { if [ "${domainHomeInImage}" == "true" ]; then domainPropertiesOutput="${domainOutputDir}/domain.properties" - domainHome="/u01/oracle/user_projects/domains/${domainName}" + domainHome="${domainHome:-/u01/oracle/user_projects/domains/${domainName}}" - if [ -z $domainHomeImageBuildPath ]; then - domainHomeImageBuildPath="./docker-images/OracleWebLogic/samples/12213-domain-home-in-image" - fi - # Generate the properties file that will be used when creating the weblogic domain - echo Generating ${domainPropertiesOutput} + echo Generating ${domainPropertiesOutput} from ${domainPropertiesInput} cp ${domainPropertiesInput} ${domainPropertiesOutput} sed -i -e "s:%DOMAIN_NAME%:${domainName}:g" ${domainPropertiesOutput} + sed -i -e "s:%DOMAIN_HOME%:${domainHome}:g" ${domainPropertiesOutput} sed -i -e "s:%ADMIN_PORT%:${adminPort}:g" ${domainPropertiesOutput} sed -i -e "s:%ADMIN_SERVER_SSL_PORT%:${adminServerSSLPort}:g" ${domainPropertiesOutput} sed -i -e "s:%ADMIN_SERVER_NAME%:${adminServerName}:g" ${domainPropertiesOutput} @@ -428,26 +424,34 @@ function createFiles { sed -i -e "s:%SSL_ENABLED%:${sslEnabled}:g" ${domainPropertiesOutput} sed -i -e "s:%PRODUCTION_MODE_ENABLED%:${productionModeEnabled}:g" ${domainPropertiesOutput} sed -i -e "s:%CLUSTER_TYPE%:${clusterType}:g" ${domainPropertiesOutput} - sed -i -e "s:%JAVA_OPTIONS%:${javaOptions}:g" ${domainPropertiesOutput} + sed -i -e "s;%JAVA_OPTIONS%;${javaOptions};g" ${domainPropertiesOutput} sed -i -e "s:%T3_CHANNEL_PORT%:${t3ChannelPort}:g" ${domainPropertiesOutput} sed -i -e "s:%T3_PUBLIC_ADDRESS%:${t3PublicAddress}:g" ${domainPropertiesOutput} sed -i -e 
"s:%EXPOSE_T3_CHANNEL%:${exposeAdminT3Channel}:g" ${domainPropertiesOutput} sed -i -e "s:%FMW_DOMAIN_TYPE%:${fmwDomainType}:g" ${domainPropertiesOutput} sed -i -e "s:%WDT_DOMAIN_TYPE%:${wdtDomainType}:g" ${domainPropertiesOutput} + sed -i -e "s:%ADMIN_USER_NAME%:${username}:g" ${domainPropertiesOutput} + sed -i -e "s:%ADMIN_USER_PASS%:${password}:g" ${domainPropertiesOutput} + sed -i -e "s:%RCU_SCHEMA_PREFIX%:${rcuSchemaPrefix}:g" ${domainPropertiesOutput} + sed -i -e "s:%RCU_SCHEMA_PASSWORD%:${rcuSchemaPassword}:g" ${domainPropertiesOutput} + sed -i -e "s|%RCU_DB_CONN_STRING%|${rcuDatabaseURL}|g" ${domainPropertiesOutput} if [ -z "${image}" ]; then # calculate the internal name to tag the generated image - defaultImageName="`basename ${domainHomeImageBuildPath} | sed 's/^[0-9]*-//'`" + defaultImageName="domain-home-in-image" baseTag=${domainHomeImageBase#*:} defaultImageName=${defaultImageName}:${baseTag:-"latest"} sed -i -e "s|%IMAGE_NAME%|${defaultImageName}|g" ${domainPropertiesOutput} - else + export BUILD_IMAGE_TAG=${defaultImageName} + else sed -i -e "s|%IMAGE_NAME%|${image}|g" ${domainPropertiesOutput} + export BUILD_IMAGE_TAG=${image} fi else # we're in the domain in PV case wdtVersion="${WDT_VERSION:-${wdtVersion}}" + httpsProxy="${https_proxy}" createJobOutput="${domainOutputDir}/create-domain-job.yaml" deleteJobOutput="${domainOutputDir}/delete-domain-job.yaml" @@ -515,6 +519,8 @@ function createFiles { sed -i -e "s:%ISTIO_ENABLED%:${istioEnabled}:g" ${createJobOutput} sed -i -e "s:%ISTIO_READINESS_PORT%:${istioReadinessPort}:g" ${createJobOutput} sed -i -e "s:%WDT_VERSION%:${wdtVersion}:g" ${createJobOutput} + sed -i -e "s|%DOMAIN_TYPE%|${domain_type}|g" ${createJobOutput} + sed -i -e "s|%PROXY_VAL%|${httpsProxy}|g" ${createJobOutput} # Generate the yaml to create the kubernetes job that will delete the weblogic domain_home folder echo Generating ${deleteJobOutput} @@ -533,8 +539,6 @@ function createFiles { sed -i -e 
"s:%DOMAIN_ROOT_DIR%:${domainPVMountPath}:g" ${deleteJobOutput} fi - echo Printing domainHomeSourceType - echo domainHomeSourceType is ${domainHomeSourceType} if [ "${domainHomeSourceType}" == "FromModel" ]; then echo domainHomeSourceType is FromModel # leave domainHomeSourceType to FromModel @@ -545,7 +549,6 @@ function createFiles { fi elif [ "${domainHomeInImage}" == "true" ]; then domainHomeSourceType="Image" - echo domainHomeSourceType is Image if [ "${logHomeOnPV}" == "true" ]; then logHomeOnPVPrefix="${enabledPrefix}" else @@ -553,7 +556,6 @@ function createFiles { fi else domainHomeSourceType="PersistentVolume" - echo domainHomeSourceType is PV logHomeOnPVPrefix="${enabledPrefix}" logHomeOnPV=true fi @@ -561,9 +563,6 @@ function createFiles { # Generate the yaml file for creating the domain resource # We want to use wdt's extractDomainResource.sh to get the domain resource # for domain on pv use case. For others, generate domain resource here - echo domainHomeSourceType is ${domainHomeSourceType} - echo wdtDomainType is ${wdtDomainType} - echo useWdt is ${useWdt} if [ "${domainHomeSourceType}" != "PersistentVolume" ] || [ "${wdtDomainType}" != "WLS" ] || [ "${useWdt}" != true ]; then @@ -585,7 +584,7 @@ function createFiles { sed -i -e "s:%HTTP_ACCESS_LOG_IN_LOG_HOME%:${httpAccessLogInLogHome}:g" ${dcrOutput} sed -i -e "s:%DATA_HOME%:${dataHome}:g" ${dcrOutput} sed -i -e "s:%SERVER_START_POLICY%:${serverStartPolicy}:g" ${dcrOutput} - sed -i -e "s:%JAVA_OPTIONS%:${javaOptions}:g" ${dcrOutput} + sed -i -e "s;%JAVA_OPTIONS%;${javaOptions};g" ${dcrOutput} sed -i -e "s:%DOMAIN_PVC_NAME%:${persistentVolumeClaimName}:g" ${dcrOutput} sed -i -e "s:%DOMAIN_ROOT_DIR%:${domainPVMountPath}:g" ${dcrOutput} @@ -664,7 +663,7 @@ function updateModelFile { sed -i -e "s:%HTTP_ACCESS_LOG_IN_LOG_HOME%:${httpAccessLogInLogHome}:g" ${modelFile} sed -i -e "s:%DATA_HOME%:${dataHome}:g" ${modelFile} sed -i -e "s:%SERVER_START_POLICY%:${serverStartPolicy}:g" ${modelFile} - sed -i -e 
"s:%JAVA_OPTIONS%:${javaOptions}:g" ${modelFile} + sed -i -e "s;%JAVA_OPTIONS%;${javaOptions};g" ${modelFile} sed -i -e "s:%DOMAIN_PVC_NAME%:${persistentVolumeClaimName}:g" ${modelFile} sed -i -e "s:%DOMAIN_ROOT_DIR%:${domainPVMountPath}:g" ${modelFile} @@ -687,7 +686,7 @@ function updateModelFile { sed -i -e "s:%MII_CONFIG_MAP%:${miiConfigMap}:g" ${modelFile} sed -i -e "s:%WDT_DOMAIN_TYPE%:${wdtDomainType}:g" ${modelFile} - #buildServerPodResources + buildServerPodResources if [ -z "${serverPodResources}" ]; then sed -i -e "/%OPTIONAL_SERVERPOD_RESOURCES%/d" ${modelFile} else diff --git a/OracleSOASuite/kubernetes/common/validate.sh b/OracleSOASuite/kubernetes/common/validate.sh index 88151d073..1a407a99a 100755 --- a/OracleSOASuite/kubernetes/common/validate.sh +++ b/OracleSOASuite/kubernetes/common/validate.sh @@ -358,14 +358,15 @@ function validateDomainFilesDir { if [ -z "${createDomainFilesDir}" ] || [ "${createDomainFilesDir}" == "wlst" ]; then useWdt=false fi - echo useWdt is ${useWdt} } # # Function to validate the common input parameters # function validateCommonInputs { - # Parse the commonn inputs file + sample_name=${1:-"other"} + + # Parse the common inputs file parseCommonInputs validateInputParamsSpecified \ @@ -379,12 +380,15 @@ function validateCommonInputs { validateIntegerInputParamsSpecified \ adminPort \ - configuredManagedServerCount \ initialManagedServerReplicas \ managedServerPort \ t3ChannelPort \ adminNodePort + if [ ! 
"${sample_name}" == "fmw-domain-home-in-image" ]; then + validateIntegerInputParamsSpecified configuredManagedServerCount + fi + validateBooleanInputParamsSpecified \ productionModeEnabled \ exposeAdminT3Channel \ diff --git a/OracleSOASuite/kubernetes/common/wdt-and-wit-utility.sh b/OracleSOASuite/kubernetes/common/wdt-and-wit-utility.sh new file mode 100755 index 000000000..aa9cc691c --- /dev/null +++ b/OracleSOASuite/kubernetes/common/wdt-and-wit-utility.sh @@ -0,0 +1,439 @@ +#!/bin/bash +# Copyright (c) 2021, Oracle and/or its affiliates. +# Licensed under the Universal Permissive License v 1.0 as shown at https://oss.oracle.com/licenses/upl. +# +# Description: +# +# This script contains functions for installing WebLogic Deploy Tool (WDT) and +# WebLogic Image Tool (WIT), and for running WDT. +# +# +# Usage: +# +# Export customized values for the input shell environment variables as needed +# before calling this script. +# +# Outputs: +# +# WDT install: WDT_DIR/weblogic-deploy/... +# +# Copy of wdt model: WDT_DIR/$(basename WDT_MODEL_FILE) +# Copy of wdt vars: WDT_DIR/$(basename WDT_VAR_FILE) +# +# WDT logs: WDT_DIR/weblogic-deploy/logs/... +# WDT stdout: WDT_DIR/createDomain.sh.out +# +# WebLogic domain home: DOMAIN_HOME_DIR +# default: /shared/domains/ +# +# Input environment variables: +# +# ORACLE_HOME Oracle home with a WebLogic install. +# default: /u01/oracle +# +# DOMAIN_HOME_DIR Target location for generated domain. +# +# WDT_MODEL_FILE Full path to WDT model file. +# default: the directory that contains this script +# plus "/wdt_model.yaml" +# +# WDT_VAR_FILE Full path to WDT variable file (java properties format). +# default: the directory that contains this script +# plus "/create-domain-inputs.yaml" +# +# WDT_DIR Target location to install and run WDT, and to keep a copy of +# $WDT_MODEL_FILE and $WDT_MODEL_VARS. Also the location +# of WDT log files. +# default: /shared/wdt +# +# WDT_VERSION WDT version to download. 
+# default: LATEST +# +# WDT_INSTALL_ZIP_FILE Filename of WDT install zip. +# default: weblogic-deploy.zip +# +# WDT_INSTALL_ZIP_URL URL for downloading WDT install zip +# default: https://github.com/oracle/weblogic-deploy-tooling/releases/latest/download/$WDT_INSTALL_ZIP_FILE +# +# WIT_DIR Target location to install WIT +# default: /shared/imagetool +# +# WIT_VERSION WIT version to download. +# default: LATEST +# +# WIT_INSTALL_ZIP_FILE Filename of WIT install zip. +# default: imagetool.zip +# +# WIT_INSTALL_ZIP_URL URL for downloading WIT install zip +# default: https://github.com/oracle/weblogic-image-tool/releases/latest/download/$WIT_INSTALL_ZIP_FILE +# + + +# Initialize globals + +export ORACLE_HOME=${ORACLE_HOME:-/u01/oracle} + +SCRIPTPATH="$( cd "$(dirname "$0")" > /dev/null 2>&1 ; pwd -P )" +WDT_MODEL_FILE=${WDT_MODEL_FILE:-"$SCRIPTPATH/wdt_model.yaml"} +WDT_VAR_FILE=${WDT_VAR_FILE:-"$SCRIPTPATH/create-domain-inputs.yaml"} + +WDT_DIR=${WDT_DIR:-/shared/wdt} +WDT_VERSION=${WDT_VERSION:-LATEST} + +WIT_DIR=${WIT_DIR:-/shared/imagetool} +WIT_VERSION=${WIT_VERSION:-LATEST} + +DOMAIN_TYPE="${DOMAIN_TYPE:-WLS}" + +function download { + local fileUrl="${1}" + + local curl_res=1 + max=20 + count=0 + while [ $curl_res -ne 0 -a $count -lt $max ] ; do + sleep 1 + count=`expr $count + 1` + for proxy in "${https_proxy}" "${https_proxy2}"; do + echo @@ "Info: Downloading $fileUrl with https_proxy=\"$proxy\"" + https_proxy="${proxy}" \ + curl --silent --show-error --connect-timeout 10 -O -L $fileUrl + curl_res=$? + [ $curl_res -eq 0 ] && break + done + done + if [ $curl_res -ne 0 ]; then + echo @@ "Error: Download failed." + return 1 + fi +} + +function run_wdt { + # + # Run WDT using WDT_VAR_FILE, WDT_MODEL_FILE, and ORACLE_HOME. + # Output: + # - result domain will be in DOMAIN_HOME_DIR + # - logging output is in $WDT_DIR/createDomain.sh.out and $WDT_DIR/weblogic-deploy/logs + # - WDT_VAR_FILE & WDT_MODEL_FILE will be copied to WDT_DIR. 
+ # + + local action="${1}" + + # Input files and directories. + + local inputs_orig="$WDT_VAR_FILE" + local model_orig="$WDT_MODEL_FILE" + local oracle_home="$ORACLE_HOME" + local domain_type="$DOMAIN_TYPE" + local wdt_bin_dir="$WDT_DIR/weblogic-deploy/bin" + local wdt_createDomain_script="$wdt_bin_dir/createDomain.sh" + + if [ ${action} = "create" ]; then + local wdt_domain_script="$wdt_bin_dir/createDomain.sh" + else + local wdt_domain_script="$wdt_bin_dir/updateDomain.sh" + fi + + local domain_home_dir="$DOMAIN_HOME_DIR" + if [ -z "${domain_home_dir}" ]; then + local domain_dir="/shared/domains" + local domain_uid=`egrep 'domainUID' $inputs_orig | awk '{print $2}'` + local domain_home_dir=$domain_dir/$domain_uid + fi + + mkdir -p $domain_home_dir + + # Output files and directories. + + local inputs_final=$WDT_DIR/$(basename "$inputs_orig") + local model_final=$WDT_DIR/$(basename "$model_orig") + if [ ${action} = "create" ]; then + local out_file=$WDT_DIR/createDomain.sh.out + else + local out_file=$WDT_DIR/updateDomain.sh.out + fi + local wdt_log_dir="$WDT_DIR/weblogic-deploy/logs" + + echo @@ "Info: About to run WDT ${wdt_domain_script}" + + for directory in wdt_bin_dir SCRIPTPATH WDT_DIR oracle_home; do + if [ ! -d "${!directory}" ]; then + echo @@ "Error: Could not find ${directory} directory ${!directory}." + return 1 + fi + done + + for fil in inputs_orig model_orig wdt_createDomain_script; do + if [ ! -f "${!fil}" ]; then + echo @@ "Error: Could not find ${fil} file ${!fil}." 
+ return 1 + fi + done + + cp $model_orig $model_final || return 1 + cp $inputs_orig $inputs_final || return 1 + + local save_dir=`pwd` + cd $WDT_DIR || return 1 + + cmd=" + $wdt_domain_script + -oracle_home $oracle_home + -domain_type $domain_type + -domain_home $domain_home_dir + -model_file $model_final + -variable_file $inputs_final + " + + echo @@ "Info: About to run the following WDT command:" + echo "${cmd}" + echo @@ "Info: WDT output will be in $out_file and $wdt_log_dir" + eval $cmd > $out_file 2>&1 + local wdt_res=$? + + cd $save_dir + + if [ $wdt_res -ne 0 ]; then + if [ ${action} = "create" ]; then + cat $WDT_DIR/createDomain.sh.out + echo @@ "Info: WDT createDomain.sh output is in $out_file and $wdt_log_dir" + echo @@ "Error: WDT createDomain.sh failed." + return 1 + else + cat $WDT_DIR/updateDomain.sh.out + echo @@ "Info: WDT updateDomain.sh output is in $out_file and $wdt_log_dir" + echo @@ "Error: WDT updateDomain.sh failed." + return 1 + fi + fi + + cd $WDT_DIR || return 1 + + cmd=" + $wdt_bin_dir/extractDomainResource.sh + -oracle_home $oracle_home + -domain_resource_file domain${action}.yaml + -domain_home $domain_home_dir + -model_file $model_final + -variable_file $inputs_final + " + echo @@ "Info: About to run the following WDT command:" + echo "${cmd}" + echo @@ "Info: WDT output will be in extract${action}.out and $wdt_log_dir" + eval $cmd > extract${action}.out 2>&1 + local wdt_res=$? + + cd $save_dir + + if [ $wdt_res -ne 0 ]; then + cat $WDT_DIR/extract${action}.out + echo @@ "Info: WDT extractDomainResource output is in extract${action}.out and $wdt_log_dir" + echo @@ "Error: WDT createDomain.sh failed." + return 1 + fi + + if [ ${action} = "create" ]; then + # chmod -R g+w $domain_home_dir || return 1 + echo @@ "Info: WDT createDomain.sh succeeded." + else + echo @@ "Info: WDT updateDomain.sh succeeded." 
+ fi + + return 0 +} + +function setup_wdt_shared_dir { + mkdir -p $WDT_DIR || return 1 +} + +# +# Install Weblogic Server Deploy Tooling to ${WDT_DIR} +# +function install_wdt { + + WDT_INSTALL_ZIP_FILE="${WDT_INSTALL_ZIP_FILE:-weblogic-deploy.zip}" + + if [ "$WDT_VERSION" == "LATEST" ]; then + WDT_INSTALL_ZIP_URL=${WDT_INSTALL_ZIP_URL:-"https://github.com/oracle/weblogic-deploy-tooling/releases/latest/download/$WDT_INSTALL_ZIP_FILE"} + else + WDT_INSTALL_ZIP_URL=${WDT_INSTALL_ZIP_URL:-"https://github.com/oracle/weblogic-deploy-tooling/releases/download/release-$WDT_VERSION/$WDT_INSTALL_ZIP_FILE"} + fi + + local save_dir=`pwd` + cd $WDT_DIR || return 1 + + echo @@ "Info: Downloading $WDT_INSTALL_ZIP_URL " + download $WDT_INSTALL_ZIP_URL || return 1 + + if [ ! -f $WDT_INSTALL_ZIP_FILE ]; then + cd $save_dir + echo @@ "Error: Download failed or $WDT_INSTALL_ZIP_FILE not found." + return 1 + fi + + echo @@ "Info: Archive downloaded to $WDT_DIR/$WDT_INSTALL_ZIP_FILE, about to unzip via 'jar xf'." + + jar xf $WDT_INSTALL_ZIP_FILE + local jar_res=$? + + cd $save_dir + + if [ $jar_res -ne 0 ]; then + echo @@ "Error: Install failed while unzipping $WDT_DIR/$WDT_INSTALL_ZIP_FILE" + return $jar_res + fi + + if [ ! -d "$WDT_DIR/weblogic-deploy/bin" ]; then + echo @@ "Error: Install failed: directory '$WDT_DIR/weblogic-deploy/bin' not found." + return 1 + fi + + chmod 775 $WDT_DIR/weblogic-deploy/bin/* || return 1 + + echo @@ "Info: Install succeeded, wdt install is in the $WDT_DIR/weblogic-deploy directory." + return 0 +} + +# +# Install WebLogic Image Tool to ${WIT_DIR}. Used by install_wit_if_needed. +# Do not call this function directory. 
+# +function install_wit { + + WIT_INSTALL_ZIP_FILE="${WIT_INSTALL_ZIP_FILE:-imagetool.zip}" + + if [ "$WIT_VERSION" == "LATEST" ]; then + WIT_INSTALL_ZIP_URL=${WDT_INSTALL_ZIP_URL:-"https://github.com/oracle/weblogic-image-tool/releases/latest/download/$WIT_INSTALL_ZIP_FILE"} + else + WIT_INSTALL_ZIP_URL=${WIT_INSTALL_ZIP_URL:-"https://github.com/oracle/weblogic-image-tool/releases/download/release-$WIT_VERSION/$WIT_INSTALL_ZIP_FILE"} + fi + + + + local save_dir=`pwd` + + echo @@ "imagetool.sh not found in ${imagetoolBinDir}. Installing imagetool..." + + echo @@ "Info: Downloading $WIT_INSTALL_ZIP_URL " + download $WIT_INSTALL_ZIP_URL || return 1 + + if [ ! -f $WIT_INSTALL_ZIP_FILE ]; then + cd $save_dir + echo @@ "Error: Download failed or $WIT_INSTALL_ZIP_FILE not found." + return 1 + fi + echo @@ "Info: Archive downloaded to $WIT_DIR/$WIT_INSTALL_ZIP_FILE, about to unzip via 'jar xf'." + + jar xf $WIT_INSTALL_ZIP_FILE + local jar_res=$? + + cd $save_dir + + if [ $jar_res -ne 0 ]; then + echo @@ "Error: Install failed while unzipping $WIT_DIR/$WIT_INSTALL_ZIP_FILE" + return $jar_res + fi + + if [ ! -d "$WIT_DIR/imagetool/bin" ]; then + echo @@ "Error: Install failed: directory '$WIT_DIR/imagetool/bin' not found." + return 1 + fi + + chmod 775 $WIT_DIR/imagetool/bin/* || return 1 +} + +# +# Checks whether WebLogic Image Tool is already installed under ${WIT_DIR}, and install +# it if not. +# +function install_wit_if_needed { + + local save_dir=`pwd` + + mkdir -p $WIT_DIR || return 1 + cd $WIT_DIR || return 1 + + imagetoolBinDir=$WIT_DIR/imagetool/bin + if [ -f $imagetoolBinDir/imagetool.sh ]; then + echo @@ "Info: imagetool.sh already exist in ${imagetoolBinDir}. Skipping WIT installation." 
+ else + install_wit + fi + + export WLSIMG_CACHEDIR="$WIT_DIR/imagetool-cache" + + # Check existing imageTool cache entry for WDT: + # - if there is already an entry, and the WDT installer file specified in the cache entry exists, skip WDT installation + # - if file in cache entry doesn't exist, delete cache entry, install WDT, and add WDT installer to cache + # - if entry does not exist, install WDT, and add WDT installer to cache + if [ "$WDT_VERSION" == "LATEST" ]; then + wdtCacheVersion="latest" + else + wdtCacheVersion=$WDT_VERSION + fi + + local listItems=$( ${imagetoolBinDir}/imagetool.sh cache listItems | grep "wdt_${wdtCacheVersion}" ) + + if [ ! -z "$listItems" ]; then + local wdt_file_path_in_cache=$(echo $listItems | sed 's/.*=\(.*\)/\1/') + if [ -f "$wdt_file_path_in_cache" ]; then + skip_wdt_install=true + else + echo @@ "Info: imageTool cache contains an entry for WDT zip at $wdt_file_path_in_cache which does not exist. Removing from cache entry." + ${imagetoolBinDir}/imagetool.sh cache deleteEntry \ + --key wdt_${wdtCacheVersion} + fi + fi + + if [ -z "$skip_wdt_install" ]; then + echo @@ "Info: imageTool cache does not contain a valid entry for wdt_${wdtCacheVersion}. Installing WDT" + setup_wdt_shared_dir || return 1 + install_wdt || return 1 + ${imagetoolBinDir}/imagetool.sh cache addInstaller \ + --type wdt \ + --version $WDT_VERSION \ + --path $WDT_DIR/$WDT_INSTALL_ZIP_FILE || return 1 + else + echo @@ "Info: imageTool cache already contains entry ${listItems}. Skipping WDT installation." + fi + + cd $save_dir + + echo @@ "Info: Install succeeded, imagetool install is in the $WIT_DIR/imagetool directory." 
+ return 0 +} + +function encrypt_model { + # + # run encryptModel.sh from WDT to encrypt model and properties files + # + local domainOutputDirFullPath=${1} # full path to directory where the model, encrypt file, and domain properties files are + local model_file=${2} # path to file containing encryption key relative to ${domainOutputDirFullPath} + local encrypt_key_file=${3} # path to file containing encryption key relative to ${domainOutputDirFullPath} + local domain_properties_file=${4} # path to domain properties file relative to ${domainOutputDirFullPath} + local oracle_home="$ORACLE_HOME" + + echo @@ "Info: encrypt passwords in the variables file at ${domainOutputDirFullPath}/${domain_properties_file} using encryption key from create-domain.sh argument written to file: ${encrypt_key_file}" + + cmd=" + cat /shared/${encrypt_key_file} /shared/${encrypt_key_file} | + /wdt/bin/encryptModel.sh \ + -oracle_home ${oracle_home} \ + -model_file /shared/${model_file} \ + -variable_file /shared/${domain_properties_file} + " + echo $cmd > ${domainOutputDirFullPath}/cmd.sh + chmod 755 ${domainOutputDirFullPath}/cmd.sh + echo @@ "Info: Encrypt Model: About to run the following command in container with image ${domainHomeImageBase}:" + cat ${domainOutputDirFullPath}/cmd.sh + + chmod 766 ${domainOutputDirFullPath}/${domain_properties_file} + docker run -it --rm -v ${domainOutputDirFullPath}:/shared -v ${WDT_DIR}/weblogic-deploy:/wdt ${domainHomeImageBase} /bin/bash -c /shared/cmd.sh || return 1 + + # clean up the generated files + rm ${domainOutputDirFullPath}/cmd.sh + + echo @@ "Info: encrypt_model Completed" +} + + diff --git a/OracleSOASuite/kubernetes/create-oracle-db-service/README.md b/OracleSOASuite/kubernetes/create-oracle-db-service/README.md old mode 100644 new mode 100755 diff --git a/OracleSOASuite/kubernetes/create-oracle-db-service/common/checkDbState.sh b/OracleSOASuite/kubernetes/create-oracle-db-service/common/checkDbState.sh old mode 100644 new mode 100755 
diff --git a/OracleSOASuite/kubernetes/create-oracle-db-service/common/oracle.db.yaml b/OracleSOASuite/kubernetes/create-oracle-db-service/common/oracle.db.yaml old mode 100644 new mode 100755 diff --git a/OracleSOASuite/kubernetes/create-oracle-db-service/create-image-pull-secret.sh b/OracleSOASuite/kubernetes/create-oracle-db-service/create-image-pull-secret.sh index 1464f9517..2a4b1f9d2 100755 --- a/OracleSOASuite/kubernetes/create-oracle-db-service/create-image-pull-secret.sh +++ b/OracleSOASuite/kubernetes/create-oracle-db-service/create-image-pull-secret.sh @@ -2,7 +2,7 @@ # Copyright (c) 2020, 2021, Oracle and/or its affiliates. # Licensed under the Universal Permissive License v 1.0 as shown at https://oss.oracle.com/licenses/upl. # -# Create ImagePullSecret to pull Oracle DB and FMW Infrastructure Image +# Create ImagePullSecret to pull Oracle DB and OracleSOASuite Image script="${BASH_SOURCE[0]}" scriptDir="$( cd "$( dirname "${script}" )" && pwd )" diff --git a/OracleSOASuite/kubernetes/create-rcu-credentials/README.md b/OracleSOASuite/kubernetes/create-rcu-credentials/README.md old mode 100644 new mode 100755 index bc2bd5bc8..f2d876e1d --- a/OracleSOASuite/kubernetes/create-rcu-credentials/README.md +++ b/OracleSOASuite/kubernetes/create-rcu-credentials/README.md @@ -53,3 +53,4 @@ sys_password: 12 bytes sys_username: 3 bytes username: 4 bytes ``` + diff --git a/OracleSOASuite/kubernetes/create-rcu-schema/README.md b/OracleSOASuite/kubernetes/create-rcu-schema/README.md old mode 100644 new mode 100755 index ed6ce83dd..29dc86927 --- a/OracleSOASuite/kubernetes/create-rcu-schema/README.md +++ b/OracleSOASuite/kubernetes/create-rcu-schema/README.md @@ -21,19 +21,19 @@ This script generates the RCU schema based `schemaPrefix` and `dburl`. The script assumes that either the image, `soasuite:12.2.1.4`, is available in the nodes or an `ImagePullSecret` is created to pull the image. To create a secret, see the script `create-image-pull-secret.sh`. 
-```shell +``` $ ./create-rcu-schema.sh -h usage: ./create-rcu-schema.sh -s -t -d -i -u -p -n -q -r -o -c [-h] -s RCU Schema Prefix (required) -t RCU Schema Type (optional) - (supported values: osb,soa,soaosb) + (supported values: osb,soa,soaosb,soab2b,soaosbb2b) -d RCU Oracle Database URL (optional) (default: oracle-db.default.svc.cluster.local:1521/devpdb.k8s) - -p FMW Infrastructure ImagePullSecret (optional) + -p OracleSOASuite ImagePullSecret (optional) (default: none) - -i FMW Infrastructure Image (optional) + -i OracleSOASuite Image (optional) (default: soasuite:12.2.1.4) - -u FMW Infrastructure ImagePullPolicy (optional) + -u OracleSOASuite ImagePullPolicy (optional) (default: IfNotPresent) -n Namespace for RCU pod (optional) (default: default) @@ -76,7 +76,7 @@ You can connect to the database in your app using: Class.forName("oracle.jdbc.OracleDriver").newInstance(); java.sql.Connection conn = Driver.connect("sys as sysdba", props); -Creating RCU Schema for FMW Domain ... +Creating RCU Schema for OracleSOASuite Domain ... Extra RCU Schema Component Choosen[] Processing command line .... @@ -128,12 +128,12 @@ Repository Creation Utility - Create : Operation Completed Use this script to drop the RCU schema based `schemaPrefix` and `dburl`. -```shell +``` $ ./drop-rcu-schema.sh -h usage: ./drop-rcu-schema.sh -s -d -n -q -r [-h] -s RCU Schema Prefix (required) -t RCU Schema Type (optional) - (supported values: osb,soa,soaosb) + (supported values: osb,soa,soaosb,soab2b,soaosbb2b) -d Oracle Database URL (optional) (default: oracle-db.default.svc.cluster.local:1521/devpdb.k8s) -n Namespace where RCU pod is deployed (optional) @@ -166,7 +166,7 @@ You can connect to the database in your app using: Class.forName("oracle.jdbc.OracleDriver").newInstance(); java.sql.Connection conn = Driver.connect("sys as sysdba", props); -Dropping RCU Schema for FMW Domain ... +Dropping RCU Schema for OracleSOASuite Domain ... 
Extra RCU Schema Component(s) Choosen[] Processing command line .... @@ -216,3 +216,4 @@ Pod [rcu] removed from nameSpace [default] ## Stop an Oracle Database service in a Kubernetes cluster Use the script ``samples/scripts/create-oracle-db-service/stop-db-service.sh`` + diff --git a/OracleSOASuite/kubernetes/create-rcu-schema/common/rcu.yaml b/OracleSOASuite/kubernetes/create-rcu-schema/common/rcu.yaml old mode 100644 new mode 100755 diff --git a/OracleSOASuite/kubernetes/create-rcu-schema/common/template/rcu.yaml.template b/OracleSOASuite/kubernetes/create-rcu-schema/common/template/rcu.yaml.template old mode 100644 new mode 100755 diff --git a/OracleSOASuite/kubernetes/create-rcu-schema/create-rcu-schema.sh b/OracleSOASuite/kubernetes/create-rcu-schema/create-rcu-schema.sh index 90746fdb1..bfc9fceb7 100755 --- a/OracleSOASuite/kubernetes/create-rcu-schema/create-rcu-schema.sh +++ b/OracleSOASuite/kubernetes/create-rcu-schema/create-rcu-schema.sh @@ -9,17 +9,17 @@ scriptDir="$( cd "$( dirname "${script}" )" && pwd )" source ${scriptDir}/../common/utility.sh function usage { - echo "usage: ${script} -s -t -d -i -u -p -n -q -r -o -c [-h]" + echo "usage: ${script} -s -t -d -i -u -p -n -q -r -o -c [-l] [-h] " echo " -s RCU Schema Prefix (required)" echo " -t RCU Schema Type (optional)" - echo " (supported values: osb,soa,soaosb)" + echo " (supported values: osb,soa,soaosb,soab2b,soaosbb2b)" echo " -d RCU Oracle Database URL (optional) " echo " (default: oracle-db.default.svc.cluster.local:1521/devpdb.k8s) " - echo " -p FMW Infrastructure ImagePullSecret (optional) " + echo " -p OracleSOASuite ImagePullSecret (optional) " echo " (default: none) " - echo " -i FMW Infrastructure Image (optional) " + echo " -i OracleSOASuite Image (optional) " echo " (default: soasuite:12.2.1.4) " - echo " -u FMW Infrastructure ImagePullPolicy (optional) " + echo " -u OracleSOASuite ImagePullPolicy (optional) " echo " (default: IfNotPresent) " echo " -n Namespace for RCU pod (optional)" 
echo " (default: default)" @@ -29,13 +29,57 @@ function usage { echo " (default: Oradoc_db1)" echo " -o Output directory for the generated YAML file. (optional)" echo " (default: rcuoutput)" - echo " -c Comma-separated variables in the format variablename=value. (optional)." + echo " -c Comma-separated custom variables in the format variablename=value. (optional)." echo " (default: none)" + echo " -l Timeout limit in seconds. (optional)." + echo " (default: 300)" echo " -h Help" exit $1 } -while getopts ":h:s:d:p:i:t:n:q:r:o:u:c:" opt; do +# Checks if all container(s) in a pod are running state based on READY column using given timeout limit +# NAME READY STATUS RESTARTS AGE +# domain1-adminserver 1/1 Running 0 4m +function checkPodStateUsingCustomTimeout(){ + + status="NotReady" + count=1 + + pod=$1 + ns=$2 + state=${3:-1/1} + timeoutLimit=${4:-300} + max=`expr ${timeoutLimit} / 5` + + echo "Checking Pod READY column for State [$state]" + pname=`kubectl get po -n ${ns} | grep -w ${pod} | awk '{print $1}'` + if [ -z ${pname} ]; then + echo "No such pod [$pod] exists in NameSpace [$ns] " + exit -1 + fi + + rcode=`kubectl get po ${pname} -n ${ns} | grep -w ${pod} | awk '{print $2}'` + [[ ${rcode} -eq "${state}" ]] && status="Ready" + + while [ ${status} != "Ready" -a $count -le $max ] ; do + sleep 5 + rcode=`kubectl get po/$pod -n ${ns} | grep -v NAME | awk '{print $2}'` + [[ ${rcode} -eq "1/1" ]] && status="Ready" + echo "Pod [$1] Status is ${status} Iter [$count/$max]" + count=`expr $count + 1` + done + if [ $count -gt $max ] ; then + echo "[ERROR] Unable to start the Pod [$pod] after ${timeout}s "; + exit 1 + fi + + pname=`kubectl get po -n ${ns} | grep -w ${pod} | awk '{print $1}'` + kubectl -n ${ns} get po ${pname} +} + +timeout=300 + +while getopts ":h:s:d:p:i:t:n:q:r:o:u:c:l:" opt; do case $opt in s) schemaPrefix="${OPTARG}" ;; @@ -59,6 +103,8 @@ while getopts ":h:s:d:p:i:t:n:q:r:o:u:c:" opt; do ;; c) customVariables="${OPTARG}" ;; + l) timeout="${OPTARG}" + 
;; h) usage 0 ;; *) usage 1 @@ -112,6 +158,10 @@ if [ -z ${customVariables} ]; then customVariables="none" fi +if [ -z ${timeout} ]; then + timeout=300 +fi + echo "ImagePullSecret[$pullsecret] Image[${fmwimage}] dburl[${dburl}] rcuType[${rcuType}] customVariables[${customVariables}]" mkdir -p ${rcuOutputDir} @@ -130,7 +180,7 @@ kubectl apply -f $rcuYaml # Make sure the rcu deployment Pod is RUNNING checkPod rcu $namespace -checkPodState rcu $namespace "1/1" +checkPodStateUsingCustomTimeout rcu $namespace "1/1" ${timeout} sleep 5 kubectl get po/rcu -n $namespace diff --git a/OracleSOASuite/kubernetes/create-rcu-schema/drop-rcu-schema.sh b/OracleSOASuite/kubernetes/create-rcu-schema/drop-rcu-schema.sh index 48c4b3858..ad7a4c919 100755 --- a/OracleSOASuite/kubernetes/create-rcu-schema/drop-rcu-schema.sh +++ b/OracleSOASuite/kubernetes/create-rcu-schema/drop-rcu-schema.sh @@ -12,7 +12,7 @@ function usage { echo "usage: ${script} -s -d -n -q -r -c [-h]" echo " -s RCU Schema Prefix (required)" echo " -t RCU Schema Type (optional)" - echo " (supported values: osb,soa,soaosb) " + echo " (supported values: osb,soa,soaosb,soab2b,soaosbb2b) " echo " -d Oracle Database URL (optional)" echo " (default: oracle-db.default.svc.cluster.local:1521/devpdb.k8s) " echo " -n Namespace where RCU pod is deployed (optional)" @@ -21,7 +21,7 @@ function usage { echo " (default: Oradoc_db1)" echo " -r password for all schema owner (regular user). (optional)" echo " (default: Oradoc_db1)" - echo " -c Comma-separated variables in the format variablename=value. (optional)." + echo " -c Comma-separated custom variables in the format variablename=value. (optional)." 
echo " (default: none)" echo " -h Help" exit $1 @@ -105,3 +105,4 @@ fi kubectl delete pod rcu -n ${namespace} checkPodDelete rcu ${namespace} + diff --git a/OracleSOASuite/kubernetes/create-soa-domain/domain-home-on-pv/create-domain-inputs.yaml b/OracleSOASuite/kubernetes/create-soa-domain/domain-home-on-pv/create-domain-inputs.yaml old mode 100644 new mode 100755 index 23113a478..3d63795a2 --- a/OracleSOASuite/kubernetes/create-soa-domain/domain-home-on-pv/create-domain-inputs.yaml +++ b/OracleSOASuite/kubernetes/create-soa-domain/domain-home-on-pv/create-domain-inputs.yaml @@ -156,7 +156,7 @@ createDomainScriptsMountPath: /u01/weblogic # `createDomainScriptsMountPath` property. # # If you need to provide your own scripts to create the domain home, instead of using the -# built-it scripts, you must use this property to set the name of the script that you want +# built-in scripts, you must use this property to set the name of the script that you want # the create domain job to run. createDomainScriptName: create-domain-job.sh diff --git a/OracleSOASuite/kubernetes/create-soa-domain/domain-home-on-pv/create-domain-job-template.yaml b/OracleSOASuite/kubernetes/create-soa-domain/domain-home-on-pv/create-domain-job-template.yaml old mode 100644 new mode 100755 diff --git a/OracleSOASuite/kubernetes/create-soa-domain/domain-home-on-pv/delete-domain-job-template.yaml b/OracleSOASuite/kubernetes/create-soa-domain/domain-home-on-pv/delete-domain-job-template.yaml old mode 100644 new mode 100755 diff --git a/OracleSOASuite/kubernetes/create-soa-domain/domain-home-on-pv/deploy-artifacts-inputs.yaml b/OracleSOASuite/kubernetes/create-soa-domain/domain-home-on-pv/deploy-artifacts-inputs.yaml new file mode 100644 index 000000000..a8865d435 --- /dev/null +++ b/OracleSOASuite/kubernetes/create-soa-domain/domain-home-on-pv/deploy-artifacts-inputs.yaml @@ -0,0 +1,108 @@ +# Copyright (c) 2021, Oracle and/or its affiliates. 
+# Licensed under the Universal Permissive License v 1.0 as shown at https://oss.oracle.com/licenses/upl. + +# The version of this inputs file. Do not modify. +version: deploy-artifacts-inputs-v1 + +# Port number for admin server +adminPort: 7001 + +# Name of the Admin Server +adminServerName: AdminServer + +# Unique ID identifying a domain. +# This ID must not contain an underscope ("_"), and must be lowercase and unique across all domains in a Kubernetes cluster. +domainUID: soainfra + +# Domain Type must be specified depending on the usecase. Based on this value deployment is triggered on Cluster. +# In case of SOASuite domains, the supported Domain types are soa,osb,soaosb. +# soa : Deploys artifacts into a SOA Domain +# osb : Deploys artifcats into an OSB Domain (Oracle Service Bus) +# soaosb : Deploys artifacts into both SOA and OSB Domain +domainType: soa + +# SOA Cluster name +soaClusterName: soa_cluster + +# Port number for each SOA managed server +soaManagedServerPort: 8001 + +# SOA Suite image. +# Artifacts deployment requires SOA Suite 12.2.1.4.0. +# Refer to https://oracle.github.io/fmw-kubernetes/soa-domains/userguide/prepare-your-environment/#obtaining-the-soa-suite-docker-image/ for details on how to obtain or create the image. +image: soasuite:12.2.1.4 + +# Image pull policy +# Legal values are "IfNotPresent", "Always", or "Never" +imagePullPolicy: IfNotPresent + +# Name of the Kubernetes secret to access the container registry to pull the SOA Suite image +# The presence of the secret will be validated when this parameter is enabled. +#imagePullSecretName: + +# Name of the Kubernetes secret for the Admin Server's username and password +# The name must be lowercase. 
+weblogicCredentialsSecretName: soainfra-domain-credentials + +# Name of the domain namespace +namespace: soans + +# The deploy artifacts source type +# Set to PersistentVolume for deploy artifacts available in Persistent Volume and Image for deploy artifacts available as an image +#artifactsSourceType: PersistentVolume +artifactsSourceType: Image + +# Name of the persistent volume claim +# Required if ArtifactsSourceType is PersistentVolume +#persistentVolumeClaimName: soainfra-deploy-artifacts-pvc + +# Deploy artifacts image +# Required if ArtifactsSourceType is Image +artifactsImage: artifacts:12.2.1.4 + +# Image pull policy +# Legal values are "IfNotPresent", "Always", or "Never" +artifactsImagePullPolicy: IfNotPresent + +# Name of the Kubernetes secret to access the container registry to pull the Artifacts image +# The presence of the secret will be validated when this parameter is enabled. +#artifactsImagePullSecretName: + +# Mount path where the deploy artifacts scripts are located inside a pod +# +# The `deploy-artifacts.sh` script creates a Kubernetes job to run the script (specified in the +# `deployScriptName` property) in a Kubernetes pod to deploy artifacts into respective cluster. Files +# in the `deployScriptFilesDir` directory are mounted to this location in the pod, so that +# a Kubernetes pod can use the scripts and supporting files to deploy artifacts. +deployScriptsMountPath: /u01/weblogic + +# Script that deploys artifacts into the Cluster +# +# The `deploy-artifacts.sh` script creates a Kubernetes job to run this script to deploy +# artifacts into respective cluster. The script is located in the in-pod directory that +# is specified in the `deployScriptsMountPath` property. +# +# If you need to provide your own scripts to deploy the artifacts, instead of using the +# built-in scripts, you must use this property to set the name of the script that you want +# the deploy artifacts job to run. 
+deployScriptName: deploy.sh + +# Directory on the host machine to locate all the files to deploy artifacts +# It contains the script that is specified in the `deployScriptName` property. +# +# By default, this directory is set to the relative path `deploy`, and the deploy artifacts script will +# use the built-in deploy.sh scripts in the `deploy` directory to deploy artifacts into the cluster. +# +# An absolute path is also supported to point to an arbitrary directory in the file system. +# +# The built-in scripts can be replaced by the user-provided scripts or files as long +# as those files are in the specified directory. Files in this directory are put into a +# Kubernetes config map, which in turn is mounted to the `deployScriptsMountPath`, +# so that the Kubernetes pod can use the scripts and supporting files for deploying artifacts. +deployScriptFilesDir: deploy + +# Directory inside container where SOA Archives are placed +soaArtifactsArchivePath: /u01/sarchives + +# Directory inside container where Service Bus Archives are placed +osbArtifactsArchivePath: /u01/sbarchives diff --git a/OracleSOASuite/kubernetes/create-soa-domain/domain-home-on-pv/deploy-artifacts-job-template.yaml b/OracleSOASuite/kubernetes/create-soa-domain/domain-home-on-pv/deploy-artifacts-job-template.yaml new file mode 100644 index 000000000..0e20279aa --- /dev/null +++ b/OracleSOASuite/kubernetes/create-soa-domain/domain-home-on-pv/deploy-artifacts-job-template.yaml @@ -0,0 +1,84 @@ +# Copyright (c) 2021, Oracle and/or its affiliates. +# Licensed under the Universal Permissive License v 1.0 as shown at https://oss.oracle.com/licenses/upl. 
+apiVersion: batch/v1 +kind: Job +metadata: + name: %DOMAIN_UID%-deploy-artifacts-job-%RUN_UID% + namespace: %NAMESPACE% +spec: + template: + metadata: + labels: + weblogic.domainUID: %DOMAIN_UID% + weblogic.domainName: %DOMAIN_NAME% + app: %DOMAIN_UID%-deploy-artifacts-job + spec: + restartPolicy: Never + %ARTIFACTS_IN_IMAGE_PREFIX%initContainers: + %ARTIFACTS_IN_IMAGE_PREFIX%- name: prepare-artifacts + %ARTIFACTS_IN_IMAGE_PREFIX% image: %ARTIFACTS_IMAGE% + %ARTIFACTS_IN_IMAGE_PREFIX% imagePullPolicy: %ARTIFACTS_IMAGE_PULL_POLICY% + %ARTIFACTS_IN_IMAGE_PREFIX% command: [ "/bin/sh" ] + %ARTIFACTS_IN_IMAGE_PREFIX% args: ["-c","cp -rf %SOA_ARTIFACTS_ARCHIVE_PATH% /artifacts/soa; cp -rf %OSB_ARTIFACTS_ARCHIVE_PATH% /artifacts/osb; ls -R /artifacts" ] + %ARTIFACTS_IN_IMAGE_PREFIX% volumeMounts: + %ARTIFACTS_IN_IMAGE_PREFIX% - mountPath: "/artifacts" + %ARTIFACTS_IN_IMAGE_PREFIX% name: deploy-artifacts-job-storage-volume + containers: + %OSB_DEPLOY_PREFIX%- name: osb-deploy-artifacts-job + %OSB_DEPLOY_PREFIX% image: "%WEBLOGIC_IMAGE%" + %OSB_DEPLOY_PREFIX% imagePullPolicy: %WEBLOGIC_IMAGE_PULL_POLICY% + %OSB_DEPLOY_PREFIX% volumeMounts: + %OSB_DEPLOY_PREFIX% - mountPath: "%DEPLOY_ARTIFACTS_SCRIPT_DIR%" + %OSB_DEPLOY_PREFIX% name: deploy-scripts-osb-job-cm-volume + %OSB_DEPLOY_PREFIX% - mountPath: "%OSB_ARTIFACTS_ARCHIVE_PATH%" + %OSB_DEPLOY_PREFIX% name: deploy-artifacts-job-storage-volume + %OSB_DEPLOY_PREFIX% subPath: osb + %OSB_DEPLOY_PREFIX% - mountPath: /weblogic-operator/secrets + %OSB_DEPLOY_PREFIX% name: soa-infra-credentials-volume + %OSB_DEPLOY_PREFIX% command: ["/bin/sh"] + %OSB_DEPLOY_PREFIX% args: ["%DEPLOY_ARTIFACTS_SCRIPT_DIR%/%DEPLOY_SCRIPT%"] + %OSB_DEPLOY_PREFIX% env: + %OSB_DEPLOY_PREFIX% - name: DOMAIN_UID + %OSB_DEPLOY_PREFIX% value: "%DOMAIN_UID%" + %OSB_DEPLOY_PREFIX% - name: ADMIN_SERVER_NAME_SVC + %OSB_DEPLOY_PREFIX% value: "%ADMIN_SERVER_NAME_SVC%" + %OSB_DEPLOY_PREFIX% - name: ADMIN_LISTEN_PORT + %OSB_DEPLOY_PREFIX% value: "%ADMIN_PORT%" + 
%SOA_DEPLOY_PREFIX%- name: soa-deploy-artifacts-job + %SOA_DEPLOY_PREFIX% image: "%WEBLOGIC_IMAGE%" + %SOA_DEPLOY_PREFIX% imagePullPolicy: %WEBLOGIC_IMAGE_PULL_POLICY% + %SOA_DEPLOY_PREFIX% volumeMounts: + %SOA_DEPLOY_PREFIX% - mountPath: "%DEPLOY_ARTIFACTS_SCRIPT_DIR%" + %SOA_DEPLOY_PREFIX% name: deploy-scripts-soa-job-cm-volume + %SOA_DEPLOY_PREFIX% - mountPath: "%SOA_ARTIFACTS_ARCHIVE_PATH%" + %SOA_DEPLOY_PREFIX% name: deploy-artifacts-job-storage-volume + %SOA_DEPLOY_PREFIX% subPath: soa + %SOA_DEPLOY_PREFIX% - mountPath: /weblogic-operator/secrets + %SOA_DEPLOY_PREFIX% name: soa-infra-credentials-volume + %SOA_DEPLOY_PREFIX% command: ["/bin/sh"] + %SOA_DEPLOY_PREFIX% args: ["%DEPLOY_ARTIFACTS_SCRIPT_DIR%/%DEPLOY_SCRIPT%"] + %SOA_DEPLOY_PREFIX% env: + %SOA_DEPLOY_PREFIX% - name: DOMAIN_UID + %SOA_DEPLOY_PREFIX% value: "%DOMAIN_UID%" + %SOA_DEPLOY_PREFIX% - name: SOA_MANAGED_SERVER_PORT + %SOA_DEPLOY_PREFIX% value: "%SOA_MANAGED_SERVER_PORT%" + %SOA_DEPLOY_PREFIX% - name: SOA_CLUSTER_NAME + %SOA_DEPLOY_PREFIX% value: "%SOA_CLUSTER_NAME%" + volumes: + - name: deploy-artifacts-job-storage-volume + %ARTIFACTS_IN_PV_PREFIX% persistentVolumeClaim: + %ARTIFACTS_IN_PV_PREFIX% claimName: "%ARCHIVES_PVC_NAME%" + %ARTIFACTS_IN_IMAGE_PREFIX% emptyDir: {} + %SOA_DEPLOY_PREFIX%- name: deploy-scripts-soa-job-cm-volume + %SOA_DEPLOY_PREFIX% configMap: + %SOA_DEPLOY_PREFIX% name: %DOMAIN_UID%-deploy-scripts-soa-job-cm + %OSB_DEPLOY_PREFIX%- name: deploy-scripts-osb-job-cm-volume + %OSB_DEPLOY_PREFIX% configMap: + %OSB_DEPLOY_PREFIX% name: %DOMAIN_UID%-deploy-scripts-osb-job-cm + - name: soa-infra-credentials-volume + secret: + secretName: %WEBLOGIC_CREDENTIALS_SECRET_NAME% + %IMAGE_PULL_SECRET_PREFIX%imagePullSecrets: + %WEBLOGIC_IMAGE_PULL_SECRET_PREFIX%- name: %WEBLOGIC_IMAGE_PULL_SECRET_NAME% + %ARTIFACTS_IMAGE_PULL_SECRET_PREFIX%- name: %ARTIFACTS_IMAGE_PULL_SECRET_NAME% + diff --git a/OracleSOASuite/kubernetes/create-soa-domain/domain-home-on-pv/deploy-artifacts.sh 
b/OracleSOASuite/kubernetes/create-soa-domain/domain-home-on-pv/deploy-artifacts.sh new file mode 100755 index 000000000..0efa13b2a --- /dev/null +++ b/OracleSOASuite/kubernetes/create-soa-domain/domain-home-on-pv/deploy-artifacts.sh @@ -0,0 +1,441 @@ +#!/usr/bin/env bash +# Copyright (c) 2021, Oracle and/or its affiliates. +# Licensed under the Universal Permissive License v 1.0 as shown at https://oss.oracle.com/licenses/upl. +# +# Description +# This sample script deploys SOA Suite and Service Bus artifacts available in an existing PV/PVC into SOA Suite domain. 

# The deploy artifacts inputs can be customized by editing deploy-artifacts-inputs.yaml +# +# The following pre-requisites must be handled prior to running this script: +# * The SOA Suite domain must already be created and servers must be up and running. +# * The Kubernetes secrets 'username' and 'password' of the admin account must be available in the namespace +# * The host directory that will be used as the persistent volume must already exist +# and have the appropriate file permissions set. +# * The Kubernetes persistent volume must already be created +# * The Kubernetes persistent volume claim must already be created +# + +# Initialize +script="${BASH_SOURCE[0]}" +scriptDir="$( cd "$( dirname "${script}" )" && pwd )" +runId=$(date +%Y%m%d-%H%M%S) +# source weblogic operator provided common utility scripts +source ${scriptDir}/../../common/utility.sh +source ${scriptDir}/../../common/validate.sh +# source SOA specific utility scripts +source ${scriptDir}/../utils/utility.sh +source ${scriptDir}/../utils/validate.sh + +function usage { + echo usage: ${script} -o dir -i file [-v] [-t] [-h] + echo " -i Parameter inputs file, must be specified." + echo " -o Output directory for the generated yaml files, must be specified." + echo " -v Validate the existence of persistentVolumeClaim, optional." + echo " -t Timeout (in seconds) for deploy artifacts job execution, optional." 
+ echo " -h Help" + exit $1 +} + +# +# Parse the command line options +# +doValidation=false +timeout=600 +while getopts "vhi:o:t:" opt; do + case $opt in + i) valuesInputFile="${OPTARG}" + ;; + o) outputDir="${OPTARG}" + ;; + v) doValidation=true + ;; + t) timeout="${OPTARG}" + ;; + h) usage 0 + ;; + *) usage 1 + ;; + esac +done + +if [ -z ${valuesInputFile} ]; then + echo "${script}: -i must be specified." + missingRequiredOption="true" +fi + +if [ -z ${outputDir} ]; then + echo "${script}: -o must be specified." + missingRequiredOption="true" +fi + +if [ "${missingRequiredOption}" == "true" ]; then + usage 1 +fi + +if [ -z ${timeout} ]; then + timeout=600 +fi + +# +# Function to initialize and validate the output directory +# for the generated yaml files for this domain. +# +function initOutputDir { + deployOutputDir="${outputDir}/deploy-artifacts/${domainUID}/${runId}" + # Create a directory for the output files + mkdir -p ${deployOutputDir} + + removeFileIfExists ${deployOutputDir}/${valuesInputFile} + removeFileIfExists ${deployOutputDir}/deploy-artifacts-inputs.yaml + removeFileIfExists ${deployOutputDir}/deploy-artifacts-job.yaml +} + + +# +# Function to setup the environment to run the create domain job +# +function initialize { + + parseCommonInputs + adminServerNameSVC=$(toDNS1123Legal $adminServerName) + soaClusterNameSVC=$(toDNS1123Legal $soaClusterName) + # Validate the required files exist + validateErrors=false + + validateKubectlAvailable + + if [ -z "${valuesInputFile}" ]; then + validationError "You must use the -i option to specify the name of the inputs parameter file (a modified copy of kubernetes/create-soa-domain/domain-home-on-pv/deploy-artifacts-inputs.yaml)." + else + if [ ! 
-f ${valuesInputFile} ]; then + validationError "Unable to locate the input parameters file ${valuesInputFile}" + fi + fi + + if [ -z "${outputDir}" ]; then + validationError "You must use the -o option to specify the name of an existing directory to store the generated yaml files in." + fi + + createJobInput="${scriptDir}/deploy-artifacts-job-template.yaml" + if [ ! -f ${createJobInput} ]; then + validationError "The template file ${createJobInput} for deploying artifacts to SOA domain was not found" + fi + + failIfValidationErrors + + + initOutputDir + +} + + +function createFiles { + + copyInputsFileToOutputDirectory ${valuesInputFile} "${deployOutputDir}/deploy-artifacts-inputs.yaml" + domainName=${domainUID} + + enabledPrefix="" # uncomment the feature + disabledPrefix="# " # comment out the feature + + if [ -z "${artifactsSourceType}" ]; then + artifactsSourceType="PersistentVolume" + fi + + if [ -z "${weblogicCredentialsSecretName}" ]; then + weblogicCredentialsSecretName="${domainUID}-weblogic-credentials" + fi + soaDeployPrefix=${disabledPrefix} + osbDeployPrefix=${disabledPrefix} + artifactsInPvPrefix=${disabledPrefix} + artifactsInImagePrefix=${disabledPrefix} + artifactsImagePullSecretPrefix=${disabledPrefix} + weblogicImagePullSecretPrefix=${disabledPrefix} + imagePullSecretPrefix=${disabledPrefix} + + if [[ $artifactsSourceType == "PersistentVolume" ]]; then + artifactsImage="" + artifactsInPvPrefix=${enabledPrefix} + if [ -z "${persistentVolumeClaimName}" ]; then + persistentVolumeClaimName="${domainUID}-deploy-artifacts-pvc" + fi + fi + + if [[ $artifactsSourceType == "Image" ]]; then + validateArtifactsImagePullSecretName + artifactsInImagePrefix=${enabledPrefix} + if [ -z "${artifactsImage}" ]; then + artifactsImage="artifacts:12.2.1.4" + fi + artifactsImage=$(echo ${artifactsImage} | sed -e "s/\:/\\\:/g") + if [ ! 
-z ${artifactsImagePullSecretName} ]; then + artifactsImagePullSecretPrefix=${enabledPrefix} + imagePullSecretPrefix=${enabledPrefix} + fi + fi + + if [[ $domainType =~ "soa" ]]; then + soaDeployPrefix=${enabledPrefix} + fi + if [[ $domainType =~ "osb" ]]; then + osbDeployPrefix=${enabledPrefix} + fi + createJobOutput="${deployOutputDir}/deploy-artifacts-job.yaml" + # Use the default value if not defined. + if [ -z "${deployScriptsMountPath}" ]; then + deployScriptsMountPath="/u01/weblogic" + fi + + if [ ! -z "${imagePullSecretName}" ]; then + weblogicImagePullSecretPrefix=${enabledPrefix} + imagePullSecretPrefix=${enabledPrefix} + fi + # Must escape the ':' value in image for sed to properly parse and replace + image=$(echo ${image} | sed -e "s/\:/\\\:/g") + + # Generate the yaml to create the kubernetes job that will deploy the artifacts + echo Generating ${createJobOutput} + + cp ${createJobInput} ${createJobOutput} + sed -i -e "s:%NAMESPACE%:$namespace:g" ${createJobOutput} + sed -i -e "s:%RUN_UID%:$runId:g" ${createJobOutput} + sed -i -e "s:%WEBLOGIC_CREDENTIALS_SECRET_NAME%:${weblogicCredentialsSecretName}:g" ${createJobOutput} + sed -i -e "s:%WEBLOGIC_IMAGE%:${image}:g" ${createJobOutput} + sed -i -e "s:%WEBLOGIC_IMAGE_PULL_POLICY%:${imagePullPolicy}:g" ${createJobOutput} + sed -i -e "s:%WEBLOGIC_IMAGE_PULL_SECRET_NAME%:${imagePullSecretName}:g" ${createJobOutput} + sed -i -e "s:%IMAGE_PULL_SECRET_PREFIX%:${imagePullSecretPrefix}:g" ${createJobOutput} + sed -i -e "s:%WEBLOGIC_IMAGE_PULL_SECRET_PREFIX%:${weblogicImagePullSecretPrefix}:g" ${createJobOutput} + sed -i -e "s:%DOMAIN_UID%:${domainUID}:g" ${createJobOutput} + sed -i -e "s:%DOMAIN_NAME%:${domainName}:g" ${createJobOutput} + sed -i -e "s:%ADMIN_SERVER_NAME_SVC%:${adminServerNameSVC}:g" ${createJobOutput} + sed -i -e "s:%ADMIN_PORT%:${adminPort}:g" ${createJobOutput} + sed -i -e "s:%SOA_DEPLOY_PREFIX%:${soaDeployPrefix}:g" ${createJobOutput} + sed -i -e 
"s:%SOA_MANAGED_SERVER_PORT%:${soaManagedServerPort}:g" ${createJobOutput} + sed -i -e "s:%OSB_DEPLOY_PREFIX%:${osbDeployPrefix}:g" ${createJobOutput} + sed -i -e "s:%SOA_CLUSTER_NAME%:${soaClusterNameSVC}:g" ${createJobOutput} + sed -i -e "s:%OSB_CLUSTER_NAME%:${osbClusterName}:g" ${createJobOutput} + sed -i -e "s:%ARCHIVES_PVC_NAME%:${persistentVolumeClaimName}:g" ${createJobOutput} + sed -i -e "s:%DEPLOY_ARTIFACTS_SCRIPT_DIR%:${deployScriptsMountPath}:g" ${createJobOutput} + sed -i -e "s:%DEPLOY_SCRIPT%:${deployScriptName}:g" ${createJobOutput} + sed -i -e "s:%ARTIFACTS_IN_IMAGE_PREFIX%:${artifactsInImagePrefix}:g" ${createJobOutput} + sed -i -e "s:%ARTIFACTS_IN_PV_PREFIX%:${artifactsInPvPrefix}:g" ${createJobOutput} + sed -i -e "s:%ARTIFACTS_IMAGE%:${artifactsImage}:g" ${createJobOutput} + sed -i -e "s:%ARTIFACTS_IMAGE_PULL_POLICY%:${artifactsImagePullPolicy}:g" ${createJobOutput} + sed -i -e "s:%ARTIFACTS_IMAGE_PULL_SECRET_NAME%:${artifactsImagePullSecretName}:g" ${createJobOutput} + sed -i -e "s:%ARTIFACTS_IMAGE_PULL_SECRET_PREFIX%:${artifactsImagePullSecretPrefix}:g" ${createJobOutput} + sed -i -e "s:%SOA_ARTIFACTS_ARCHIVE_PATH%:${soaArtifactsArchivePath}:g" ${createJobOutput} + sed -i -e "s:%OSB_ARTIFACTS_ARCHIVE_PATH%:${osbArtifactsArchivePath}:g" ${createJobOutput} + # Remove any "...yaml-e" and "...properties-e" files left over from running sed + rm -f ${deployOutputDir}/*.yaml-e + rm -f ${deployOutputDir}/*.properties-e + + +} + +# create configmap using what is in the deployScriptFilesDir +function deployConfigmap { + # Use the default files if deployScriptFilesDir is not specified + if [ -z "${deployScriptFilesDir}" ]; then + deployFilesDir=${scriptDir}/deploy + elif [[ ! 
${deployScriptFilesDir} == /* ]]; then + deployFilesDir=${scriptDir}/${deployScriptFilesDir} + fi + + + if [[ "$domainType" =~ "soa" ]]; then + # customize the files with domain information + soaExternalFilesTmpDir=$deployOutputDir/soa + mkdir -p $soaExternalFilesTmpDir + cp ${deployScriptFilesDir}/soa/* ${soaExternalFilesTmpDir}/ + cp ${deployOutputDir}/deploy-artifacts-inputs.yaml ${soaExternalFilesTmpDir}/ + local cmName=${domainUID}-deploy-scripts-soa-job-cm + kubectl create configmap ${cmName} -n $namespace --from-file $soaExternalFilesTmpDir --dry-run=client -o yaml | kubectl apply -f - + echo Checking the configmap $cmName was created + local num=`kubectl get cm -n $namespace | grep ${cmName} | wc | awk ' { print $1; } '` + if [ "$num" != "1" ]; then + fail "The configmap ${cmName} was not created" + fi + + kubectl label configmap ${cmName} --overwrite=true -n $namespace weblogic.domainUID=$domainUID weblogic.domainName=$domainName + rm -rf $soaExternalFilesTmpDir + fi + + if [[ "$domainType" =~ "osb" ]]; then + # customize the files with domain information + osbExternalFilesTmpDir=$deployOutputDir/osb + mkdir -p $osbExternalFilesTmpDir + cp ${deployScriptFilesDir}/osb/* ${osbExternalFilesTmpDir}/ + cp ${deployOutputDir}/deploy-artifacts-inputs.yaml ${osbExternalFilesTmpDir}/ + local cmName=${domainUID}-deploy-scripts-osb-job-cm + kubectl create configmap ${cmName} -n $namespace --from-file $osbExternalFilesTmpDir --dry-run=client -o yaml | kubectl apply -f - + echo Checking the configmap $cmName was created + local num=`kubectl get cm -n $namespace | grep ${cmName} | wc | awk ' { print $1; } '` + if [ "$num" != "1" ]; then + fail "The configmap ${cmName} was not created" + fi + + kubectl label configmap ${cmName} --overwrite=true -n $namespace weblogic.domainUID=$domainUID weblogic.domainName=$domainName + rm -rf $osbExternalFilesTmpDir + fi +} + + +# Clean up the configmaps after deployments +function cleanUpConfigMaps { + + if [[ "$domainType" =~ "soa" 
]]; then + local cmName=${domainUID}-deploy-scripts-soa-job-cm + kubectl delete configmap ${cmName} -n $namespace + fi + + if [[ "$domainType" =~ "osb" ]]; then + local cmName=${domainUID}-deploy-scripts-osb-job-cm + kubectl delete configmap ${cmName} -n $namespace + fi + +} + +function startDeployArtifacts { + # Setup the environment for running this script and perform initial validation checks + initialize + + # Generate files for creating the domain + createFiles + + # Check that the domain secret exists and contains the required elements + validateDomainSecret + + # Validate the persistent volume claim + if [ "${doValidation}" == true ] && [ "${artifactsSourceType}" == "PersistentVolume" ]; then + validateArtifactsPVC + fi + + # Deploy the artifacts + deployArtifacts + + # cleanup the configmaps which holds the deployment scripts + cleanUpConfigMaps + + # Print a summary + printSummary + +} + +function checkForErrors { + + domain=$1 + CONTAINER_NAME_SUFFIX="deploy-artifacts-job" + JOB_NAME="${domainUID}-${CONTAINER_NAME_SUFFIX}-${runId}" + CONTAINER_NAME="${domain}-${CONTAINER_NAME_SUFFIX}" + CONTAINER_ERRORS=`kubectl logs jobs/${JOB_NAME} ${CONTAINER_NAME} -n ${namespace} | grep "ERROR:" ` + CONTAINER_ERR_COUNT=`echo ${CONTAINER_ERRORS} | grep "ERROR:" | wc | awk ' {print $1; }'` + if [ "$CONTAINER_ERR_COUNT" != "0" ]; then + echo "A failure was detected in the log file for job ${JOB_NAME}." + echo "$CONTAINER_ERRORS" + echo "Check the log output for additional information." + fail "Exiting due to failure - the job has failed!" 
+ fi + +} +# +# Function to run the job that deploy the artifacts +# +function deployArtifacts { + + # create the config map for the job + deployConfigmap + + # There is no way to re-run a kubernetes job, so first delete any prior job + CONTAINER_NAME_SUFFIX="deploy-artifacts-job" + JOB_NAME="${domainUID}-${CONTAINER_NAME_SUFFIX}-${runId}" + if [[ $domainType =~ "soa" ]]; then + SOA_CONTAINER_NAME="soa-${CONTAINER_NAME_SUFFIX}" + fi + if [[ $domainType =~ "osb" ]]; then + OSB_CONTAINER_NAME="osb-${CONTAINER_NAME_SUFFIX}" + fi + + deleteK8sObj job $JOB_NAME ${createJobOutput} + + echo Deploying artifacts by creating the job ${createJobOutput} + kubectl create -f ${createJobOutput} + + echo "Waiting for the job to complete..." + JOB_STATUS="0" + max=`expr ${timeout} / 30` + count=0 + while [ "$JOB_STATUS" != "Completed" -a $count -lt $max ] ; do + sleep 30 + count=`expr $count + 1` + JOBS=`kubectl get pods -n ${namespace} | grep ${JOB_NAME}` + JOB_STATUS=`echo $JOBS | awk ' { print $3; } '` + JOB_INFO=`echo $JOBS | awk ' { print "pod", $1, "status is", $3; } '` + echo "status on iteration $count of $max for $domainUID" + echo "$JOB_INFO" + # Terminate the retry loop when a fatal error has already occurred. Search for "ERROR:" in the job log file + if [ "$JOB_STATUS" != "Completed" ]; then + if [[ $domainType =~ "soa" ]]; then + checkForErrors "soa" + fi + if [[ $domainType =~ "osb" ]]; then + checkForErrors "osb" + fi + fi + done + + # Confirm the job pod is status completed + if [ "$JOB_STATUS" != "Completed" ]; then + echo "The deploy artifacts job is not showing status completed after waiting ${timeout} seconds." + echo "Check the log output for errors." + if [[ $domainType =~ "soa" ]]; then + kubectl logs jobs/$JOB_NAME $SOA_CONTAINER_NAME -n ${namespace} + fi + if [[ $domainType =~ "osb" ]]; then + kubectl logs jobs/$JOB_NAME $OSB_CONTAINER_NAME -n ${namespace} + fi + fail "Exiting due to failure - the job status is not Completed!" 
+ fi + + # Check for successful completion in log file + JOB_POD=`kubectl get pods -n ${namespace} | grep ${JOB_NAME} | awk ' { print $1; } '` + if [[ $domainType =~ "soa" ]]; then + SOA_JOB_STS=`kubectl logs jobs/$JOB_NAME $SOA_CONTAINER_NAME -n ${namespace} | grep "Successfully Completed" | awk ' { print $1; } '` + if [ "${SOA_JOB_STS}" != "Successfully" ]; then + echo The log file for the deploy artifacts job does not contain a successful completion status + echo Check the log output for errors + kubectl logs jobs/$JOB_NAME $SOA_CONTAINER_NAME -n ${namespace} + deployFail="true" + fi + fi + if [[ $domainType =~ "osb" ]]; then + OSB_JOB_STS=`kubectl logs jobs/$JOB_NAME $OSB_CONTAINER_NAME -n ${namespace} | grep "Successfully Completed" | awk ' { print $1; } '` + if [ "${OSB_JOB_STS}" != "Successfully" ]; then + echo The log file for the deploy artifacts job does not contain a successful completion status + echo Check the log output for errors + kubectl logs jobs/$JOB_NAME $OSB_CONTAINER_NAME -n ${namespace} + deployFail="true" + fi + fi + if [[ $deployFail == "true" ]]; then + fail "Exiting due to failure - the job log file does not contain a successful completion status!" 
+ fi +} + +# +# Function to output to the console a summary of the work completed +# + +function printSummary { + + echo "The following files were generated:" + echo " ${deployOutputDir}/deploy-artifacts-inputs.yaml" + echo " ${createJobOutput}" + echo " ${dcrOutput}" + echo "" + echo "Completed" +} + +# Perform the sequence of steps to deploy the artifacts +startDeployArtifacts + + diff --git a/OracleSOASuite/kubernetes/create-soa-domain/domain-home-on-pv/deploy-docker-file/Dockerfile b/OracleSOASuite/kubernetes/create-soa-domain/domain-home-on-pv/deploy-docker-file/Dockerfile new file mode 100644 index 000000000..0d76d491a --- /dev/null +++ b/OracleSOASuite/kubernetes/create-soa-domain/domain-home-on-pv/deploy-docker-file/Dockerfile @@ -0,0 +1,18 @@ +# Copyright (c) 2021, Oracle and/or its affiliates. +# Licensed under the Universal Permissive License v 1.0 as shown at https://oss.oracle.com/licenses/upl. + +# This is a sample Dockerfile for supplying deployment artifacts in image + +FROM busybox +ARG SOA_ARTIFACTS_ARCHIVE_PATH=/u01/sarchives +ARG OSB_ARTIFACTS_ARCHIVE_PATH=/u01/sbarchives +ARG USER=oracle +ARG USERID=1000 +ARG GROUP=root +ENV SOA_ARTIFACTS_ARCHIVE_PATH=${SOA_ARTIFACTS_ARCHIVE_PATH} +ENV OSB_ARTIFACTS_ARCHIVE_PATH=${OSB_ARTIFACTS_ARCHIVE_PATH} +RUN adduser -D -u ${USERID} -G $GROUP $USER +COPY soa/ ${SOA_ARTIFACTS_ARCHIVE_PATH}/ +COPY osb/ ${OSB_ARTIFACTS_ARCHIVE_PATH}/ +RUN chown -R $USER:$GROUP ${SOA_ARTIFACTS_ARCHIVE_PATH}/ ${OSB_ARTIFACTS_ARCHIVE_PATH}/ +USER $USER diff --git a/OracleSOASuite/kubernetes/create-soa-domain/domain-home-on-pv/deploy-docker-file/build.sh b/OracleSOASuite/kubernetes/create-soa-domain/domain-home-on-pv/deploy-docker-file/build.sh new file mode 100755 index 000000000..2d7e1fee8 --- /dev/null +++ b/OracleSOASuite/kubernetes/create-soa-domain/domain-home-on-pv/deploy-docker-file/build.sh @@ -0,0 +1,79 @@ +#!/bin/bash +# Copyright (c) 2021, Oracle and/or its affiliates. 
+# Licensed under the Universal Permissive License v 1.0 as shown at https://oss.oracle.com/licenses/upl +# +# Script to build a Docker image for Oracle SOA suite artifacts. +# +#============================================================= +usage() { +cat << EOF +Usage: build.sh -t [tag] +Builds a Docker Image with Oracle SOA/OSB artifacts +Parameters: + -h: view usage + -t: tag for image, default is 12.2.1.4 +EOF +exit $1 +} +#============================================================= +#== MAIN starts here... +#============================================================= +TAG="12.2.1.4" +while getopts "ht:" optname; do + case "$optname" in + "h") + usage 0 + ;; + "t") + TAG="$OPTARG" + ;; + *) + # Should not occur + echo "ERROR: Invalid argument for build.sh" + usage 1 + ;; + esac +done + +IMAGE_NAME="artifacts:$TAG" +DOCKERFILE_NAME=Dockerfile +# Proxy settings - Set your own proxy environment +if [ "${http_proxy}" != "" ]; then + PROXY_SETTINGS="--build-arg http_proxy=${http_proxy}" +fi + +if [ "${https_proxy}" != "" ]; then + PROXY_SETTINGS="$PROXY_SETTINGS --build-arg https_proxy=${https_proxy}" +fi + +if [ "${no_proxy}" != "" ]; then + PROXY_SETTINGS="$PROXY_SETTINGS --build-arg no_proxy=${no_proxy}" +fi + +# ################## # +# BUILDING THE IMAGE # +# ################## # +buildCmd="docker build $BUILD_OPTS --force-rm=true $PROXY_SETTINGS -t $IMAGE_NAME -f $DOCKERFILE_NAME ." + +# BUILD THE IMAGE (replace all environment variables) +BUILD_START=$(date '+%s') +${buildCmd} || { + echo "ERROR: There was an error building the image." + exit 1 +} +status=$? + +BUILD_END=$(date '+%s') +BUILD_ELAPSED=`expr $BUILD_END - $BUILD_START` + + +if [ ${status} -eq 0 ]; then + cat << EOF +INFO: Artifacts image for Oracle SOA suite + is ready to be extended. + --> $IMAGE_NAME +INFO: Build completed in $BUILD_ELAPSED seconds. +EOF +else + echo "ERROR: Artifacts image for Oracle SOA Suite was NOT successfully created. 
Check the output and correct any reported problems with the docker build operation." +fi diff --git a/OracleSOASuite/kubernetes/create-soa-domain/domain-home-on-pv/deploy/osb/deploy.sh b/OracleSOASuite/kubernetes/create-soa-domain/domain-home-on-pv/deploy/osb/deploy.sh new file mode 100755 index 000000000..c5407504a --- /dev/null +++ b/OracleSOASuite/kubernetes/create-soa-domain/domain-home-on-pv/deploy/osb/deploy.sh @@ -0,0 +1,47 @@ +#!/bin/bash +# Copyright (c) 2021, Oracle and/or its affiliates. +# Licensed under the Universal Permissive License v 1.0 as shown at https://oss.oracle.com/licenses/upl. + +script="${BASH_SOURCE[0]}" +scriptDir="$( cd "$( dirname "${script}" )" && pwd )" + +# Set the required environment values +. $ORACLE_HOME/wlserver/server/bin/setWLSEnv.sh + +function exitIfError { + if [ "$1" != "0" ]; then + echo "$2" + exit $1 + fi +} +# Deploys Oracle Service Bus archive +function deploy { + osb_archive=$1 + tmp_dir=$(mktemp -d -t ci-XXXXXXXXXX) + cp $scriptDir/import.properties.template $tmp_dir/import.properties + sed -i -e "s:%DOMAIN_UID%:${DOMAIN_UID}:g" $tmp_dir/import.properties + sed -i -e "s:%ADMIN_SERVER_NAME_SVC%:${ADMIN_SERVER_NAME_SVC}:g" $tmp_dir/import.properties + sed -i -e "s:%ADMIN_LISTEN_PORT%:${ADMIN_LISTEN_PORT}:g" $tmp_dir/import.properties + sed -i -e "s:%USERNAME%:$(cat /weblogic-operator/secrets/username):g" $tmp_dir/import.properties + sed -i -e "s:%PASSWORD%:$(cat /weblogic-operator/secrets/password):g" $tmp_dir/import.properties + sed -i -e "s:%OSB_JAR%:$1:g" $tmp_dir/import.properties + cp /u01/sbarchives/${osb_archive} $tmp_dir + cd $tmp_dir + java weblogic.WLST $scriptDir/import.py -p import.properties +} + +# Reads the available Oracle Service Bus archives and deploys +cd /u01/sbarchives/ +sbars=$(ls *) +for sbar in $sbars +do + deploy $sbar +done + +exitIfError $? "ERROR: $script failed." + +# DON'T REMOVE THIS +# This script has to contain this log message. 
+# It is used to determine if the job is really completed. +echo "Successfully Completed" + diff --git a/OracleSOASuite/kubernetes/create-soa-domain/domain-home-on-pv/deploy/osb/import.properties.template b/OracleSOASuite/kubernetes/create-soa-domain/domain-home-on-pv/deploy/osb/import.properties.template new file mode 100755 index 000000000..24a70d724 --- /dev/null +++ b/OracleSOASuite/kubernetes/create-soa-domain/domain-home-on-pv/deploy/osb/import.properties.template @@ -0,0 +1,21 @@ +# Copyright (c) 2021, Oracle and/or its affiliates. +# Licensed under the Universal Permissive License v 1.0 as shown at https://oss.oracle.com/licenses/upl. +# +################################################################## +# OSB Admin Security Configuration # +################################################################## +adminUrl=t3://%DOMAIN_UID%-%ADMIN_SERVER_NAME_SVC%:%ADMIN_LISTEN_PORT% +importUser=%USERNAME% +importPassword=%PASSWORD% + +################################################################## +# OSB Jar to be exported, optional customization file # +################################################################## +importJar=%OSB_JAR% +#customizationFile=OSBCustomizationFile.xml + +################################################################## +# Optional passphrase and project name # +################################################################## +passphrase=osb +project=default diff --git a/OracleSOASuite/kubernetes/create-soa-domain/domain-home-on-pv/deploy/osb/import.py b/OracleSOASuite/kubernetes/create-soa-domain/domain-home-on-pv/deploy/osb/import.py new file mode 100755 index 000000000..a5c4329f2 --- /dev/null +++ b/OracleSOASuite/kubernetes/create-soa-domain/domain-home-on-pv/deploy/osb/import.py @@ -0,0 +1,217 @@ +# Copyright (c) 2021, Oracle and/or its affiliates. +# Licensed under the Universal Permissive License v 1.0 as shown at https://oss.oracle.com/licenses/upl. 
+ +import wlstModule +from com.bea.wli.sb.management.configuration import SessionManagementMBean +from com.bea.wli.sb.management.configuration import ALSBConfigurationMBean +from com.bea.wli.config import Ref +from com.bea.wli.sb.util import Refs + +from java.util import HashMap +from java.util import HashSet +from java.util import ArrayList +from java.io import FileInputStream +from java.util import Collections +from com.bea.wli.config.resource import Diagnostic +from com.bea.wli.sb.util import EnvValueTypes +from com.bea.wli.config.env import QualifiedEnvValue +from com.bea.wli.config.env import EnvValueQuery +from com.bea.wli.config.customization import EnvValueCustomization + + +from com.bea.wli.config.customization import Customization +from com.bea.wli.sb.management.importexport import ALSBImportOperation + +import sys + +#======================================================================================= +# Entry function to deploy project configuration and resources +# into a ALSB domain +#======================================================================================= + +def importToALSBDomain(importConfigFile): + try: + SessionMBean = None + print 'Loading Deployment config from :', importConfigFile + exportConfigProp = loadProps(importConfigFile) + adminUrl = exportConfigProp.get("adminUrl") + importUser = exportConfigProp.get("importUser") + importPassword = exportConfigProp.get("importPassword") + + importJar = exportConfigProp.get("importJar") + customFile = exportConfigProp.get("customizationFile") + + passphrase = exportConfigProp.get("passphrase") + project = exportConfigProp.get("project") + + connectToServer(importUser, importPassword, adminUrl) + + print 'Attempting to import :', importJar, "on ALSB Admin Server listening on :", adminUrl + + theBytes = readBinaryFile(importJar) + print 'Read file', importJar + sessionName = createSessionName() + print 'Created session', sessionName + SessionMBean = 
getSessionManagementMBean(sessionName) + print 'SessionMBean started session' + ALSBConfigurationMBean = findService(String("ALSBConfiguration.").concat(sessionName), "com.bea.wli.sb.management.configuration.ALSBConfigurationMBean") + print "ALSBConfiguration MBean found", ALSBConfigurationMBean + ALSBConfigurationMBean.uploadJarFile(theBytes) + print 'Jar Uploaded' + + if project == None: + print 'No project specified, additive deployment performed' + alsbJarInfo = ALSBConfigurationMBean.getImportJarInfo() + alsbImportPlan = alsbJarInfo.getDefaultImportPlan() + alsbImportPlan.setPassphrase(passphrase) + alsbImportPlan.setPreserveExistingEnvValues(true) + importResult = ALSBConfigurationMBean.importUploaded(alsbImportPlan) + SessionMBean.activateSession(sessionName, "Complete test import with customization using wlst") + else: + print 'ALSB project', project, 'will get overlaid' + alsbJarInfo = ALSBConfigurationMBean.getImportJarInfo() + alsbImportPlan = alsbJarInfo.getDefaultImportPlan() + alsbImportPlan.setPassphrase(passphrase) + operationMap=HashMap() + operationMap = alsbImportPlan.getOperations() + print + print 'Default importPlan' + printOpMap(operationMap) + set = operationMap.entrySet() + + alsbImportPlan.setPreserveExistingEnvValues(true) + + #boolean + abort = false + #list of created ref + createdRef = ArrayList() + + for entry in set: + ref = entry.getKey() + op = entry.getValue() + #set different logic based on the resource type + type = ref.getTypeId + if type == Refs.SERVICE_ACCOUNT_TYPE or type == Refs.SERVICE_PROVIDER_TYPE: + if op.getOperation() == ALSBImportOperation.Operation.Create: + print 'Unable to import a service account or a service provider on a target system', ref + abort = true + elif op.getOperation() == ALSBImportOperation.Operation.Create: + #keep the list of created resources + createdRef.add(ref) + + if abort == true : + print 'This jar must be imported manually to resolve the service account and service provider dependencies' + 
SessionMBean.discardSession(sessionName) + raise + + print + print 'Modified importPlan' + printOpMap(operationMap) + importResult = ALSBConfigurationMBean.importUploaded(alsbImportPlan) + + printDiagMap(importResult.getImportDiagnostics()) + + if importResult.getFailed().isEmpty() == false: + print 'One or more resources could not be imported properly' + raise + + #customize if a customization file is specified + #affects only the created resources + if customFile != None : + print 'Loading customization File', customFile + print 'Customization applied to the created resources only', createdRef + iStream = FileInputStream(customFile) + customizationList = Customization.fromXML(iStream) + filteredCustomizationList = ArrayList() + setRef = HashSet(createdRef) + + # apply a filter to all the customizations to narrow the target to the created resources + for customization in customizationList: + print customization + newcustomization = customization.clone(setRef) + filteredCustomizationList.add(newcustomization) + + ALSBConfigurationMBean.customize(filteredCustomizationList) + + SessionMBean.activateSession(sessionName, "Complete test import with customization using wlst") + + print "Deployment of : " + importJar + " successful" + except: + print "Unexpected error:", sys.exc_info()[0] + if SessionMBean != None: + SessionMBean.discardSession(sessionName) + raise + +#======================================================================================= +# Utility function to print the list of operations +#======================================================================================= +def printOpMap(map): + set = map.entrySet() + for entry in set: + op = entry.getValue() + print op.getOperation(), + ref = entry.getKey() + print ref + print + +#======================================================================================= +# Utility function to print the diagnostics 
+#======================================================================================= +def printDiagMap(map): + set = map.entrySet() + for entry in set: + diag = entry.getValue().toString() + print diag + print + +#======================================================================================= +# Utility function to load properties from a config file +#======================================================================================= + +def loadProps(configPropFile): + propInputStream = FileInputStream(configPropFile) + configProps = Properties() + configProps.load(propInputStream) + return configProps + +#======================================================================================= +# Connect to the Admin Server +#======================================================================================= + +def connectToServer(username, password, url): + connect(username, password, url) + domainRuntime() + +#======================================================================================= +# Utility function to read a binary file +#======================================================================================= +def readBinaryFile(fileName): + file = open(fileName, 'rb') + bytes = file.read() + return bytes + +#======================================================================================= +# Utility function to create an arbitrary session name +#======================================================================================= +def createSessionName(): + sessionName = String("SessionScript"+Long(System.currentTimeMillis()).toString()) + return sessionName + +#======================================================================================= +# Utility function to load a session MBeans +#======================================================================================= +def getSessionManagementMBean(sessionName): + SessionMBean = findService("SessionManagement", 
"com.bea.wli.sb.management.configuration.SessionManagementMBean") + SessionMBean.createSession(sessionName) + return SessionMBean + +# IMPORT script init +try: + # import the service bus configuration + # argv[1] is the export config properties file + importToALSBDomain('import.properties') + +except: + print "Unexpected error: ", sys.exc_info()[0] + dumpStack() + raise diff --git a/OracleSOASuite/kubernetes/create-soa-domain/domain-home-on-pv/deploy/soa/deploy.sh b/OracleSOASuite/kubernetes/create-soa-domain/domain-home-on-pv/deploy/soa/deploy.sh new file mode 100755 index 000000000..e0f0f72eb --- /dev/null +++ b/OracleSOASuite/kubernetes/create-soa-domain/domain-home-on-pv/deploy/soa/deploy.sh @@ -0,0 +1,38 @@ +#!/bin/bash +# Copyright (c) 2021, Oracle and/or its affiliates. +# Licensed under the Universal Permissive License v 1.0 as shown at https://oss.oracle.com/licenses/upl. + +script="${BASH_SOURCE[0]}" +scriptDir="$( cd "$( dirname "${script}" )" && pwd )" + +. $ORACLE_HOME/wlserver/server/bin/setWLSEnv.sh + +function exitIfError { + if [ "$1" != "0" ]; then + echo "$2" + exit $1 + fi +} + +function deploy { + cd /u01/oracle/soa/bin + ant -f ant-sca-deploy.xml \ + -DserverURL=http://${DOMAIN_UID}-cluster-${SOA_CLUSTER_NAME}:${SOA_MANAGED_SERVER_PORT} \ + -DsarLocation=/u01/sarchives/$1 \ + -Doverwrite=true \ + -Duser=$(cat /weblogic-operator/secrets/username) -Dpassword=$(cat /weblogic-operator/secrets/password) +} + +cd /u01/sarchives/ +sars=$(ls *) +for sar in $sars +do + deploy $sar +done + +exitIfError $? "ERROR: $script failed." + +# DON'T REMOVE THIS +# This script has to contain this log message. +# It is used to determine if the job is really completed. 
+echo "Successfully Completed" diff --git a/OracleSOASuite/kubernetes/create-soa-domain/utils/soasuite-domain-template.yaml b/OracleSOASuite/kubernetes/create-soa-domain/utils/soasuite-domain-template.yaml old mode 100644 new mode 100755 diff --git a/OracleSOASuite/kubernetes/create-soa-domain/utils/validate.sh b/OracleSOASuite/kubernetes/create-soa-domain/utils/validate.sh index bb9038ca2..6e77c0883 100755 --- a/OracleSOASuite/kubernetes/create-soa-domain/utils/validate.sh +++ b/OracleSOASuite/kubernetes/create-soa-domain/utils/validate.sh @@ -193,3 +193,23 @@ function validateCommonInputs_SOA { failIfValidationErrors } + +# +# Function to validate the image pull secret name +# +function validateArtifactsImagePullSecretName { + if [ ! -z ${artifactsImagePullSecretName} ]; then + validateLowerCase artifactsImagePullSecretName ${artifactsImagePullSecretName} + artifactsImagePullSecretPrefix="" + else + # Set name blank when not specified, and comment out the yaml + artifactsImagePullSecretName="" + artifactsImagePullSecretPrefix="#" + fi +} + + +function validateArtifactsPVC { + # Validate the PVC using existing function + validateDomainPVC +} diff --git a/OracleSOASuite/kubernetes/create-weblogic-domain-credentials/README.md b/OracleSOASuite/kubernetes/create-weblogic-domain-credentials/README.md old mode 100644 new mode 100755 index 3e4bc1733..51f6d86a6 --- a/OracleSOASuite/kubernetes/create-weblogic-domain-credentials/README.md +++ b/OracleSOASuite/kubernetes/create-weblogic-domain-credentials/README.md @@ -47,3 +47,4 @@ metadata: type: Opaque ``` + diff --git a/OracleSOASuite/kubernetes/create-weblogic-domain-credentials/create-weblogic-credentials.sh b/OracleSOASuite/kubernetes/create-weblogic-domain-credentials/create-weblogic-credentials.sh index b1eda394b..4adbb3e2a 100755 --- a/OracleSOASuite/kubernetes/create-weblogic-domain-credentials/create-weblogic-credentials.sh +++ 
b/OracleSOASuite/kubernetes/create-weblogic-domain-credentials/create-weblogic-credentials.sh @@ -118,3 +118,4 @@ if [ "${SECRET}" != "1" ]; then fi echo "The secret ${secretName} has been successfully created in the ${namespace} namespace." + diff --git a/OracleSOASuite/kubernetes/create-weblogic-domain-pv-pvc/README.md b/OracleSOASuite/kubernetes/create-weblogic-domain-pv-pvc/README.md old mode 100644 new mode 100755 diff --git a/OracleSOASuite/kubernetes/create-weblogic-domain-pv-pvc/create-pv-pvc-inputs.yaml b/OracleSOASuite/kubernetes/create-weblogic-domain-pv-pvc/create-pv-pvc-inputs.yaml old mode 100644 new mode 100755 diff --git a/OracleSOASuite/kubernetes/create-weblogic-domain-pv-pvc/pv-template.yaml b/OracleSOASuite/kubernetes/create-weblogic-domain-pv-pvc/pv-template.yaml old mode 100644 new mode 100755 diff --git a/OracleSOASuite/kubernetes/create-weblogic-domain-pv-pvc/pvc-template.yaml b/OracleSOASuite/kubernetes/create-weblogic-domain-pv-pvc/pvc-template.yaml old mode 100644 new mode 100755 diff --git a/OracleSOASuite/kubernetes/delete-domain/README.md b/OracleSOASuite/kubernetes/delete-domain/README.md old mode 100644 new mode 100755 diff --git a/OracleSOASuite/kubernetes/domain-lifecycle/README.md b/OracleSOASuite/kubernetes/domain-lifecycle/README.md old mode 100644 new mode 100755 index d24709517..0eeab95d5 --- a/OracleSOASuite/kubernetes/domain-lifecycle/README.md +++ b/OracleSOASuite/kubernetes/domain-lifecycle/README.md @@ -81,9 +81,9 @@ domain.weblogic.oracle/domain1 patched The `scaleCluster.sh` script scales a WebLogic cluster by patching the `spec.clusters[].replicas` attribute of the domain resource to the specified value. The operator will perform the scaling operation for the WebLogic cluster based on the specified value of the `replicas` attribute after its value is updated. See the script `usage` information by using the `-h` option. 
``` $ scaleCluster.sh -d domain1 -n weblogic-domain-1 -c cluster-1 -r 3 -[2021-02-26T19:04:14.335 UTC][INFO] Patching replicas for cluster 'cluster-1' to '3'. +[2021-02-26T19:04:14.335000Z][INFO] Patching replicas for cluster 'cluster-1' to '3'. domain.weblogic.oracle/domain1 patched -[2021-02-26T19:04:14.466 UTC][INFO] Successfully patched replicas for cluster 'cluster-1'! +[2021-02-26T19:04:14.466000Z][INFO] Successfully patched replicas for cluster 'cluster-1'! ``` ### Script to view the status of a WebLogic cluster @@ -123,42 +123,42 @@ The `rollDomain.sh` script updates the value of the `spec.restartVersion` attrib ``` $ rollDomain.sh -d domain1 -n weblogic-domain-1 -[2021-03-24T04:01:19.733 UTC][INFO] Patching restartVersion for domain 'domain1' to '1'. +[2021-03-24T04:01:19.733000Z][INFO] Patching restartVersion for domain 'domain1' to '1'. domain.weblogic.oracle/domain1 patched -[2021-03-24T04:01:19.850 UTC][INFO] Successfully patched restartVersion for domain 'domain1'! +[2021-03-24T04:01:19.850000Z][INFO] Successfully patched restartVersion for domain 'domain1'! ``` Use the following command to roll the Server Pods in a WebLogic domain with a specific `restartVersion`: ``` $ rollDomain.sh -r v1 -d domain1 -n weblogic-domain-1 -[2021-03-24T13:43:47.586 UTC][INFO] Patching restartVersion for domain 'domain1' to 'v1'. +[2021-03-24T13:43:47.586000Z][INFO] Patching restartVersion for domain 'domain1' to 'v1'. domain.weblogic.oracle/domain1 patched -[2021-03-24T13:43:47.708 UTC][INFO] Successfully patched restartVersion for domain 'domain1'! +[2021-03-24T13:43:47.708000Z][INFO] Successfully patched restartVersion for domain 'domain1'! ``` The `rollCluster.sh` script updates the value of the `spec.clusters[].restartVersion` attribute of the domain resource. Then, the operator will do a rolling restart of the WebLogic cluster Server Pods after the value of the `spec.clusters[].restartVersion` is updated. 
You can provide the new value of the `restartVersion` as a parameter to the script or the script will automatically generate a new value to trigger the rolling restart. See the script `usage` information by using the `-h` option. ``` $ rollCluster.sh -c cluster-1 -d domain1 -n weblogic-domain-1 -[2021-03-24T04:03:27.521 UTC][INFO] Patching restartVersion for cluster 'cluster-1' to '2'. +[2021-03-24T04:03:27.521000Z][INFO] Patching restartVersion for cluster 'cluster-1' to '2'. domain.weblogic.oracle/domain1 patched -[2021-03-24T04:03:27.669 UTC][INFO] Successfully patched restartVersion for cluster 'cluster-1'! +[2021-03-24T04:03:27.669000Z][INFO] Successfully patched restartVersion for cluster 'cluster-1'! ``` Use the following command to roll the WebLogic Cluster Servers with a specific `restartVersion`: ``` $ rollCluster.sh -r v2 -c cluster-1 -d domain1 -n weblogic-domain-1 -[2021-03-24T13:46:16.833 UTC][INFO] Patching restartVersion for cluster 'cluster-1' to 'v2'. +[2021-03-24T13:46:16.833000Z][INFO] Patching restartVersion for cluster 'cluster-1' to 'v2'. domain.weblogic.oracle/domain1 patched -[2021-03-24T13:46:16.975 UTC][INFO] Successfully patched restartVersion for cluster 'cluster-1'! +[2021-03-24T13:46:16.975000Z][INFO] Successfully patched restartVersion for cluster 'cluster-1'! ``` ### Scripts to restart a WebLogic Server in a domain The `restartServer.sh` script can be used to restart a WebLogic Server in a domain. This script restarts the Server by deleting the Server Pod for the WebLogic Server instance. ``` $ restartServer.sh -s managed-server1 -d domain1 -n weblogic-domain-1 -[2021-03-24T22:20:22.498 UTC][INFO] Initiating restart of 'managed-server1' by deleting server pod 'domain1-managed-server1'. -[2021-03-24T22:20:37.614 UTC][INFO] Server restart succeeded ! +[2021-03-24T22:20:22.498000Z][INFO] Initiating restart of 'managed-server1' by deleting server pod 'domain1-managed-server1'. 
+[2021-03-24T22:20:37.614000Z][INFO] Server restart succeeded ! ``` ### Scripts to explicitly initiate introspection of a WebLogic domain @@ -168,17 +168,17 @@ The `introspectDomain.sh` script can be used to rerun a WebLogic domain's intros Use the following command to rerun a domain's introspect job with the `introspectVersion` value generated by the script. ``` $ introspectDomain.sh -d domain1 -n weblogic-domain-1 -[2021-03-24T21:37:55.989 UTC][INFO] Patching introspectVersion for domain 'domain1' to '1'. +[2021-03-24T21:37:55.989000Z][INFO] Patching introspectVersion for domain 'domain1' to '1'. domain.weblogic.oracle/domain1 patched -[2021-03-24T21:37:56.110 UTC][INFO] Successfully patched introspectVersion for domain 'domain1'! +[2021-03-24T21:37:56.110000Z][INFO] Successfully patched introspectVersion for domain 'domain1'! ``` Use the following command to rerun a domain's introspect job with a specific `introspectVersion` value. ``` $ introspectDomain.sh -i v1 -d domain1 -n weblogic-domain-1 -[2021-03-24T21:38:34.369 UTC][INFO] Patching introspectVersion for domain 'domain1' to 'v1'. +[2021-03-24T21:38:34.369000Z][INFO] Patching introspectVersion for domain 'domain1' to 'v1'. domain.weblogic.oracle/domain1 patched -[2021-03-24T21:38:34.488 UTC][INFO] Successfully patched introspectVersion for domain 'domain1'! +[2021-03-24T21:38:34.488000Z][INFO] Successfully patched introspectVersion for domain 'domain1'! ``` ### Watching the Pods after executing life cycle scripts diff --git a/OracleSOASuite/kubernetes/domain-lifecycle/clusterStatus.sh b/OracleSOASuite/kubernetes/domain-lifecycle/clusterStatus.sh index 353f71c36..8bfeb45f3 100755 --- a/OracleSOASuite/kubernetes/domain-lifecycle/clusterStatus.sh +++ b/OracleSOASuite/kubernetes/domain-lifecycle/clusterStatus.sh @@ -1,4 +1,6 @@ -#!/bin/bash +# !/bin/sh +# Copyright (c) 2021, Oracle and/or its affiliates. +# Licensed under the Universal Permissive License v 1.0 as shown at https://oss.oracle.com/licenses/upl. 
set -eu set -o pipefail diff --git a/OracleSOASuite/kubernetes/domain-lifecycle/helper.sh b/OracleSOASuite/kubernetes/domain-lifecycle/helper.sh old mode 100644 new mode 100755 index 4216dcd5b..efb277e63 --- a/OracleSOASuite/kubernetes/domain-lifecycle/helper.sh +++ b/OracleSOASuite/kubernetes/domain-lifecycle/helper.sh @@ -1006,19 +1006,15 @@ function executePatchCommand { } # timestamp -# purpose: echo timestamp in the form yyyymmddThh:mm:ss.mmm ZZZ -# example: 20181001T14:00:00.001 UTC +# purpose: echo timestamp in the form yyyy-mm-ddThh:mm:ss.nnnnnnZ +# example: 2018-10-01T14:00:00.000001Z function timestamp() { - timestamp="$(set +e && date --utc '+%Y-%m-%dT%H:%M:%S %N %s %Z' 2>&1 || echo illegal)" + local timestamp="`date --utc '+%Y-%m-%dT%H:%M:%S.%NZ' 2>&1`" if [ ! "${timestamp/illegal/xyz}" = "${timestamp}" ]; then # old shell versions don't support %N or --utc - timestamp="`date -u '+%Y-%m-%dT%H:%M:%S 000000 %s %Z' 2>&1`" + timestamp="`date -u '+%Y-%m-%dT%H:%M:%S.000000Z' 2>&1`" fi - local ymdhms="`echo $timestamp | awk '{ print $1 }'`" - # convert nano to milli - local milli="`echo $timestamp | awk '{ print $2 }' | sed 's/\(^...\).*/\1/'`" - local timezone="`echo $timestamp | awk '{ print $4 }'`" - echo "${ymdhms}.${milli} ${timezone}" + echo "${timestamp}" } # diff --git a/OracleSOASuite/kubernetes/elasticsearch-and-kibana/README.md b/OracleSOASuite/kubernetes/elasticsearch-and-kibana/README.md old mode 100644 new mode 100755 diff --git a/OracleSOASuite/kubernetes/elasticsearch-and-kibana/elasticsearch_and_kibana.yaml b/OracleSOASuite/kubernetes/elasticsearch-and-kibana/elasticsearch_and_kibana.yaml old mode 100644 new mode 100755 diff --git a/OracleSOASuite/kubernetes/imagetool-scripts/additionalBuildCmds.txt b/OracleSOASuite/kubernetes/imagetool-scripts/additionalBuildCmds.txt old mode 100644 new mode 100755 index 2a8427a69..d6f6b0150 --- a/OracleSOASuite/kubernetes/imagetool-scripts/additionalBuildCmds.txt +++ 
b/OracleSOASuite/kubernetes/imagetool-scripts/additionalBuildCmds.txt @@ -1,5 +1,5 @@ [package-manager-packages] -hostname ant +hostname [final-build-commands] @@ -8,7 +8,7 @@ ENV ORACLE_HOME=/u01/oracle \ SCRIPT_FILE=/u01/oracle/container-scripts/* \ HEALTH_SCRIPT_FILE=/u01/oracle/container-scripts/get_healthcheck_url.sh \ JAVA_OPTIONS="-Doracle.jdbc.fanEnabled=false -Dweblogic.StdoutDebugEnabled=false" \ - PATH=$PATH:/usr/java/default/bin:/u01/oracle/oracle_common/common/bin:/u01/oracle/wlserver/common/bin:/u01/oracle/container-scripts + PATH=$PATH:/u01/oracle/container-scripts:/u01/oracle/oracle_common/modules/thirdparty/org.apache.ant/1.10.5.0.0/apache-ant-1.10.5/bin USER root RUN mkdir -p $VOLUME_DIR && chown oracle:root /u01 $VOLUME_DIR && \ @@ -21,9 +21,9 @@ USER oracle RUN if [ -f "${ORACLE_HOME}/soa/soa/thirdparty/edifecs/XEngine_8_4_1_23.tar.gz" ]; then \ cd $ORACLE_HOME/soa/soa/thirdparty/edifecs && \ - tar -zxvf XEngine_8_4_1_23.tar.gz \ + tar -zxvf XEngine_8_4_1_23.tar.gz; \ else \ - echo -e "\nNo XEngine_8_4_1_23.tar.gz present in ${ORACLE_HOME}/soa/soa/thirdparty/edifecs directory. Skipping untar." 1>&2; \ + echo -e "\nXEngine_8_4_1_23.tar.gz not present in ${ORACLE_HOME}/soa/soa/thirdparty/edifecs directory. 
Skipping untar."; \ fi HEALTHCHECK --start-period=5m --interval=1m CMD curl -k -s --fail `$HEALTH_SCRIPT_FILE` || exit 1 WORKDIR ${ORACLE_HOME} diff --git a/OracleSOASuite/kubernetes/imagetool-scripts/buildArgs b/OracleSOASuite/kubernetes/imagetool-scripts/buildArgs old mode 100644 new mode 100755 index 98ef2f1ed..9dc53f9dd --- a/OracleSOASuite/kubernetes/imagetool-scripts/buildArgs +++ b/OracleSOASuite/kubernetes/imagetool-scripts/buildArgs @@ -1,8 +1,8 @@ create ---jdkVersion=%JDK_VERSION% +--jdkVersion %JDK_VERSION% --type soa_osb_b2b ---version=12.2.1.4.0 ---tag=%BUILDTAG% +--version 12.2.1.4.0 +--tag %BUILDTAG% --pull --chown oracle:root --installerResponseFile %DOCKER_REPO%/OracleFMWInfrastructure/dockerfiles/12.2.1.4.0/install.file,%DOCKER_REPO%/OracleSOASuite/dockerfiles/12.2.1.4.0/install/soasuite.response,%DOCKER_REPO%/OracleSOASuite/dockerfiles/12.2.1.4.0/install/osb.response,%DOCKER_REPO%/OracleSOASuite/dockerfiles/12.2.1.4.0/install/b2b.response diff --git a/OracleSOASuite/kubernetes/logging-services/logstash/README.md b/OracleSOASuite/kubernetes/logging-services/logstash/README.md index c1faa15de..07a9555c7 100755 --- a/OracleSOASuite/kubernetes/logging-services/logstash/README.md +++ b/OracleSOASuite/kubernetes/logging-services/logstash/README.md @@ -56,3 +56,4 @@ Make sure to point the logstash configuration file to correct location and also ``` 1. Now, you can view the diagnostics logs using Kibana with index pattern `logstash-*`. 
+ diff --git a/OracleSOASuite/kubernetes/logging-services/logstash/logstash.conf b/OracleSOASuite/kubernetes/logging-services/logstash/logstash.conf index f9f5511b6..f156528f3 100755 --- a/OracleSOASuite/kubernetes/logging-services/logstash/logstash.conf +++ b/OracleSOASuite/kubernetes/logging-services/logstash/logstash.conf @@ -22,4 +22,4 @@ output { elasticsearch { hosts => ["elasticsearch.default.svc.cluster.local:9200"] } -} \ No newline at end of file +} diff --git a/OracleSOASuite/kubernetes/logging-services/logstash/logstash.yaml b/OracleSOASuite/kubernetes/logging-services/logstash/logstash.yaml index e335e87da..04cddb50b 100755 --- a/OracleSOASuite/kubernetes/logging-services/logstash/logstash.yaml +++ b/OracleSOASuite/kubernetes/logging-services/logstash/logstash.yaml @@ -36,3 +36,4 @@ spec: ports: - containerPort: 5044 name: logstash + diff --git a/OracleSOASuite/kubernetes/logging-services/weblogic-logging-exporter/README.md b/OracleSOASuite/kubernetes/logging-services/weblogic-logging-exporter/README.md index 68e6f1ad6..bba55d8c5 100755 --- a/OracleSOASuite/kubernetes/logging-services/weblogic-logging-exporter/README.md +++ b/OracleSOASuite/kubernetes/logging-services/weblogic-logging-exporter/README.md @@ -61,11 +61,12 @@ Follow these steps to setup WebLogic Logging Exporter in a WebLogic operator env a) Copy setDomainEnv.sh from the pod to local folder. ```bash - $ kubectl cp soans/soainfra-adminserver:/u01/oracle/user_projects/domains/soainfra/bin/setDomainEnv.sh + $ kubectl cp soans/soainfra-adminserver:/u01/oracle/user_projects/domains/soainfra/bin/setDomainEnv.sh setDomainEnv.sh ``` b) Modify setDomainEnv.sh to update the Server Class path. 
```bash - $CLASSPATH=/u01/oracle/user_projects/domains/soainfra/weblogic-logging-exporter.jar:/u01/oracle/user_projects/domains/soainfra/snakeyaml-1.23.jar:${CLASSPATH} + CLASSPATH=/u01/oracle/user_projects/domains/soainfra/weblogic-logging-exporter.jar:/u01/oracle/user_projects/domains/soainfra/snakeyaml-1.27.jar:${CLASSPATH} + export CLASSPATH ``` c) Copy back the modified setDomainEnv.sh to the pod. @@ -127,3 +128,4 @@ Copy WebLogicLoggingExporter.yaml to the domain folder in the WebLogic server po We need to create an index pattern in Kibana for the logs to be available in the dashboard. Create an index pattern `wls*` in `Kibana` > `Management`. After the server starts, you will be able to see the log data from the WebLogic servers in the Kibana dashboard, + diff --git a/OracleSOASuite/kubernetes/logging-services/weblogic-logging-exporter/WebLogicLoggingExporter.yaml b/OracleSOASuite/kubernetes/logging-services/weblogic-logging-exporter/WebLogicLoggingExporter.yaml index 42f404c3e..78e31d6b4 100755 --- a/OracleSOASuite/kubernetes/logging-services/weblogic-logging-exporter/WebLogicLoggingExporter.yaml +++ b/OracleSOASuite/kubernetes/logging-services/weblogic-logging-exporter/WebLogicLoggingExporter.yaml @@ -2,7 +2,6 @@ # Licensed under the Universal Permissive License v 1.0 as shown at https://oss.oracle.com/licenses/upl. # - weblogicLoggingIndexName: wls publishHost: elasticsearch.default.svc.cluster.local publishPort: 9200 diff --git a/OracleSOASuite/kubernetes/monitoring-service/README.md b/OracleSOASuite/kubernetes/monitoring-service/README.md index 756cf4b05..8ca7ad65d 100755 --- a/OracleSOASuite/kubernetes/monitoring-service/README.md +++ b/OracleSOASuite/kubernetes/monitoring-service/README.md @@ -1,107 +1,79 @@ -## Monitor the OracleSOASuite instance using Prometheus and Grafana -Using the `WebLogic Monitoring Exporter` you can scrape runtime information from a running OracleSOASuite instance and monitor them using Prometheus and Grafana. 
+# Monitor the Oracle SOA Suite instance using Prometheus and Grafana +Using the `WebLogic Monitoring Exporter` you can scrape runtime information from a running Oracle SOA Suite instance and monitor them using Prometheus and Grafana. -### Prerequisites +## Prerequisites +- Have Docker and a Kubernetes cluster running and have `kubectl` installed and configured. +- Have Helm installed. +- An Oracle SOA Suite domain cluster deployed by `weblogic-operator` is running in the Kubernetes cluster. -This document assumes that the Prometheus Operator is deployed on the Kubernetes cluster. If it is not already deployed, follow the steps below for deploying the Prometheus Operator. +## Set up monitoring for Oracle SOA Suite domain -#### Clone the kube-prometheus project +Set up the WebLogic Monitoring Exporter that will collect WebLogic Server metrics and monitor Oracle SOA Suite domain. -```bash -$ cd $HOME -$ wget https://github.com/coreos/kube-prometheus/archive/v0.5.0.zip -$ unzip v0.5.0.zip -``` - -#### Label the nodes -Kube-Prometheus requires all the exporter nodes to be labelled with `kubernetes.io/os=linux`. If a node is not labelled, then you must label it using the following command: - -``` -$ kubectl label nodes --all kubernetes.io/os=linux -``` - -#### Create Prometheus and Grafana resources - -Execute the following commands to create the namespace and CRDs: - -**NOTE**: Wait for a minute for each command to process. 
- -```bash -$ cd kube-prometheus-0.5.0 -$ kubectl create -f manifests/setup -$ until kubectl get servicemonitors --all-namespaces ; do date; sleep 1; echo ""; done -$ kubectl create -f manifests/ -``` - -#### Provide external access -To provide external access for Grafana, Prometheus, and Alertmanager, execute the commands below: - -```bash -$ kubectl patch svc grafana -n monitoring --type=json -p '[{"op": "replace", "path": "/spec/type", "value": "NodePort" },{"op": "replace", "path": "/spec/ports/0/nodePort", "value": 32100 }]' -$ kubectl patch svc prometheus-k8s -n monitoring --type=json -p '[{"op": "replace", "path": "/spec/type", "value": "NodePort" },{"op": "replace", "path": "/spec/ports/0/nodePort", "value": 32101 }]' -$ kubectl patch svc alertmanager-main -n monitoring --type=json -p '[{"op": "replace", "path": "/spec/type", "value": "NodePort" },{"op": "replace", "path": "/spec/ports/0/nodePort", "value": 32102 }]' -``` - -**NOTE**: - -* `32100` is the external port for Grafana -* `32101` is the external port for Prometheus -* `32102` is the external port for Alertmanager +**Note**: Either of the following methods can be used to set up monitoring for Oracle SOA Suite domain. Using `setup-monitoring.sh` does the set up in an automated way. --------------- +1. [Set up manually](#set-up-manually) +1. [Set up using `setup-monitoring.sh`](#set-up-using-setup-monitoringsh) -### Set Up the WebLogic Monitoring Exporter +## Set up manually -Set up the WebLogic Monitoring Exporter that will collect WebLogic Server metrics and monitor OracleSOASuite domain. +Before setting up WebLogic Monitoring Exporter, make sure that Prometheus and Grafana are deployed on the Kubernetes cluster. Refer [Deploy Prometheus and Grafana](https://oracle.github.io/fmw-kubernetes/soa-domains/adminguide/monitoring-soa-domains/#deploy-prometheus-and-grafana) for details. 
#### Generate the WebLogic Monitoring Exporter Deployment Package The `wls-exporter.war` package need to be updated and created for each listening ports (Administration Server and Managed Servers) in the domain. -Run the script `get-wls-exporter.sh ` to generate the required WAR files at `${WORKDIR}/monitoring-service/scripts/wls-exporter-deploy`: +Set the below environment values based on your domainType and run the script `get-wls-exporter.sh` to generate the required WAR files at `${WORKDIR}/monitoring-service/scripts/wls-exporter-deploy`: +- adminServerPort +- wlsMonitoringExporterTosoaCluster +- soaManagedServerPort +- wlsMonitoringExporterToosbCluster +- osbManagedServerPort -```bash +Example for `soaosb` domainType: + +``` $ cd ${WORKDIR}/monitoring-service/scripts -$ sh get-wls-exporter.sh +$ export adminServerPort=7001 +$ export wlsMonitoringExporterTosoaCluster=true +$ export soaManagedServerPort=8001 +$ export wlsMonitoringExporterToosbCluster=true +$ export osbManagedServerPort=9001 +$ sh get-wls-exporter.sh ``` -For `soaosb` domainType : +Verify whether the required WAR files are generated at `${WORKDIR}/monitoring-service/scripts/wls-exporter-deploy`. -```bash -$ sh get-wls-exporter.sh soaosb +``` +$ ls ${WORKDIR}/monitoring-service/scripts/wls-exporter-deploy ``` -Sample output: +#### Deploy the WebLogic Monitoring Exporter into the Oracle SOA Suite domain + +Follow these steps to copy and deploy the WebLogic Monitoring Exporter WAR files into the Oracle SOA Suite Domain. 
+**Note**: Replace the `` with appropriate values based on your environment: + ``` -created XXXX/monitoring-service/scripts/wls-exporter-deploy dir -created /tmp/ci-f1xt9gdpMH -/tmp/ci-f1xt9gdpMH XXXX/monitoring-service/scripts -in temp dir - adding: WEB-INF/weblogic.xml (deflated 66%) - adding: config.yml (deflated 63%) -XXXX/monitoring-service/scripts -created /tmp/ci-2LGWm8WLDA -/tmp/ci-2LGWm8WLDA XXXX/monitoring-service/scripts -in temp dir - adding: WEB-INF/weblogic.xml (deflated 66%) - adding: config.yml (deflated 63%) -XXXX/monitoring-service/scripts -created /tmp/ci-62Wuwbupgq -/tmp/ci-62Wuwbupgq XXXX/monitoring-service/scripts -in temp dir - adding: WEB-INF/weblogic.xml (deflated 66%) - adding: config.yml (deflated 63%) -XXXX/monitoring-service/scripts +$ cd ${WORKDIR}/monitoring-service/scripts +$ kubectl cp wls-exporter-deploy /:/u01/oracle +$ kubectl cp deploy-weblogic-monitoring-exporter.py /:/u01/oracle/wls-exporter-deploy +$ kubectl exec -it -n -- /u01/oracle/oracle_common/common/bin/wlst.sh /u01/oracle/wls-exporter-deploy/deploy-weblogic-monitoring-exporter.py \ +-domainName -adminServerName -adminURL \ +-soaClusterName -wlsMonitoringExporterTosoaCluster \ +-osbClusterName -wlsMonitoringExporterToosbCluster \ +-username -password ``` -#### Deploy the WebLogic Monitoring Exporter into the OracleSOASuite domain - -Follow these steps to copy and deploy the WebLogic Monitoring Exporter WAR files into the OracleSOASuite Domain. 
Replace the with appropriate value: +Example for `soaosb` domainType: ``` $ cd ${WORKDIR}/monitoring-service/scripts $ kubectl cp wls-exporter-deploy soans/soainfra-adminserver:/u01/oracle $ kubectl cp deploy-weblogic-monitoring-exporter.py soans/soainfra-adminserver:/u01/oracle/wls-exporter-deploy -$ kubectl exec -it -n soans soainfra-adminserver -- /u01/oracle/oracle_common/common/bin/wlst.sh /u01/oracle/wls-exporter-deploy/deploy-weblogic-monitoring-exporter.py -domainType +$ kubectl exec -it -n soans soainfra-adminserver -- /u01/oracle/oracle_common/common/bin/wlst.sh /u01/oracle/wls-exporter-deploy/deploy-weblogic-monitoring-exporter.py \ +-domainName soainfra -adminServerName AdminServer -adminURL soainfra-adminserver:7001 \ +-soaClusterName soa_cluster -wlsMonitoringExporterTosoaCluster true \ +-osbClusterName osb_cluster -wlsMonitoringExporterToosbCluster true \ +-username weblogic -password Welcome1 ``` #### Configure Prometheus Operator @@ -110,16 +82,19 @@ Prometheus enables you to collect metrics from the WebLogic Monitoring Exporter. The service monitor deployment YAML configuration file is available at `${WORKDIR}/monitoring-service/manifests/wls-exporter-ServiceMonitor.yaml.template`. Copy the file as `wls-exporter-ServiceMonitor.yaml` to update with appropraite values as detailed below. -The exporting of metrics from `wls-exporter` requires `basicAuth`, so a Kubernetes `Secret` is created with the user name and password that are base64 encoded. This `Secret` is used in the `ServiceMonitor` deployment. The `wls-exporter-ServiceMonitor.yaml` has `basicAuth` with credentials as username: weblogic and password: Welcome1 in base64 encoded. +The exporting of metrics from `wls-exporter` requires `basicAuth`, so a Kubernetes `Secret` is created with the user name and password that are base64 encoded. This `Secret` is used in the `ServiceMonitor` deployment. 
The `wls-exporter-ServiceMonitor.yaml` has namespace as `soans` and has `basicAuth` with credentials as `username: %USERNAME%` and `password: %PASSWORD%`. Update `%USERNAME%` and `%PASSWORD% ` in base64 encoded and all occurences of `soans` based on your environment. -If you are using a different credentials, then update `wls-exporter-ServiceMonitor.yaml` for `basicAuth` with required details.Use the following example for base64 encoded: +Use the following example for base64 encoded: ``` $ echo -n "Welcome1" | base64 V2VsY29tZTE= ``` + You need to add `RoleBinding` and `Role` for the namespace (soans) under which the WebLogic Servers pods are running in the Kubernetes cluster. These are required for Prometheus to access the endpoints provided by the WebLogic Monitoring Exporters. The YAML configuration files for soans namespace are provided in "${WORKDIR}/monitoring-service/manifests/". +If you are using namespace other than `soans`, update the namespace details in `prometheus-roleBinding-domain-namespace.yaml` and `prometheus-roleSpecific-domain-namespace.yaml`. + Perform the below steps for enabling Prometheus to collect the metrics from the WebLogic Monitoring Exporter: ``` @@ -144,11 +119,118 @@ You can access the Grafana dashboard at `http://mycompany.com:32100/`. 1. Log in to Grafana dashboard with username: admin and password: admin`. -1. Navigate to + (Create) -> Import -> Upload the `weblogic-server-dashboard.json` file (provided at `${WORKDIR}/monitoring-service/config/weblogic-server-dashboard.json`). +1. Navigate to + (Create) -> Import -> Upload the `weblogic-server-dashboard-import.json` file (provided at `${WORKDIR}/monitoring-service/config/weblogic-server-dashboard-import.json`). + + +## Set up using `setup-monitoring.sh` + +Alternatively, you can run the helper script `setup-monitoring.sh` available at `${WORKDIR}/monitoring-service` to setup the monitoring for Oracle SOA Suite domain. 
+ +This script creates kube-prometheus-stack(Prometheus, Grafana and Alertmanager), WebLogic Monitoring Exporter and imports `weblogic-server-dashboard.json` into Grafana for WebLogic Server Dashboard. + +### Prepare to use the setup monitoring script +The sample scripts for setup monitoring for Oracle SOA Suite domain are available at `${WORKDIR}/monitoring-service`. -### Setup Prometheus, Grafana and WebLogic Monitoring Exporter using `setup-monitoring.sh` +You must edit `monitoring-inputs.yaml`(or a copy of it) to provide the details of your domain. Refer to the configuration parameters below to understand the information that you must provide in this file. -Alternatively, you can run the helper script `setup-monitoring.sh` available at `${WORKDIR}/monitoring-service` to setup the monitoring for OracleSOASuite domain. For usage details execute `./setup-monitoring.sh -h`. +#### Configuration parameters -Sample `delete-monitoring.sh` is available at `${WORKDIR}/monitoring-service`, to uninstall the Prometheus, Grafana and WebLogic Monitoring Exporter. For usage details execute `./delete-monitoring.sh -h`. +The following parameters can be provided in the inputs file. + +| Parameter | Description | Default | +| --- | --- | --- | +| `domainUID` | domainUID of the Oracle SOA Suite domain. | `soainfra` | +| `domainNamespace` | Kubernetes namespace of the Oracle SOA Suite domain. | `soans` | +| `setupKubePrometheusStack` | Boolean value indicating whether kube-prometheus-stack (Prometheus, Grafana and Alertmanager) to be installed | `true` | +| `additionalParamForKubePrometheusStack` | The script install's kube-prometheus-stack with `service.type` as NodePort and values for `service.nodePort` as per the parameters defined in `monitoring-inputs.yaml`. 
Use `additionalParamForKubePrometheusStack` parameter to further configure with additional parameters as per [values.yaml](https://github.com/prometheus-community/helm-charts/blob/main/charts/kube-prometheus-stack/values.yaml). Sample value to disable NodeExporter, Prometheus-Operator TLS support and Admission webhook support for PrometheusRules resources is `--set nodeExporter.enabled=false --set prometheusOperator.tls.enabled=false --set prometheusOperator.admissionWebhooks.enabled=false`| | +| `monitoringNamespace` | Kubernetes namespace for monitoring setup. | `monitoring` | +| `adminServerName` | Name of the Administration Server. | `AdminServer` | +| `adminServerPort` | Port number for the Administration Server inside the Kubernetes cluster. | `7001` | +| `soaClusterName` | Name of the soaCluster. | `soa_cluster` | +| `soaManagedServerPort` | Port number of the managed servers in the soaCluster. | `8001` | +| `wlsMonitoringExporterTosoaCluster` | Boolean value indicating whether to deploy WebLogic Monitoring Exporter to soaCluster. | `false` | +| `osbClusterName` | Name of the osbCluster. | `osb_cluster` | +| `osbManagedServerPort` | Port number of the managed servers in the osbCluster. | `9001` | +| `wlsMonitoringExporterToosbCluster` | Boolean value indicating whether to deploy WebLogic Monitoring Exporter to osbCluster. | `false` | +| `exposeMonitoringNodePort` | Boolean value indicating if the Monitoring Services (Prometheus, Grafana and Alertmanager) is exposed outside of the Kubernetes cluster. | `false` | +| `prometheusNodePort` | Port number of the Prometheus outside the Kubernetes cluster. | `32101` | +| `grafanaNodePort` | Port number of the Grafana outside the Kubernetes cluster. | `32100` | +| `alertmanagerNodePort` | Port number of the Alertmanager outside the Kubernetes cluster. | `32102` | +| `weblogicCredentialsSecretName` | Name of the Kubernetes secret which has Administration Server’s user name and password. 
| `soainfra-domain-credentials` | + +Note that the values specified in the `monitoring-inputs.yaml` file will be used to install kube-prometheus-stack (Prometheus, Grafana and Alertmanager) and deploying WebLogic Monitoring Exporter into the Oracle SOA Suite domain. Hence make the domain specific values to be same as that used during domain creation. + +### Run the setup monitoring script + +Update the values in `monitoring-inputs.yaml` as per your requirement and run the `setup-monitoring.sh` script, specifying your inputs file: + +```bash +$ cd ${WORKDIR}/monitoring-service +$ ./setup-monitoring.sh \ + -i monitoring-inputs.yaml +``` +The script will perform the following steps: + +- Helm install `prometheus-community/kube-prometheus-stack` of version "16.5.0" if `setupKubePrometheusStack` is set to `true`. +- Deploys WebLogic Monitoring Exporter to Administration Server. +- Deploys WebLogic Monitoring Exporter to `soaCluster` if `wlsMonitoringExporterTosoaCluster` is set to `true`. +- Deploys WebLogic Monitoring Exporter to `osbCluster` if `wlsMonitoringExporterToosbCluster` is set to `true`. +- Exposes the Monitoring Services (Prometheus at `32101`, Grafana at `32100` and Alertmanager at `32102`) outside of the Kubernetes cluster if `exposeMonitoringNodePort` is set to `true`. +- Imports the WebLogic Server Grafana Dashboard if `setupKubePrometheusStack` is set to `true`. + +### Verify the results +The setup monitoring script will report failure if there was any error. However, verify that required resources were created by the script. + +#### Verify the kube-prometheus-stack + +To confirm that `prometheus-community/kube-prometheus-stack` was installed when `setupKubePrometheusStack` is set to `true`, run the following command: + +```bash +$ helm ls -n +``` +Replace with value for Kubernetes namespace used for monitoring. 
+ +Sample output: +```bash +$ helm ls -n monitoring +NAME NAMESPACE REVISION UPDATED STATUS CHART APP VERSION +monitoring monitoring 1 2021-06-18 12:58:35.177221969 +0000 UTC deployed kube-prometheus-stack-16.5.0 0.48.0 +$ +``` + +#### Verify the Prometheus, Grafana and Alertmanager setup + +When `exposeMonitoringNodePort` was set to `true`, verify that monitoring services are accessible outside of the Kubernetes cluster: + +- `32100` is the external port for Grafana, with credentials `admin:admin` +- `32101` is the external port for Prometheus +- `32102` is the external port for Alertmanager + +#### Verify the service discovery of WebLogic Monitoring Exporter + +Verify whether Prometheus is able to discover `wls-exporter` and collect the metrics: + +1. Access the Prometheus dashboard at http://mycompany.com:32101/ + +1. Navigate to Status to see the Service Discovery details. + +1. Verify that wls-exporter is listed in the discovered services. + +#### Verify the WebLogic Server dashboard + +You can access the Grafana dashboard at http://mycompany.com:32100/. + +1. Log in to the Grafana dashboard with username: `admin` and password: `admin`. + +1. Navigate to "WebLogic Server Dashboard" under General and verify. 
+ +### Delete the monitoring setup + +To delete the monitoring setup created by [Run the setup monitoring script](#run-the-setup-monitoring-script), run the below command: + +```bash +$ cd ${WORKDIR}/monitoring-service +$ ./delete-monitoring.sh \ + -i monitoring-inputs.yaml +``` diff --git a/OracleSOASuite/kubernetes/monitoring-service/config/weblogic-server-dashboard-import.json b/OracleSOASuite/kubernetes/monitoring-service/config/weblogic-server-dashboard-import.json new file mode 100644 index 000000000..82d2cd26c --- /dev/null +++ b/OracleSOASuite/kubernetes/monitoring-service/config/weblogic-server-dashboard-import.json @@ -0,0 +1,3312 @@ +{ + "__inputs": [], + "__requires": [ + { + "type": "grafana", + "id": "grafana", + "name": "Grafana", + "version": "5.2.4" + }, + { + "type": "panel", + "id": "graph", + "name": "Graph", + "version": "5.0.0" + }, + { + "type": "panel", + "id": "singlestat", + "name": "Singlestat", + "version": "5.0.0" + }, + { + "type": "panel", + "id": "table", + "name": "Table", + "version": "5.0.0" + } + ], + "annotations": { + "list": [ + { + "builtIn": 1, + "datasource": "-- Grafana --", + "enable": true, + "hide": true, + "iconColor": "rgba(0, 211, 255, 1)", + "name": "Annotations & Alerts", + "type": "dashboard" + } + ] + }, + "editable": true, + "gnetId": null, + "graphTooltip": 0, + "id": null, + "iteration": 1591295215114, + "links": [], + "panels": [ + { + "collapsed": false, + "gridPos": { + "h": 1, + "w": 24, + "x": 0, + "y": 0 + }, + "id": 32, + "panels": [], + "title": "Servers", + "type": "row" + }, + { + "cacheTimeout": null, + "colorBackground": false, + "colorValue": false, + "colors": [ + "#299c46", + "rgba(237, 129, 40, 0.89)", + "#d44a3a" + ], + "datasource": "$datasource", + "decimals": 0, + "format": "none", + "gauge": { + "maxValue": 100, + "minValue": 0, + "show": false, + "thresholdLabels": false, + "thresholdMarkers": true + }, + "gridPos": { + "h": 3, + "w": 13, + "x": 0, + "y": 1 + }, + "hideTimeOverride": 
true, + "id": 16, + "interval": null, + "links": [], + "mappingType": 1, + "mappingTypes": [ + { + "name": "value to text", + "value": 1 + }, + { + "name": "range to text", + "value": 2 + } + ], + "maxDataPoints": 100, + "nullPointMode": "connected", + "nullText": null, + "postfix": "", + "postfixFontSize": "50%", + "prefix": "", + "prefixFontSize": "50%", + "rangeMaps": [ + { + "from": "null", + "text": "N/A", + "to": "null" + } + ], + "sparkline": { + "fillColor": "rgba(31, 118, 189, 0.18)", + "full": false, + "lineColor": "rgb(31, 120, 193)", + "show": false + }, + "tableColumn": "", + "targets": [ + { + "expr": "count(count (wls_jvm_uptime{weblogic_domainUID=\"$domainName\", weblogic_clusterName=\"$clusterName\"}) by (name))", + "format": "time_series", + "intervalFactor": 1, + "legendFormat": "", + "refId": "A" + } + ], + "thresholds": "", + "timeFrom": null, + "timeShift": null, + "title": "Running Servers", + "type": "singlestat", + "valueFontSize": "80%", + "valueMaps": [ + { + "op": "=", + "text": "N/A", + "value": "null" + } + ], + "valueName": "avg" + }, + { + "cacheTimeout": null, + "colorBackground": false, + "colorValue": false, + "colors": [ + "#299c46", + "rgba(237, 129, 40, 0.89)", + "#d44a3a" + ], + "datasource": "$datasource", + "format": "none", + "gauge": { + "maxValue": 100, + "minValue": 0, + "show": false, + "thresholdLabels": false, + "thresholdMarkers": true + }, + "gridPos": { + "h": 3, + "w": 11, + "x": 13, + "y": 1 + }, + "id": 23, + "interval": null, + "links": [], + "mappingType": 1, + "mappingTypes": [ + { + "name": "value to text", + "value": 1 + }, + { + "name": "range to text", + "value": 2 + } + ], + "maxDataPoints": 100, + "nullPointMode": "connected", + "nullText": null, + "postfix": "", + "postfixFontSize": "50%", + "prefix": "", + "prefixFontSize": "50%", + "rangeMaps": [ + { + "from": "null", + "text": "N/A", + "to": "null" + } + ], + "sparkline": { + "fillColor": "rgba(31, 118, 189, 0.18)", + "full": false, + "lineColor": 
"rgb(31, 120, 193)", + "show": false + }, + "tableColumn": "", + "targets": [ + { + "expr": "count(count(wls_webapp_config_deployment_state{weblogic_domainUID=\"$domainName\", weblogic_clusterName=\"$clusterName\"}) by (app))", + "format": "time_series", + "intervalFactor": 1, + "legendFormat": "", + "refId": "A" + } + ], + "thresholds": "", + "timeFrom": null, + "timeShift": null, + "title": "Deployed Applications", + "type": "singlestat", + "valueFontSize": "80%", + "valueMaps": [ + { + "op": "=", + "text": "N/A", + "value": "null" + } + ], + "valueName": "avg" + }, + { + "cacheTimeout": null, + "colorBackground": false, + "colorValue": true, + "colors": [ + "#299c46", + "rgba(237, 129, 40, 0.89)", + "#d44a3a" + ], + "datasource": "$datasource", + "decimals": 1, + "description": "", + "format": "percent", + "gauge": { + "maxValue": 100, + "minValue": 0, + "show": false, + "thresholdLabels": false, + "thresholdMarkers": true + }, + "gridPos": { + "h": 3, + "w": 6, + "x": 0, + "y": 4 + }, + "hideTimeOverride": true, + "id": 104, + "interval": null, + "links": [], + "mappingType": 1, + "mappingTypes": [ + { + "name": "value to text", + "value": 1 + }, + { + "name": "range to text", + "value": 2 + } + ], + "maxDataPoints": 100, + "nullPointMode": "connected", + "nullText": null, + "postfix": "", + "postfixFontSize": "50%", + "prefix": "", + "prefixFontSize": "50%", + "rangeMaps": [ + { + "from": "null", + "text": "N/A", + "to": "null" + } + ], + "repeat": "serverName", + "repeatDirection": "v", + "sparkline": { + "fillColor": "rgba(31, 118, 189, 0.18)", + "full": false, + "lineColor": "rgb(31, 120, 193)", + "show": false + }, + "tableColumn": "weblogic_serverName", + "targets": [ + { + "expr": "wls_server_activation_time{weblogic_domainUID=\"$domainName\", weblogic_clusterName=\"$clusterName\",weblogic_serverName=\"$serverName\"}", + "format": "table", + "hide": false, + "instant": true, + "interval": "10s", + "intervalFactor": 2, + "legendFormat": "", + "refId": "A" 
+ } + ], + "thresholds": "50,80", + "timeFrom": null, + "timeShift": null, + "title": "Server Name", + "type": "singlestat", + "valueFontSize": "50%", + "valueMaps": [ + { + "op": "=", + "text": "", + "value": "" + } + ], + "valueName": "current" + }, + { + "cacheTimeout": null, + "colorBackground": false, + "colorValue": true, + "colors": [ + "#56A64B", + "rgba(237, 129, 40, 0.89)", + "#d44a3a" + ], + "datasource": "$datasource", + "format": "none", + "gauge": { + "maxValue": 100, + "minValue": 0, + "show": false, + "thresholdLabels": false, + "thresholdMarkers": true + }, + "gridPos": { + "h": 3, + "w": 4, + "x": 6, + "y": 4 + }, + "id": 84, + "interval": "", + "links": [], + "mappingType": 1, + "mappingTypes": [ + { + "name": "value to text", + "value": 1 + }, + { + "name": "range to text", + "value": 2 + } + ], + "maxDataPoints": 100, + "nullPointMode": "connected", + "nullText": null, + "postfix": "", + "postfixFontSize": "50%", + "prefix": "", + "prefixFontSize": "50%", + "rangeMaps": [ + { + "from": "null", + "text": "N/A", + "to": "null" + } + ], + "repeat": "serverName", + "repeatDirection": "v", + "sparkline": { + "fillColor": "rgba(31, 118, 189, 0.18)", + "full": false, + "lineColor": "rgb(31, 120, 193)", + "show": false + }, + "tableColumn": "", + "targets": [ + { + "expr": "wls_server_state_val{weblogic_domainUID=\"$domainName\", weblogic_clusterName=\"$clusterName\", weblogic_serverName=\"$serverName\"}", + "format": "time_series", + "intervalFactor": 1, + "refId": "A" + } + ], + "thresholds": "", + "timeFrom": null, + "timeShift": null, + "title": "Server Status", + "type": "singlestat", + "valueFontSize": "50%", + "valueMaps": [ + { + "op": "=", + "text": "SHUTDOWN", + "value": "0" + }, + { + "op": "=", + "text": "STARTING", + "value": "1" + }, + { + "op": "=", + "text": "RUNNING", + "value": "2" + }, + { + "op": "=", + "text": "STANDBY", + "value": "3" + }, + { + "op": "=", + "text": "FAILED", + "value": "8" + }, + { + "op": "=", + "text": 
"FAILED", + "value": "17" + } + ], + "valueName": "current" + }, + { + "cacheTimeout": null, + "colorBackground": false, + "colorValue": true, + "colors": [ + "#299c46", + "rgba(237, 129, 40, 0.89)", + "#d44a3a" + ], + "datasource": "$datasource", + "decimals": 1, + "description": "", + "format": "percent", + "gauge": { + "maxValue": 100, + "minValue": 0, + "show": true, + "thresholdLabels": false, + "thresholdMarkers": true + }, + "gridPos": { + "h": 3, + "w": 4, + "x": 10, + "y": 4 + }, + "hideTimeOverride": true, + "id": 27, + "interval": null, + "links": [], + "mappingType": 1, + "mappingTypes": [ + { + "name": "value to text", + "value": 1 + }, + { + "name": "range to text", + "value": 2 + } + ], + "maxDataPoints": 100, + "nullPointMode": "connected", + "nullText": null, + "postfix": "", + "postfixFontSize": "50%", + "prefix": "", + "prefixFontSize": "50%", + "rangeMaps": [ + { + "from": "null", + "text": "N/A", + "to": "null" + } + ], + "repeat": "serverName", + "repeatDirection": "v", + "sparkline": { + "fillColor": "rgba(31, 118, 189, 0.18)", + "full": false, + "lineColor": "rgb(31, 120, 193)", + "show": true + }, + "tableColumn": "instance", + "targets": [ + { + "expr": "100 - wls_jvm_heap_free_percent{weblogic_domainUID=\"$domainName\", weblogic_clusterName=\"$clusterName\", weblogic_serverName=\"$serverName\"}", + "format": "time_series", + "hide": false, + "instant": true, + "interval": "10s", + "intervalFactor": 2, + "legendFormat": "", + "refId": "A" + } + ], + "thresholds": "50,80", + "timeFrom": null, + "timeShift": null, + "title": "Heap Usage", + "type": "singlestat", + "valueFontSize": "80%", + "valueMaps": [ + { + "op": "=", + "text": "", + "value": "" + } + ], + "valueName": "current" + }, + { + "cacheTimeout": null, + "colorBackground": false, + "colorPostfix": false, + "colorValue": true, + "colors": [ + "#299c46", + "rgba(237, 129, 40, 0.89)", + "#d44a3a" + ], + "datasource": "$datasource", + "decimals": 1, + "description": "", + "format": 
"ms", + "gauge": { + "maxValue": 100, + "minValue": 0, + "show": false, + "thresholdLabels": false, + "thresholdMarkers": true + }, + "gridPos": { + "h": 3, + "w": 5, + "x": 14, + "y": 4 + }, + "hideTimeOverride": true, + "id": 91, + "interval": null, + "links": [], + "mappingType": 1, + "mappingTypes": [ + { + "name": "value to text", + "value": 1 + }, + { + "name": "range to text", + "value": 2 + } + ], + "maxDataPoints": 100, + "nullPointMode": "connected", + "nullText": null, + "postfix": "", + "postfixFontSize": "50%", + "prefix": "", + "prefixFontSize": "50%", + "rangeMaps": [ + { + "from": "null", + "text": "N/A", + "to": "null" + } + ], + "repeat": "serverName", + "repeatDirection": "v", + "sparkline": { + "fillColor": "rgba(31, 118, 189, 0.18)", + "full": false, + "lineColor": "rgb(31, 120, 193)", + "show": false + }, + "tableColumn": "instance", + "targets": [ + { + "expr": "wls_jvm_uptime{weblogic_domainUID=\"$domainName\", weblogic_clusterName=\"$clusterName\", weblogic_serverName=\"$serverName\"}", + "format": "time_series", + "hide": false, + "instant": true, + "interval": "10s", + "intervalFactor": 2, + "legendFormat": "", + "refId": "A" + } + ], + "thresholds": "50,80", + "timeFrom": null, + "timeShift": null, + "title": "Running Time", + "type": "singlestat", + "valueFontSize": "50%", + "valueMaps": [ + { + "op": "=", + "text": "", + "value": "" + } + ], + "valueName": "current" + }, + { + "cacheTimeout": null, + "colorBackground": false, + "colorPostfix": false, + "colorValue": true, + "colors": [ + "#299c46", + "rgba(237, 129, 40, 0.89)", + "#d44a3a" + ], + "datasource": "$datasource", + "decimals": 0, + "description": "", + "format": "short", + "gauge": { + "maxValue": 100, + "minValue": 0, + "show": false, + "thresholdLabels": false, + "thresholdMarkers": true + }, + "gridPos": { + "h": 3, + "w": 5, + "x": 19, + "y": 4 + }, + "hideTimeOverride": true, + "id": 96, + "interval": null, + "links": [], + "mappingType": 1, + "mappingTypes": [ + { + 
"name": "value to text", + "value": 1 + }, + { + "name": "range to text", + "value": 2 + } + ], + "maxDataPoints": 100, + "nullPointMode": "connected", + "nullText": null, + "postfix": "", + "postfixFontSize": "50%", + "prefix": "", + "prefixFontSize": "50%", + "rangeMaps": [ + { + "from": "null", + "text": "N/A", + "to": "null" + } + ], + "repeat": "serverName", + "repeatDirection": "v", + "sparkline": { + "fillColor": "rgba(31, 118, 189, 0.18)", + "full": false, + "lineColor": "rgb(31, 120, 193)", + "show": false + }, + "tableColumn": "instance", + "targets": [ + { + "expr": "wls_server_open_sockets_current_count{weblogic_domainUID=\"$domainName\", weblogic_clusterName=\"$clusterName\", weblogic_serverName=\"$serverName\"}", + "format": "time_series", + "hide": false, + "instant": true, + "interval": "10s", + "intervalFactor": 2, + "legendFormat": "", + "refId": "A" + } + ], + "thresholds": "50,80", + "timeFrom": null, + "timeShift": null, + "title": "Open Sockets", + "type": "singlestat", + "valueFontSize": "80%", + "valueMaps": [ + { + "op": "=", + "text": "", + "value": "" + } + ], + "valueName": "current" + }, + { + "aliasColors": { + " heap free managed-server-1": "super-light-green", + " heap free managed-server-2": "dark-green", + "heap size managed-server-1 ": "super-light-red", + "heap size managed-server-2 ": "dark-red" + }, + "bars": false, + "dashLength": 10, + "dashes": false, + "datasource": "$datasource", + "fill": 1, + "gridPos": { + "h": 10, + "w": 24, + "x": 0, + "y": 7 + }, + "id": 12, + "legend": { + "alignAsTable": true, + "avg": true, + "current": true, + "max": true, + "min": false, + "rightSide": false, + "show": true, + "total": false, + "values": true + }, + "lines": true, + "linewidth": 1, + "links": [], + "nullPointMode": "null", + "paceLength": 10, + "percentage": false, + "pointradius": 2, + "points": false, + "renderer": "flot", + "seriesOverrides": [], + "spaceLength": 10, + "stack": false, + "steppedLine": false, + "targets": [ + 
{ + "expr": "wls_jvm_heap_free_current{weblogic_domainUID=\"$domainName\", weblogic_clusterName=\"$clusterName\", weblogic_serverName=~\"${serverName:regex}\"}", + "format": "time_series", + "intervalFactor": 1, + "legendFormat": " Heap Free ()", + "refId": "B" + }, + { + "expr": "wls_jvm_heap_size_current{weblogic_domainUID=\"$domainName\", weblogic_clusterName=\"$clusterName\", weblogic_serverName=~\"${serverName:regex}\"}", + "format": "time_series", + "instant": false, + "intervalFactor": 1, + "legendFormat": "Heap Size ()", + "refId": "A" + }, + { + "expr": "wls_jvm_heap_size_max{weblogic_domainUID=\"$domainName\", weblogic_clusterName=\"$clusterName\", weblogic_serverName=~\"${serverName:regex}\"}", + "format": "time_series", + "hide": true, + "intervalFactor": 1, + "legendFormat": "Heap Max ()", + "refId": "C" + } + ], + "thresholds": [], + "timeFrom": null, + "timeRegions": [], + "timeShift": null, + "title": "JVM Heap", + "tooltip": { + "shared": true, + "sort": 0, + "value_type": "individual" + }, + "type": "graph", + "xaxis": { + "buckets": null, + "mode": "time", + "name": null, + "show": true, + "values": [] + }, + "yaxes": [ + { + "format": "decbytes", + "label": null, + "logBase": 1, + "max": null, + "min": null, + "show": true + }, + { + "format": "short", + "label": null, + "logBase": 1, + "max": null, + "min": null, + "show": true + } + ], + "yaxis": { + "align": false, + "alignLevel": null + } + }, + { + "aliasColors": { + " heap free managed-server-1": "super-light-green", + " heap free managed-server-2": "dark-green", + "heap size managed-server-1 ": "super-light-red", + "heap size managed-server-2 ": "dark-red" + }, + "bars": false, + "dashLength": 10, + "dashes": false, + "datasource": "$datasource", + "fill": 1, + "gridPos": { + "h": 7, + "w": 12, + "x": 0, + "y": 17 + }, + "id": 21, + "legend": { + "alignAsTable": false, + "avg": false, + "current": true, + "max": false, + "min": false, + "rightSide": false, + "show": true, + "total": 
false, + "values": true + }, + "lines": true, + "linewidth": 1, + "links": [], + "nullPointMode": "null", + "paceLength": 10, + "percentage": false, + "pointradius": 2, + "points": false, + "renderer": "flot", + "seriesOverrides": [], + "spaceLength": 10, + "stack": false, + "steppedLine": false, + "targets": [ + { + "expr": "wls_jvm_process_cpu_load{weblogic_domainUID=~\"$domainName\", weblogic_clusterName=~\"$clusterName\", weblogic_serverName=~\"${serverName:regex}\"} * 100", + "format": "time_series", + "intervalFactor": 1, + "legendFormat": " ", + "refId": "B" + } + ], + "thresholds": [], + "timeFrom": null, + "timeRegions": [], + "timeShift": null, + "title": "CPU Load", + "tooltip": { + "shared": true, + "sort": 0, + "value_type": "individual" + }, + "type": "graph", + "xaxis": { + "buckets": null, + "mode": "time", + "name": null, + "show": true, + "values": [] + }, + "yaxes": [ + { + "format": "percent", + "label": null, + "logBase": 1, + "max": null, + "min": null, + "show": true + }, + { + "format": "short", + "label": null, + "logBase": 1, + "max": null, + "min": null, + "show": true + } + ], + "yaxis": { + "align": false, + "alignLevel": null + } + }, + { + "aliasColors": {}, + "bars": false, + "dashLength": 10, + "dashes": false, + "datasource": "$datasource", + "fill": 1, + "gridPos": { + "h": 7, + "w": 12, + "x": 12, + "y": 17 + }, + "id": 10, + "legend": { + "alignAsTable": false, + "avg": false, + "current": true, + "max": false, + "min": false, + "rightSide": false, + "show": true, + "total": false, + "values": true + }, + "lines": true, + "linewidth": 1, + "links": [], + "nullPointMode": "null", + "paceLength": 10, + "percentage": false, + "pointradius": 2, + "points": false, + "renderer": "flot", + "seriesOverrides": [], + "spaceLength": 10, + "stack": false, + "steppedLine": false, + "targets": [ + { + "expr": "wls_threadpool_execute_thread_total_count{weblogic_domainUID=\"$domainName\", weblogic_clusterName=\"$clusterName\", 
weblogic_serverName=~\"${serverName:regex}\"}", + "format": "time_series", + "intervalFactor": 1, + "legendFormat": "Total Threads ()", + "refId": "A" + }, + { + "expr": "wls_threadpool_stuck_thread_count{weblogic_domainUID=\"$domainName\", weblogic_clusterName=\"$clusterName\", weblogic_serverName=~\"${serverName:regex}\"}", + "format": "time_series", + "intervalFactor": 1, + "legendFormat": "Stuck Threads ()", + "refId": "D" + }, + { + "expr": "wls_threadpool_queue_length{weblogic_domainUID=\"$domainName\", weblogic_clusterName=\"$clusterName\", weblogic_serverName=~\"${serverName:regex}\"}", + "format": "time_series", + "hide": true, + "intervalFactor": 1, + "legendFormat": "queue", + "refId": "C" + }, + { + "expr": "wls_threadpool_hogging_thread_count{weblogic_domainUID=\"$domainName\", weblogic_clusterName=\"$clusterName\", weblogic_serverName=~\"${serverName:regex}\"}", + "format": "time_series", + "hide": true, + "intervalFactor": 1, + "legendFormat": "hogging", + "refId": "B" + } + ], + "thresholds": [], + "timeFrom": null, + "timeRegions": [], + "timeShift": null, + "title": "Thread Pool", + "tooltip": { + "shared": true, + "sort": 0, + "value_type": "individual" + }, + "type": "graph", + "xaxis": { + "buckets": null, + "mode": "time", + "name": null, + "show": true, + "values": [] + }, + "yaxes": [ + { + "format": "short", + "label": null, + "logBase": 1, + "max": null, + "min": null, + "show": true + }, + { + "format": "short", + "label": null, + "logBase": 1, + "max": null, + "min": null, + "show": true + } + ], + "yaxis": { + "align": false, + "alignLevel": null + } + }, + { + "collapsed": true, + "gridPos": { + "h": 1, + "w": 24, + "x": 0, + "y": 24 + }, + "id": 35, + "panels": [ + { + "columns": [], + "datasource": "$datasource", + "fontSize": "100%", + "gridPos": { + "h": 7, + "w": 8, + "x": 0, + "y": 28 + }, + "hideTimeOverride": true, + "id": 126, + "links": [], + "pageSize": null, + "scroll": true, + "showHeader": true, + "sort": { + "col": 13, + 
"desc": true + }, + "styles": [ + { + "alias": "Time", + "dateFormat": "YYYY-MM-DD HH:mm:ss", + "pattern": "Time", + "type": "hidden" + }, + { + "alias": "Webapp", + "colorMode": null, + "colors": [ + "rgba(245, 54, 54, 0.9)", + "rgba(237, 129, 40, 0.89)", + "rgba(50, 172, 45, 0.97)" + ], + "dateFormat": "YYYY-MM-DD HH:mm:ss", + "decimals": 2, + "mappingType": 1, + "pattern": "app", + "thresholds": [], + "type": "string", + "unit": "short" + }, + { + "alias": "Total Sessions", + "colorMode": null, + "colors": [ + "rgba(245, 54, 54, 0.9)", + "rgba(237, 129, 40, 0.89)", + "rgba(50, 172, 45, 0.97)" + ], + "dateFormat": "YYYY-MM-DD HH:mm:ss", + "decimals": 0, + "mappingType": 1, + "pattern": "Value", + "thresholds": [], + "type": "number", + "unit": "short" + }, + { + "alias": "", + "colorMode": null, + "colors": [ + "rgba(245, 54, 54, 0.9)", + "rgba(237, 129, 40, 0.89)", + "rgba(50, 172, 45, 0.97)" + ], + "decimals": 2, + "pattern": "/.*/", + "thresholds": [], + "type": "hidden", + "unit": "short" + } + ], + "targets": [ + { + "expr": "topk($topN,sum(wls_webapp_config_sessions_opened_total_count{weblogic_domainUID=\"$domainName\", weblogic_clusterName=\"$clusterName\", weblogic_serverName=~\"${serverName:regex}\"}) by (app))", + "format": "table", + "instant": true, + "intervalFactor": 1, + "refId": "B" + } + ], + "timeFrom": null, + "timeShift": null, + "title": "Total Sessions (top $topN)", + "transform": "table", + "type": "table" + }, + { + "columns": [], + "datasource": "$datasource", + "fontSize": "100%", + "gridPos": { + "h": 7, + "w": 8, + "x": 8, + "y": 28 + }, + "hideTimeOverride": true, + "id": 136, + "links": [], + "pageSize": null, + "scroll": true, + "showHeader": true, + "sort": { + "col": 13, + "desc": true + }, + "styles": [ + { + "alias": "Time", + "dateFormat": "YYYY-MM-DD HH:mm:ss", + "pattern": "Time", + "type": "hidden" + }, + { + "alias": "Webapp", + "colorMode": null, + "colors": [ + "rgba(245, 54, 54, 0.9)", + "rgba(237, 129, 40, 0.89)", + 
"rgba(50, 172, 45, 0.97)" + ], + "dateFormat": "YYYY-MM-DD HH:mm:ss", + "decimals": 2, + "mappingType": 1, + "pattern": "app", + "thresholds": [], + "type": "string", + "unit": "short" + }, + { + "alias": "Total Requests", + "colorMode": null, + "colors": [ + "rgba(245, 54, 54, 0.9)", + "rgba(237, 129, 40, 0.89)", + "rgba(50, 172, 45, 0.97)" + ], + "dateFormat": "YYYY-MM-DD HH:mm:ss", + "decimals": 0, + "mappingType": 1, + "pattern": "Value", + "thresholds": [], + "type": "number", + "unit": "short" + }, + { + "alias": "", + "colorMode": null, + "colors": [ + "rgba(245, 54, 54, 0.9)", + "rgba(237, 129, 40, 0.89)", + "rgba(50, 172, 45, 0.97)" + ], + "decimals": 2, + "pattern": "/.*/", + "thresholds": [], + "type": "hidden", + "unit": "short" + } + ], + "targets": [ + { + "expr": "topk($topN,sum(wls_servlet_invocation_total_count{weblogic_domainUID=\"$domainName\", weblogic_clusterName=\"$clusterName\", weblogic_serverName=~\"${serverName:regex}\"}) by (app))", + "format": "table", + "instant": true, + "intervalFactor": 1, + "refId": "B" + } + ], + "timeFrom": null, + "timeShift": null, + "title": "Total Requests (top $topN)", + "transform": "table", + "type": "table" + }, + { + "columns": [], + "datasource": "$datasource", + "fontSize": "100%", + "gridPos": { + "h": 7, + "w": 8, + "x": 16, + "y": 28 + }, + "hideTimeOverride": true, + "id": 134, + "links": [], + "pageSize": null, + "scroll": true, + "showHeader": true, + "sort": { + "col": 13, + "desc": true + }, + "styles": [ + { + "alias": "Time", + "dateFormat": "YYYY-MM-DD HH:mm:ss", + "pattern": "Time", + "type": "hidden" + }, + { + "alias": "Webapp", + "colorMode": null, + "colors": [ + "rgba(245, 54, 54, 0.9)", + "rgba(237, 129, 40, 0.89)", + "rgba(50, 172, 45, 0.97)" + ], + "dateFormat": "YYYY-MM-DD HH:mm:ss", + "decimals": 2, + "mappingType": 1, + "pattern": "app", + "thresholds": [], + "type": "string", + "unit": "short" + }, + { + "alias": "Total Time", + "colorMode": null, + "colors": [ + "rgba(245, 54, 
54, 0.9)", + "rgba(237, 129, 40, 0.89)", + "rgba(50, 172, 45, 0.97)" + ], + "dateFormat": "YYYY-MM-DD HH:mm:ss", + "decimals": 0, + "mappingType": 1, + "pattern": "Value", + "thresholds": [], + "type": "number", + "unit": "ms" + }, + { + "alias": "", + "colorMode": null, + "colors": [ + "rgba(245, 54, 54, 0.9)", + "rgba(237, 129, 40, 0.89)", + "rgba(50, 172, 45, 0.97)" + ], + "decimals": 2, + "pattern": "/.*/", + "thresholds": [], + "type": "hidden", + "unit": "short" + } + ], + "targets": [ + { + "expr": "topk($topN,sum(wls_servlet_execution_time_total{weblogic_domainUID=\"$domainName\", weblogic_clusterName=\"$clusterName\", weblogic_serverName=~\"${serverName:regex}\"}) by (app))", + "format": "table", + "instant": true, + "intervalFactor": 1, + "refId": "B" + } + ], + "timeFrom": null, + "timeShift": null, + "title": "Total Execution Time (top $topN)", + "transform": "table", + "type": "table" + }, + { + "aliasColors": {}, + "bars": false, + "dashLength": 10, + "dashes": false, + "datasource": "$datasource", + "fill": 1, + "gridPos": { + "h": 8, + "w": 12, + "x": 0, + "y": 35 + }, + "id": 14, + "legend": { + "alignAsTable": false, + "avg": false, + "current": false, + "max": false, + "min": false, + "rightSide": false, + "show": true, + "sort": "current", + "sortDesc": true, + "total": false, + "values": false + }, + "lines": true, + "linewidth": 1, + "links": [], + "nullPointMode": "null", + "paceLength": 10, + "percentage": false, + "pointradius": 2, + "points": false, + "renderer": "flot", + "seriesOverrides": [], + "spaceLength": 10, + "stack": false, + "steppedLine": false, + "targets": [ + { + "expr": "sum(wls_webapp_config_open_sessions_current_count{weblogic_domainUID=\"$domainName\", weblogic_clusterName=\"$clusterName\", weblogic_serverName=~\"${serverName:regex}\"}) by (app)", + "format": "time_series", + "interval": "", + "intervalFactor": 1, + "legendFormat": "", + "refId": "A" + }, + { + "expr": "", + "format": "time_series", + "intervalFactor": 
1, + "refId": "B" + } + ], + "thresholds": [], + "timeFrom": null, + "timeRegions": [], + "timeShift": null, + "title": "Current Sessions ", + "tooltip": { + "shared": true, + "sort": 0, + "value_type": "individual" + }, + "type": "graph", + "xaxis": { + "buckets": null, + "mode": "time", + "name": null, + "show": true, + "values": [] + }, + "yaxes": [ + { + "format": "short", + "label": "", + "logBase": 1, + "max": null, + "min": null, + "show": true + }, + { + "format": "short", + "label": null, + "logBase": 1, + "max": null, + "min": null, + "show": true + } + ], + "yaxis": { + "align": false, + "alignLevel": null + } + }, + { + "aliasColors": {}, + "bars": true, + "dashLength": 10, + "dashes": false, + "datasource": "$datasource", + "fill": 1, + "gridPos": { + "h": 8, + "w": 12, + "x": 12, + "y": 35 + }, + "id": 128, + "legend": { + "alignAsTable": false, + "avg": false, + "current": false, + "max": false, + "min": false, + "rightSide": false, + "show": true, + "sort": "current", + "sortDesc": true, + "total": false, + "values": false + }, + "lines": false, + "linewidth": 1, + "links": [], + "nullPointMode": "null", + "paceLength": 10, + "percentage": false, + "pointradius": 2, + "points": false, + "renderer": "flot", + "seriesOverrides": [], + "spaceLength": 10, + "stack": false, + "steppedLine": false, + "targets": [ + { + "expr": " sum(irate(wls_webapp_config_sessions_opened_total_count{weblogic_domainUID=\"$domainName\", weblogic_clusterName=\"$clusterName\", weblogic_serverName=~\"${serverName:regex}\"}[5m])) by (app)", + "format": "time_series", + "intervalFactor": 1, + "legendFormat": "", + "refId": "A" + } + ], + "thresholds": [], + "timeFrom": null, + "timeRegions": [], + "timeShift": null, + "title": "Session Rate ", + "tooltip": { + "shared": true, + "sort": 0, + "value_type": "individual" + }, + "type": "graph", + "xaxis": { + "buckets": null, + "mode": "time", + "name": null, + "show": true, + "values": [] + }, + "yaxes": [ + { + "decimals": 0, + 
"format": "short", + "label": "per second", + "logBase": 1, + "max": null, + "min": "0", + "show": true + }, + { + "format": "short", + "label": null, + "logBase": 1, + "max": null, + "min": null, + "show": true + } + ], + "yaxis": { + "align": false, + "alignLevel": null + } + }, + { + "aliasColors": {}, + "bars": false, + "dashLength": 10, + "dashes": false, + "datasource": "$datasource", + "fill": 1, + "gridPos": { + "h": 7, + "w": 12, + "x": 0, + "y": 43 + }, + "id": 132, + "legend": { + "alignAsTable": false, + "avg": false, + "current": false, + "max": false, + "min": false, + "rightSide": false, + "show": true, + "sort": "current", + "sortDesc": true, + "total": false, + "values": false + }, + "lines": true, + "linewidth": 1, + "links": [], + "nullPointMode": "null", + "paceLength": 10, + "percentage": false, + "pointradius": 2, + "points": false, + "renderer": "flot", + "seriesOverrides": [], + "spaceLength": 10, + "stack": false, + "steppedLine": false, + "targets": [ + { + "expr": "(sum(wls_servlet_execution_time_average{weblogic_domainUID=\"$domainName\", weblogic_clusterName=\"$clusterName\", weblogic_serverName=~\"${serverName:regex}\"}) by (app)) / (count(wls_servlet_execution_time_average{weblogic_domainUID=\"domain1\", weblogic_clusterName=\"cluster-1\"}) by (app))", + "format": "time_series", + "interval": "", + "intervalFactor": 1, + "legendFormat": "", + "refId": "A" + } + ], + "thresholds": [], + "timeFrom": null, + "timeRegions": [], + "timeShift": null, + "title": "Execution Time per Request ", + "tooltip": { + "shared": true, + "sort": 0, + "value_type": "individual" + }, + "type": "graph", + "xaxis": { + "buckets": null, + "mode": "time", + "name": null, + "show": true, + "values": [] + }, + "yaxes": [ + { + "format": "ms", + "label": "", + "logBase": 1, + "max": null, + "min": null, + "show": true + }, + { + "format": "short", + "label": null, + "logBase": 1, + "max": null, + "min": null, + "show": true + } + ], + "yaxis": { + "align": 
false, + "alignLevel": null + } + }, + { + "aliasColors": {}, + "bars": true, + "dashLength": 10, + "dashes": false, + "datasource": "$datasource", + "fill": 1, + "gridPos": { + "h": 7, + "w": 12, + "x": 12, + "y": 43 + }, + "id": 138, + "legend": { + "alignAsTable": false, + "avg": false, + "current": false, + "max": false, + "min": false, + "rightSide": false, + "show": true, + "sort": "current", + "sortDesc": true, + "total": false, + "values": false + }, + "lines": false, + "linewidth": 1, + "links": [], + "nullPointMode": "null", + "paceLength": 10, + "percentage": false, + "pointradius": 2, + "points": false, + "renderer": "flot", + "seriesOverrides": [], + "spaceLength": 10, + "stack": false, + "steppedLine": false, + "targets": [ + { + "expr": "sum(irate(wls_servlet_invocation_total_count{weblogic_domainUID=\"$domainName\", weblogic_clusterName=\"$clusterName\", weblogic_serverName=~\"${serverName:regex}\"}[5m])) by (app)", + "format": "time_series", + "intervalFactor": 1, + "legendFormat": "", + "refId": "A" + } + ], + "thresholds": [], + "timeFrom": null, + "timeRegions": [], + "timeShift": null, + "title": "Request Rate ", + "tooltip": { + "shared": true, + "sort": 0, + "value_type": "individual" + }, + "type": "graph", + "xaxis": { + "buckets": null, + "mode": "time", + "name": null, + "show": true, + "values": [] + }, + "yaxes": [ + { + "decimals": 0, + "format": "short", + "label": "per second", + "logBase": 1, + "max": null, + "min": "0", + "show": true + }, + { + "format": "short", + "label": null, + "logBase": 1, + "max": null, + "min": null, + "show": true + } + ], + "yaxis": { + "align": false, + "alignLevel": null + } + } + ], + "title": "Web Applications", + "type": "row" + }, + { + "collapsed": true, + "gridPos": { + "h": 1, + "w": 24, + "x": 0, + "y": 25 + }, + "id": 43, + "panels": [ + { + "columns": [], + "datasource": "$datasource", + "fontSize": "100%", + "gridPos": { + "h": 4, + "w": 24, + "x": 0, + "y": 29 + }, + "hideTimeOverride": 
true, + "id": 111, + "links": [], + "pageSize": null, + "scroll": true, + "showHeader": true, + "sort": { + "col": 0, + "desc": true + }, + "styles": [ + { + "alias": "Time", + "dateFormat": "YYYY-MM-DD HH:mm:ss", + "pattern": "Time", + "type": "hidden" + }, + { + "alias": "Server", + "colorMode": null, + "colors": [ + "rgba(245, 54, 54, 0.9)", + "rgba(237, 129, 40, 0.89)", + "rgba(50, 172, 45, 0.97)" + ], + "dateFormat": "YYYY-MM-DD HH:mm:ss", + "decimals": 2, + "mappingType": 1, + "pattern": "weblogic_serverName", + "thresholds": [], + "type": "string", + "unit": "short" + }, + { + "alias": "Name", + "colorMode": null, + "colors": [ + "rgba(245, 54, 54, 0.9)", + "rgba(237, 129, 40, 0.89)", + "rgba(50, 172, 45, 0.97)" + ], + "dateFormat": "YYYY-MM-DD HH:mm:ss", + "decimals": 2, + "mappingType": 1, + "pattern": "name", + "thresholds": [], + "type": "string", + "unit": "short" + }, + { + "alias": "Active Connections", + "colorMode": null, + "colors": [ + "rgba(245, 54, 54, 0.9)", + "rgba(237, 129, 40, 0.89)", + "rgba(50, 172, 45, 0.97)" + ], + "dateFormat": "YYYY-MM-DD HH:mm:ss", + "decimals": 0, + "mappingType": 1, + "pattern": "Value #A", + "thresholds": [], + "type": "number", + "unit": "short" + }, + { + "alias": "Current Capacity", + "colorMode": null, + "colors": [ + "rgba(245, 54, 54, 0.9)", + "rgba(237, 129, 40, 0.89)", + "rgba(50, 172, 45, 0.97)" + ], + "dateFormat": "YYYY-MM-DD HH:mm:ss", + "decimals": 0, + "mappingType": 1, + "pattern": "Value #C", + "thresholds": [], + "type": "number", + "unit": "short" + }, + { + "alias": "Total Connections", + "colorMode": null, + "colors": [ + "rgba(245, 54, 54, 0.9)", + "rgba(237, 129, 40, 0.89)", + "rgba(50, 172, 45, 0.97)" + ], + "dateFormat": "YYYY-MM-DD HH:mm:ss", + "decimals": 0, + "mappingType": 1, + "pattern": "Value #D", + "thresholds": [], + "type": "number", + "unit": "short" + }, + { + "alias": "Total Connections", + "colorMode": null, + "colors": [ + "rgba(245, 54, 54, 0.9)", + "rgba(237, 129, 40, 
0.89)", + "rgba(50, 172, 45, 0.97)" + ], + "dateFormat": "YYYY-MM-DD HH:mm:ss", + "decimals": 0, + "mappingType": 1, + "pattern": "Value #D", + "thresholds": [], + "type": "number", + "unit": "short" + }, + { + "alias": "", + "colorMode": null, + "colors": [ + "rgba(245, 54, 54, 0.9)", + "rgba(237, 129, 40, 0.89)", + "rgba(50, 172, 45, 0.97)" + ], + "decimals": 2, + "pattern": "/.*/", + "thresholds": [], + "type": "hidden", + "unit": "short" + } + ], + "targets": [ + { + "expr": "sum(wls_datasource_curr_capacity{weblogic_domainUID=\"$domainName\", weblogic_clusterName=\"$clusterName\", weblogic_serverName=~\"${serverName:regex}\"}) by (weblogic_serverName,name)", + "format": "table", + "instant": true, + "intervalFactor": 1, + "refId": "C" + }, + { + "expr": "sum(wls_datasource_active_connections_current_count{weblogic_domainUID=\"$domainName\", weblogic_clusterName=\"$clusterName\", weblogic_serverName=~\"${serverName:regex}\"}) by (weblogic_serverName,name)", + "format": "table", + "instant": true, + "intervalFactor": 1, + "refId": "A" + }, + { + "expr": "sum(wls_datasource_connections_total_count{weblogic_domainUID=\"$domainName\", weblogic_clusterName=\"$clusterName\", weblogic_serverName=~\"${serverName:regex}\"}) by (weblogic_serverName,name)", + "format": "table", + "instant": true, + "intervalFactor": 1, + "refId": "D" + } + ], + "timeFrom": null, + "timeShift": null, + "title": "Overview", + "transform": "table", + "type": "table" + }, + { + "aliasColors": {}, + "bars": false, + "dashLength": 10, + "dashes": false, + "datasource": "$datasource", + "fill": 1, + "gridPos": { + "h": 8, + "w": 13, + "x": 0, + "y": 33 + }, + "id": 50, + "legend": { + "alignAsTable": false, + "avg": false, + "current": false, + "max": false, + "min": false, + "rightSide": false, + "show": true, + "sort": "current", + "sortDesc": true, + "total": false, + "values": false + }, + "lines": true, + "linewidth": 1, + "links": [], + "nullPointMode": "null", + "paceLength": 10, + 
"percentage": false, + "pointradius": 2, + "points": false, + "renderer": "flot", + "seriesOverrides": [], + "spaceLength": 10, + "stack": false, + "steppedLine": false, + "targets": [ + { + "expr": "wls_datasource_active_connections_current_count{weblogic_domainUID=\"$domainName\", weblogic_clusterName=\"$clusterName\", weblogic_serverName=~\"${serverName:regex}\"}", + "format": "time_series", + "intervalFactor": 1, + "legendFormat": " @ ", + "refId": "A" + } + ], + "thresholds": [], + "timeFrom": null, + "timeRegions": [], + "timeShift": null, + "title": "Active Connections", + "tooltip": { + "shared": true, + "sort": 0, + "value_type": "individual" + }, + "type": "graph", + "xaxis": { + "buckets": null, + "mode": "time", + "name": null, + "show": true, + "values": [] + }, + "yaxes": [ + { + "decimals": 0, + "format": "short", + "label": null, + "logBase": 1, + "max": null, + "min": "0", + "show": true + }, + { + "format": "short", + "label": null, + "logBase": 1, + "max": null, + "min": null, + "show": true + } + ], + "yaxis": { + "align": false, + "alignLevel": null + } + }, + { + "aliasColors": {}, + "bars": true, + "dashLength": 10, + "dashes": false, + "datasource": "$datasource", + "fill": 1, + "gridPos": { + "h": 8, + "w": 11, + "x": 13, + "y": 33 + }, + "id": 71, + "legend": { + "alignAsTable": false, + "avg": false, + "current": false, + "max": false, + "min": false, + "rightSide": false, + "show": true, + "total": false, + "values": false + }, + "lines": true, + "linewidth": 1, + "links": [], + "nullPointMode": "null", + "paceLength": 10, + "percentage": false, + "pointradius": 2, + "points": false, + "renderer": "flot", + "seriesOverrides": [], + "spaceLength": 10, + "stack": false, + "steppedLine": false, + "targets": [ + { + "expr": "irate(wls_datasource_connections_total_count{weblogic_domainUID=\"$domainName\", weblogic_clusterName=\"$clusterName\", weblogic_serverName=~\"${serverName:regex}\"}[5m])", + "format": "time_series", + "intervalFactor": 
1, + "legendFormat": " @ ", + "refId": "A" + } + ], + "thresholds": [], + "timeFrom": null, + "timeRegions": [], + "timeShift": null, + "title": "Connection Rate", + "tooltip": { + "shared": true, + "sort": 0, + "value_type": "individual" + }, + "type": "graph", + "xaxis": { + "buckets": null, + "mode": "time", + "name": null, + "show": true, + "values": [] + }, + "yaxes": [ + { + "decimals": 0, + "format": "short", + "label": "per second", + "logBase": 1, + "max": null, + "min": "0", + "show": true + }, + { + "format": "short", + "label": null, + "logBase": 1, + "max": null, + "min": null, + "show": true + } + ], + "yaxis": { + "align": false, + "alignLevel": null + } + }, + { + "aliasColors": {}, + "bars": false, + "dashLength": 10, + "dashes": false, + "datasource": "$datasource", + "fill": 1, + "gridPos": { + "h": 9, + "w": 11, + "x": 0, + "y": 41 + }, + "id": 46, + "legend": { + "alignAsTable": false, + "avg": false, + "current": false, + "max": false, + "min": false, + "show": true, + "total": false, + "values": false + }, + "lines": true, + "linewidth": 1, + "links": [], + "nullPointMode": "null", + "paceLength": 10, + "percentage": false, + "pointradius": 2, + "points": false, + "renderer": "flot", + "seriesOverrides": [], + "spaceLength": 10, + "stack": false, + "steppedLine": false, + "targets": [ + { + "expr": "wls_datasource_waiting_for_connection_current_count{weblogic_domainUID=\"$domainName\", weblogic_clusterName=\"$clusterName\", weblogic_serverName=~\"${serverName:regex}\"}", + "format": "time_series", + "intervalFactor": 1, + "legendFormat": " @ ", + "refId": "A" + } + ], + "thresholds": [], + "timeFrom": null, + "timeRegions": [], + "timeShift": null, + "title": "Pending Connection Requests", + "tooltip": { + "shared": true, + "sort": 0, + "value_type": "individual" + }, + "type": "graph", + "xaxis": { + "buckets": null, + "mode": "time", + "name": null, + "show": true, + "values": [] + }, + "yaxes": [ + { + "decimals": 0, + "format": "short", + 
"label": null, + "logBase": 1, + "max": null, + "min": "0", + "show": true + }, + { + "format": "short", + "label": null, + "logBase": 1, + "max": null, + "min": null, + "show": true + } + ], + "yaxis": { + "align": false, + "alignLevel": null + } + }, + { + "aliasColors": {}, + "bars": false, + "dashLength": 10, + "dashes": false, + "datasource": "$datasource", + "fill": 1, + "gridPos": { + "h": 9, + "w": 13, + "x": 11, + "y": 41 + }, + "id": 73, + "legend": { + "alignAsTable": false, + "avg": false, + "current": false, + "max": false, + "min": false, + "show": true, + "total": false, + "values": false + }, + "lines": true, + "linewidth": 1, + "links": [], + "nullPointMode": "null", + "paceLength": 10, + "percentage": false, + "pointradius": 2, + "points": false, + "renderer": "flot", + "seriesOverrides": [], + "spaceLength": 10, + "stack": false, + "steppedLine": false, + "targets": [ + { + "expr": "wls_datasource_connection_delay_time{weblogic_domainUID=\"$domainName\", weblogic_clusterName=\"$clusterName\", weblogic_serverName=~\"${serverName:regex}\"}", + "format": "time_series", + "intervalFactor": 1, + "legendFormat": " @ ", + "refId": "A" + } + ], + "thresholds": [], + "timeFrom": null, + "timeRegions": [], + "timeShift": null, + "title": "Average Connection Delay Time", + "tooltip": { + "shared": true, + "sort": 0, + "value_type": "individual" + }, + "type": "graph", + "xaxis": { + "buckets": null, + "mode": "time", + "name": null, + "show": true, + "values": [] + }, + "yaxes": [ + { + "decimals": 0, + "format": "ms", + "label": null, + "logBase": 1, + "max": null, + "min": "0", + "show": true + }, + { + "format": "short", + "label": null, + "logBase": 1, + "max": null, + "min": null, + "show": true + } + ], + "yaxis": { + "align": false, + "alignLevel": null + } + } + ], + "title": "Data Sources", + "type": "row" + }, + { + "collapsed": true, + "gridPos": { + "h": 1, + "w": 24, + "x": 0, + "y": 26 + }, + "id": 40, + "panels": [ + { + "aliasColors": {}, + 
"bars": false, + "dashLength": 10, + "dashes": false, + "datasource": "$datasource", + "fill": 1, + "gridPos": { + "h": 6, + "w": 12, + "x": 0, + "y": 30 + }, + "id": 145, + "legend": { + "alignAsTable": false, + "avg": false, + "current": false, + "max": false, + "min": false, + "rightSide": false, + "show": true, + "total": false, + "values": false + }, + "lines": true, + "linewidth": 1, + "links": [], + "nullPointMode": "null", + "paceLength": 10, + "percentage": false, + "pointradius": 2, + "points": false, + "renderer": "flot", + "seriesOverrides": [], + "spaceLength": 10, + "stack": false, + "steppedLine": false, + "targets": [ + { + "expr": "sum(wls_jmsruntime_connections_current_count{weblogic_domainUID=\"$domainName\", weblogic_clusterName=\"$clusterName\", weblogic_serverName=~\"${serverName:regex}\"}) by (weblogic_serverName)", + "format": "time_series", + "intervalFactor": 1, + "legendFormat": "", + "refId": "A" + } + ], + "thresholds": [], + "timeFrom": null, + "timeRegions": [], + "timeShift": null, + "title": "JMS Connections", + "tooltip": { + "shared": true, + "sort": 0, + "value_type": "individual" + }, + "type": "graph", + "xaxis": { + "buckets": null, + "mode": "time", + "name": null, + "show": true, + "values": [] + }, + "yaxes": [ + { + "decimals": 0, + "format": "short", + "label": "", + "logBase": 1, + "max": null, + "min": "0", + "show": true + }, + { + "format": "short", + "label": null, + "logBase": 1, + "max": null, + "min": null, + "show": true + } + ], + "yaxis": { + "align": false, + "alignLevel": null + } + }, + { + "aliasColors": {}, + "bars": false, + "dashLength": 10, + "dashes": false, + "datasource": "$datasource", + "fill": 1, + "gridPos": { + "h": 6, + "w": 12, + "x": 12, + "y": 30 + }, + "id": 147, + "legend": { + "alignAsTable": false, + "avg": false, + "current": false, + "max": false, + "min": false, + "rightSide": false, + "show": true, + "total": false, + "values": false + }, + "lines": true, + "linewidth": 1, + "links": 
[], + "nullPointMode": "null", + "paceLength": 10, + "percentage": false, + "pointradius": 2, + "points": false, + "renderer": "flot", + "seriesOverrides": [], + "spaceLength": 10, + "stack": false, + "steppedLine": false, + "targets": [ + { + "expr": "sum(irate(wls_jmsruntime_connections_total_count{weblogic_domainUID=\"$domainName\", weblogic_clusterName=\"$clusterName\", weblogic_serverName=~\"${serverName:regex}\"}[5m])) by (weblogic_serverName)", + "format": "time_series", + "intervalFactor": 1, + "legendFormat": "", + "refId": "A" + } + ], + "thresholds": [], + "timeFrom": null, + "timeRegions": [], + "timeShift": null, + "title": "JMS Connection Rate", + "tooltip": { + "shared": true, + "sort": 0, + "value_type": "individual" + }, + "type": "graph", + "xaxis": { + "buckets": null, + "mode": "time", + "name": null, + "show": true, + "values": [] + }, + "yaxes": [ + { + "decimals": null, + "format": "short", + "label": "", + "logBase": 1, + "max": null, + "min": "0", + "show": true + }, + { + "format": "short", + "label": null, + "logBase": 1, + "max": null, + "min": null, + "show": true + } + ], + "yaxis": { + "align": false, + "alignLevel": null + } + }, + { + "columns": [], + "datasource": "$datasource", + "fontSize": "100%", + "gridPos": { + "h": 4, + "w": 24, + "x": 0, + "y": 36 + }, + "hideTimeOverride": true, + "id": 113, + "links": [], + "pageSize": null, + "scroll": true, + "showHeader": true, + "sort": { + "col": 0, + "desc": true + }, + "styles": [ + { + "alias": "Time", + "dateFormat": "YYYY-MM-DD HH:mm:ss", + "pattern": "Time", + "type": "hidden" + }, + { + "alias": "Name", + "colorMode": null, + "colors": [ + "rgba(245, 54, 54, 0.9)", + "rgba(237, 129, 40, 0.89)", + "rgba(50, 172, 45, 0.97)" + ], + "dateFormat": "YYYY-MM-DD HH:mm:ss", + "decimals": 2, + "mappingType": 1, + "pattern": "jmsserver", + "thresholds": [], + "type": "string", + "unit": "short" + }, + { + "alias": "Current Dests", + "colorMode": null, + "colors": [ + "rgba(245, 54, 54, 
0.9)", + "rgba(237, 129, 40, 0.89)", + "rgba(50, 172, 45, 0.97)" + ], + "dateFormat": "YYYY-MM-DD HH:mm:ss", + "decimals": 0, + "mappingType": 1, + "pattern": "Value #B", + "thresholds": [], + "type": "number", + "unit": "short" + }, + { + "alias": "Total Msgs", + "colorMode": null, + "colors": [ + "rgba(245, 54, 54, 0.9)", + "rgba(237, 129, 40, 0.89)", + "rgba(50, 172, 45, 0.97)" + ], + "dateFormat": "YYYY-MM-DD HH:mm:ss", + "decimals": 0, + "mappingType": 1, + "pattern": "Value #A", + "thresholds": [], + "type": "number", + "unit": "short" + }, + { + "alias": "Total Bytes", + "colorMode": null, + "colors": [ + "rgba(245, 54, 54, 0.9)", + "rgba(237, 129, 40, 0.89)", + "rgba(50, 172, 45, 0.97)" + ], + "dateFormat": "YYYY-MM-DD HH:mm:ss", + "decimals": 0, + "mappingType": 1, + "pattern": "Value #D", + "thresholds": [], + "type": "number", + "unit": "bytes" + }, + { + "alias": "Total Dests", + "colorMode": null, + "colors": [ + "rgba(245, 54, 54, 0.9)", + "rgba(237, 129, 40, 0.89)", + "rgba(50, 172, 45, 0.97)" + ], + "dateFormat": "YYYY-MM-DD HH:mm:ss", + "decimals": 0, + "mappingType": 1, + "pattern": "Value #E", + "thresholds": [], + "type": "number", + "unit": "short" + }, + { + "alias": "", + "colorMode": null, + "colors": [ + "rgba(245, 54, 54, 0.9)", + "rgba(237, 129, 40, 0.89)", + "rgba(50, 172, 45, 0.97)" + ], + "decimals": 2, + "pattern": "/.*/", + "thresholds": [], + "type": "hidden", + "unit": "short" + } + ], + "targets": [ + { + "expr": "sum(wls_jms_destinations_current_count{weblogic_domainUID=\"$domainName\", weblogic_clusterName=\"$clusterName\", weblogic_serverName=~\"${serverName:regex}\"}) by (jmsserver)", + "format": "table", + "instant": true, + "intervalFactor": 1, + "refId": "B" + }, + { + "expr": "sum(wls_jms_messages_received_count{weblogic_domainUID=\"$domainName\", weblogic_clusterName=\"$clusterName\", weblogic_serverName=~\"${serverName:regex}\"}) by (jmsserver)", + "format": "table", + "instant": true, + "intervalFactor": 1, + "refId": 
"A" + }, + { + "expr": "sum(wls_jms_bytes_received_count{weblogic_domainUID=\"$domainName\", weblogic_clusterName=\"$clusterName\", weblogic_serverName=~\"${serverName:regex}\"}) by (jmsserver)", + "format": "table", + "instant": true, + "intervalFactor": 1, + "refId": "D" + }, + { + "expr": "sum(wls_jms_destinations_total_count{weblogic_domainUID=\"$domainName\", weblogic_clusterName=\"$clusterName\", weblogic_serverName=~\"${serverName:regex}\"}) by (jmsserver)", + "format": "table", + "instant": true, + "intervalFactor": 1, + "refId": "E" + }, + { + "expr": "sum(wls_jms_destinations_current_count{weblogic_domainUID=\"$domainName\", weblogic_clusterName=\"$clusterName\", weblogic_serverName=~\"${serverName:regex}\"}) by (jmsserver)", + "format": "table", + "instant": true, + "intervalFactor": 1, + "refId": "F" + } + ], + "timeFrom": null, + "timeShift": null, + "title": "JMSServer Overview", + "transform": "table", + "type": "table" + }, + { + "aliasColors": {}, + "bars": false, + "dashLength": 10, + "dashes": false, + "datasource": "$datasource", + "fill": 1, + "gridPos": { + "h": 7, + "w": 12, + "x": 0, + "y": 40 + }, + "id": 54, + "legend": { + "alignAsTable": false, + "avg": false, + "current": true, + "max": false, + "min": false, + "rightSide": false, + "show": true, + "total": false, + "values": true + }, + "lines": true, + "linewidth": 1, + "links": [], + "nullPointMode": "null", + "paceLength": 10, + "percentage": false, + "pointradius": 2, + "points": false, + "renderer": "flot", + "seriesOverrides": [], + "spaceLength": 10, + "stack": false, + "steppedLine": false, + "targets": [ + { + "expr": "sum(wls_jms_messages_current_count{weblogic_domainUID=\"$domainName\", weblogic_clusterName=\"$clusterName\", weblogic_serverName=~\"${serverName:regex}\"}) by (jmsserver)", + "format": "time_series", + "intervalFactor": 1, + "legendFormat": "Current ()", + "refId": "A" + }, + { + "expr": "sum(wls_jms_messages_pending_count{weblogic_domainUID=\"$domainName\", 
weblogic_clusterName=\"$clusterName\", weblogic_serverName=~\"${serverName:regex}\"}) by (jmsserver)", + "format": "time_series", + "intervalFactor": 1, + "legendFormat": "Pending ()", + "refId": "B" + } + ], + "thresholds": [], + "timeFrom": null, + "timeRegions": [], + "timeShift": null, + "title": "Messages", + "tooltip": { + "shared": true, + "sort": 0, + "value_type": "individual" + }, + "type": "graph", + "xaxis": { + "buckets": null, + "mode": "time", + "name": null, + "show": true, + "values": [] + }, + "yaxes": [ + { + "decimals": 0, + "format": "short", + "label": "", + "logBase": 1, + "max": null, + "min": "0", + "show": true + }, + { + "format": "short", + "label": null, + "logBase": 1, + "max": null, + "min": null, + "show": true + } + ], + "yaxis": { + "align": false, + "alignLevel": null + } + }, + { + "aliasColors": {}, + "bars": false, + "dashLength": 10, + "dashes": false, + "datasource": "$datasource", + "fill": 1, + "gridPos": { + "h": 7, + "w": 12, + "x": 12, + "y": 40 + }, + "id": 56, + "legend": { + "alignAsTable": false, + "avg": false, + "current": true, + "max": false, + "min": false, + "show": true, + "total": false, + "values": true + }, + "lines": true, + "linewidth": 1, + "links": [], + "nullPointMode": "null", + "paceLength": 10, + "percentage": false, + "pointradius": 2, + "points": false, + "renderer": "flot", + "seriesOverrides": [], + "spaceLength": 10, + "stack": false, + "steppedLine": false, + "targets": [ + { + "expr": "sum(wls_jms_bytes_current_count{weblogic_domainUID=\"$domainName\", weblogic_clusterName=\"$clusterName\", weblogic_serverName=~\"${serverName:regex}\"}) by (jmsserver)", + "format": "time_series", + "intervalFactor": 1, + "legendFormat": "Current ()", + "refId": "A" + }, + { + "expr": "sum(wls_jms_bytes_pending_count{weblogic_domainUID=\"$domainName\", weblogic_clusterName=\"$clusterName\", weblogic_serverName=~\"${serverName:regex}\"}) by (jmsserver)", + "format": "time_series", + "intervalFactor": 1, + 
"legendFormat": "Pending ()", + "refId": "B" + } + ], + "thresholds": [], + "timeFrom": null, + "timeRegions": [], + "timeShift": null, + "title": "Bytes", + "tooltip": { + "shared": true, + "sort": 0, + "value_type": "individual" + }, + "type": "graph", + "xaxis": { + "buckets": null, + "mode": "time", + "name": null, + "show": true, + "values": [] + }, + "yaxes": [ + { + "decimals": 0, + "format": "short", + "label": "", + "logBase": 1, + "max": null, + "min": "0", + "show": true + }, + { + "format": "short", + "label": null, + "logBase": 1, + "max": null, + "min": null, + "show": true + } + ], + "yaxis": { + "align": false, + "alignLevel": null + } + }, + { + "aliasColors": {}, + "bars": true, + "dashLength": 10, + "dashes": false, + "datasource": "$datasource", + "fill": 1, + "gridPos": { + "h": 7, + "w": 12, + "x": 0, + "y": 47 + }, + "id": 58, + "legend": { + "avg": false, + "current": false, + "max": false, + "min": false, + "show": true, + "total": false, + "values": false + }, + "lines": true, + "linewidth": 1, + "links": [], + "nullPointMode": "null", + "paceLength": 10, + "percentage": false, + "pointradius": 2, + "points": false, + "renderer": "flot", + "seriesOverrides": [], + "spaceLength": 10, + "stack": false, + "steppedLine": false, + "targets": [ + { + "expr": "sum(irate(wls_jms_messages_received_count{weblogic_domainUID=\"$domainName\", weblogic_clusterName=\"$clusterName\", weblogic_serverName=~\"${serverName:regex}\"}[5m])) by (jmsserver)", + "format": "time_series", + "intervalFactor": 1, + "legendFormat": "", + "refId": "A" + } + ], + "thresholds": [], + "timeFrom": null, + "timeRegions": [], + "timeShift": null, + "title": "Received Message Rate ", + "tooltip": { + "shared": true, + "sort": 0, + "value_type": "individual" + }, + "type": "graph", + "xaxis": { + "buckets": null, + "mode": "time", + "name": null, + "show": true, + "values": [] + }, + "yaxes": [ + { + "decimals": null, + "format": "short", + "label": "", + "logBase": 1, + "max": 
null, + "min": "0", + "show": true + }, + { + "format": "short", + "label": null, + "logBase": 1, + "max": null, + "min": null, + "show": true + } + ], + "yaxis": { + "align": false, + "alignLevel": null + } + }, + { + "aliasColors": {}, + "bars": true, + "dashLength": 10, + "dashes": false, + "datasource": "$datasource", + "fill": 1, + "gridPos": { + "h": 7, + "w": 12, + "x": 12, + "y": 47 + }, + "id": 117, + "legend": { + "avg": false, + "current": false, + "max": false, + "min": false, + "show": true, + "total": false, + "values": false + }, + "lines": true, + "linewidth": 1, + "links": [], + "nullPointMode": "null", + "paceLength": 10, + "percentage": false, + "pointradius": 2, + "points": false, + "renderer": "flot", + "seriesOverrides": [], + "spaceLength": 10, + "stack": false, + "steppedLine": false, + "targets": [ + { + "expr": "sum(irate(wls_jms_bytes_received_count{weblogic_domainUID=\"$domainName\", weblogic_clusterName=\"$clusterName\", weblogic_serverName=~\"${serverName:regex}\"}[5m])) by (jmsserver)", + "format": "time_series", + "intervalFactor": 1, + "legendFormat": "", + "refId": "A" + } + ], + "thresholds": [], + "timeFrom": null, + "timeRegions": [], + "timeShift": null, + "title": "Received Byte Rate", + "tooltip": { + "shared": true, + "sort": 0, + "value_type": "individual" + }, + "type": "graph", + "xaxis": { + "buckets": null, + "mode": "time", + "name": null, + "show": true, + "values": [] + }, + "yaxes": [ + { + "decimals": null, + "format": "short", + "label": "", + "logBase": 1, + "max": null, + "min": "0", + "show": true + }, + { + "format": "short", + "label": null, + "logBase": 1, + "max": null, + "min": null, + "show": true + } + ], + "yaxis": { + "align": false, + "alignLevel": null + } + }, + { + "columns": [], + "datasource": "$datasource", + "fontSize": "100%", + "gridPos": { + "h": 5, + "w": 24, + "x": 0, + "y": 54 + }, + "hideTimeOverride": true, + "id": 119, + "links": [], + "pageSize": null, + "scroll": true, + 
"showHeader": true, + "sort": { + "col": 3, + "desc": false + }, + "styles": [ + { + "alias": "Time", + "dateFormat": "YYYY-MM-DD HH:mm:ss", + "pattern": "Time", + "type": "hidden" + }, + { + "alias": "Destination", + "colorMode": null, + "colors": [ + "rgba(245, 54, 54, 0.9)", + "rgba(237, 129, 40, 0.89)", + "rgba(50, 172, 45, 0.97)" + ], + "dateFormat": "YYYY-MM-DD HH:mm:ss", + "decimals": 2, + "mappingType": 1, + "pattern": "destination", + "thresholds": [], + "type": "string", + "unit": "short" + }, + { + "alias": "Current Consumers", + "colorMode": null, + "colors": [ + "rgba(245, 54, 54, 0.9)", + "rgba(237, 129, 40, 0.89)", + "rgba(50, 172, 45, 0.97)" + ], + "dateFormat": "YYYY-MM-DD HH:mm:ss", + "decimals": 0, + "mappingType": 1, + "pattern": "Value #A", + "thresholds": [], + "type": "number", + "unit": "short" + }, + { + "alias": "Current Msgs", + "colorMode": null, + "colors": [ + "rgba(245, 54, 54, 0.9)", + "rgba(237, 129, 40, 0.89)", + "rgba(50, 172, 45, 0.97)" + ], + "dateFormat": "YYYY-MM-DD HH:mm:ss", + "decimals": 0, + "mappingType": 1, + "pattern": "Value #B", + "thresholds": [], + "type": "number", + "unit": "short" + }, + { + "alias": "Pending Msgs", + "colorMode": null, + "colors": [ + "rgba(245, 54, 54, 0.9)", + "rgba(237, 129, 40, 0.89)", + "rgba(50, 172, 45, 0.97)" + ], + "dateFormat": "YYYY-MM-DD HH:mm:ss", + "decimals": 0, + "mappingType": 1, + "pattern": "Value #C", + "thresholds": [], + "type": "number", + "unit": "short" + }, + { + "alias": "Currrent Bytes", + "colorMode": null, + "colors": [ + "rgba(245, 54, 54, 0.9)", + "rgba(237, 129, 40, 0.89)", + "rgba(50, 172, 45, 0.97)" + ], + "dateFormat": "YYYY-MM-DD HH:mm:ss", + "decimals": 0, + "mappingType": 1, + "pattern": "Value #D", + "thresholds": [], + "type": "number", + "unit": "bytes" + }, + { + "alias": "Pending Bytes", + "colorMode": null, + "colors": [ + "rgba(245, 54, 54, 0.9)", + "rgba(237, 129, 40, 0.89)", + "rgba(50, 172, 45, 0.97)" + ], + "dateFormat": "YYYY-MM-DD HH:mm:ss", + 
"decimals": 0, + "mappingType": 1, + "pattern": "Value #E", + "thresholds": [], + "type": "number", + "unit": "short" + }, + { + "alias": "Total Msgs", + "colorMode": null, + "colors": [ + "rgba(245, 54, 54, 0.9)", + "rgba(237, 129, 40, 0.89)", + "rgba(50, 172, 45, 0.97)" + ], + "dateFormat": "YYYY-MM-DD HH:mm:ss", + "decimals": 0, + "mappingType": 1, + "pattern": "Value #F", + "thresholds": [], + "type": "number", + "unit": "short" + }, + { + "alias": "Total Bytes", + "colorMode": null, + "colors": [ + "rgba(245, 54, 54, 0.9)", + "rgba(237, 129, 40, 0.89)", + "rgba(50, 172, 45, 0.97)" + ], + "dateFormat": "YYYY-MM-DD HH:mm:ss", + "decimals": 0, + "mappingType": 1, + "pattern": "Value #G", + "thresholds": [], + "type": "number", + "unit": "short" + }, + { + "alias": "", + "colorMode": null, + "colors": [ + "rgba(245, 54, 54, 0.9)", + "rgba(237, 129, 40, 0.89)", + "rgba(50, 172, 45, 0.97)" + ], + "decimals": 2, + "pattern": "/.*/", + "thresholds": [], + "type": "hidden", + "unit": "short" + } + ], + "targets": [ + { + "expr": "sum(wls_jms_dest_consumers_current_count{weblogic_domainUID=\"$domainName\", weblogic_clusterName=\"$clusterName\", weblogic_serverName=~\"${serverName:regex}\"}) by (destination)", + "format": "table", + "instant": true, + "intervalFactor": 1, + "refId": "A" + }, + { + "expr": "sum(wls_jms_dest_messages_current_count{weblogic_domainUID=\"$domainName\", weblogic_clusterName=\"$clusterName\", weblogic_serverName=~\"${serverName:regex}\"}) by (destination)", + "format": "table", + "instant": true, + "intervalFactor": 1, + "refId": "B" + }, + { + "expr": "sum(wls_jms_dest_messages_pending_count{weblogic_domainUID=\"$domainName\", weblogic_clusterName=\"$clusterName\", weblogic_serverName=~\"${serverName:regex}\"}) by (destination)", + "format": "table", + "instant": true, + "intervalFactor": 1, + "refId": "C" + }, + { + "expr": "sum(wls_jms_dest_bytes_current_count{weblogic_domainUID=\"$domainName\", weblogic_clusterName=\"$clusterName\", 
weblogic_serverName=~\"${serverName:regex}\"}) by (destination)", + "format": "table", + "instant": true, + "intervalFactor": 1, + "refId": "D" + }, + { + "expr": "sum(wls_jms_dest_bytes_pending_count{weblogic_domainUID=\"$domainName\", weblogic_clusterName=\"$clusterName\", weblogic_serverName=~\"${serverName:regex}\"}) by (destination)", + "format": "table", + "instant": true, + "intervalFactor": 1, + "refId": "E" + }, + { + "expr": "sum(wls_jms_dest_messages_received_count{weblogic_domainUID=\"$domainName\", weblogic_clusterName=\"$clusterName\", weblogic_serverName=~\"${serverName:regex}\"}) by (destination)", + "format": "table", + "instant": true, + "interval": "", + "intervalFactor": 1, + "refId": "F" + }, + { + "expr": "sum(wls_jms_dest_bytes_received_count{weblogic_domainUID=\"$domainName\", weblogic_clusterName=\"$clusterName\", weblogic_serverName=~\"${serverName:regex}\"}) by (destination)", + "format": "table", + "instant": true, + "intervalFactor": 1, + "refId": "G" + } + ], + "timeFrom": null, + "timeShift": null, + "title": "Destinations Overview", + "transform": "table", + "type": "table" + } + ], + "title": "JMS Services", + "type": "row" + } + ], + "refresh": false, + "schemaVersion": 16, + "style": "dark", + "tags": [], + "templating": { + "list": [ + { + "current": { + "text": "prometheus", + "value": "prometheus" + }, + "hide": 0, + "label": null, + "name": "datasource", + "options": [], + "query": "prometheus", + "refresh": 1, + "regex": "", + "type": "datasource" + }, + { + "allValue": null, + "current": {}, + "datasource": "$datasource", + "hide": 0, + "includeAll": false, + "label": "Domain", + "multi": false, + "name": "domainName", + "options": [], + "query": "label_values(weblogic_domainUID)", + "refresh": 1, + "regex": "", + "sort": 2, + "tagValuesQuery": "", + "tags": [], + "tagsQuery": "", + "type": "query", + "useTags": false + }, + { + "allValue": null, + "current": {}, + "datasource": "$datasource", + "hide": 0, + "includeAll": 
false, + "label": "Cluster", + "multi": false, + "name": "clusterName", + "options": [], + "query": "label_values(wls_jvm_uptime{weblogic_domainUID=\"$domainName\"},weblogic_clusterName)", + "refresh": 1, + "regex": "", + "sort": 2, + "tagValuesQuery": "", + "tags": [], + "tagsQuery": "", + "type": "query", + "useTags": false + }, + { + "allValue": null, + "current": {}, + "datasource": "$datasource", + "hide": 0, + "includeAll": true, + "label": "Server", + "multi": true, + "name": "serverName", + "options": [], + "query": "label_values(wls_jvm_uptime{weblogic_domainUID=\"$domainName\", weblogic_clusterName=\"$clusterName\"},weblogic_serverName)", + "refresh": 1, + "regex": "", + "sort": 2, + "tagValuesQuery": "", + "tags": [], + "tagsQuery": "", + "type": "query", + "useTags": false + }, + { + "allValue": null, + "current": { + "selected": false, + "tags": [], + "text": "5", + "value": "5" + }, + "hide": 0, + "includeAll": false, + "label": "Top N", + "multi": false, + "name": "topN", + "options": [ + { + "selected": false, + "text": "3", + "value": "3" + }, + { + "selected": true, + "text": "5", + "value": "5" + }, + { + "selected": false, + "text": "7", + "value": "7" + }, + { + "selected": false, + "text": "10", + "value": "10" + } + ], + "query": "3, 5, 7, 10", + "skipUrlSync": false, + "type": "custom" + } + ] + }, + "time": { + "from": "now-30m", + "to": "now" + }, + "timepicker": { + "hidden": false, + "refresh_intervals": [ + "5s", + "10s", + "30s", + "1m", + "5m", + "15m", + "30m", + "1h", + "2h", + "1d" + ], + "time_options": [ + "5m", + "15m", + "1h", + "6h", + "12h", + "24h", + "2d", + "7d", + "30d" + ] + }, + "timezone": "", + "title": "WebLogic Server Dashboard", + "uid": "5yUwzbZWz", + "version": 1 +} diff --git a/OracleSOASuite/kubernetes/monitoring-service/delete-monitoring.sh b/OracleSOASuite/kubernetes/monitoring-service/delete-monitoring.sh index 331692991..b676e9b40 100755 --- 
a/OracleSOASuite/kubernetes/monitoring-service/delete-monitoring.sh +++ b/OracleSOASuite/kubernetes/monitoring-service/delete-monitoring.sh @@ -10,37 +10,66 @@ scriptDir="$( cd "$( dirname "${script}" )" && pwd )" OLD_PWD=`pwd` +# +## Function to exit and print an error message +## $1 - text of message +function fail { + printError $* + exit 1 +} + +# Function to print an error message +function removeFileIfExists { + echo "input is $1" + if [ -f $1 ]; then + rm -f $1 + fi +} + +function exitIfError { + if [ "$1" != "0" ]; then + echo "$2" + exit $1 + fi +} +# +# Function to parse a yaml file and generate the bash exports +# $1 - Input filename +# $2 - Output filename +function parseYaml { + local s='[[:space:]]*' w='[a-zA-Z0-9_]*' fs=$(echo @|tr @ '\034') + sed -ne "s|^\($s\):|\1|" \ + -e "s|^\($s\)\($w\)$s:$s[\"']\(.*\)[\"']$s\$|\1$fs\2$fs\3|p" \ + -e "s|^\($s\)\($w\)$s:$s\(.*\)$s\$|\1$fs\2$fs\3|p" $1 | + awk -F$fs '{ + if (length($3) > 0) { + # javaOptions may contain tokens that are not allowed in export command + # we need to handle it differently. + if ($2=="javaOptions") { + printf("%s=%s\n", $2, $3); + } else { + printf("export %s=\"%s\"\n", $2, $3); + } + } + }' > $2 +} + function usage { - echo "usage: ${script} -t -n -d -u -p -k [-h]" - echo " -t Domain Type. (required)" - echo " -n Domain namespace (optional)" - echo " (default: soans)" - echo " -d domainUID of Domain. (optional)" - echo " (default: soainfra)" - echo " -u username. (optional)" - echo " (default: weblogic)" - echo " -p password. {optional)" - echo " (default: Welcome1)" - echo " -k Delete kubeprometheus yes/no. (optional)" - echo " (default: no)" + echo usage: ${script} -i file [-v] [-h] + echo " -i Parameter inputs file, must be specified." 
echo " -h Help" exit $1 } -while getopts ":h:t:n:d:u:p:k:" opt; do +function deleteKubePrometheusStack { + helm delete ${monitoringNamespace} --namespace ${monitoringNamespace} +} + +#Parse the inputs +while getopts "hi:" opt; do case $opt in - t) domainType="${OPTARG}" - ;; - n) namespace="${OPTARG}" - ;; - d) domainUID="${OPTARG}" - ;; - u) username="${OPTARG}" - ;; - p) password="${OPTARG}" - ;; - k) kubeprometheus=`echo "${OPTARG}" | tr "[:upper:]" "[:lower:]"` + i) valuesInputFile="${OPTARG}" ;; h) usage 0 ;; @@ -49,54 +78,44 @@ while getopts ":h:t:n:d:u:p:k:" opt; do esac done -if [ -z ${domainType} ]; then - echo "${script}: -t must be specified." - usage 1 -fi - -if [ -z ${namespace} ]; then - namespace="soans" -fi - -if [ -z ${domainUID} ]; then - domainUID="soainfra" +if [ -z ${valuesInputFile} ]; then + echo "${script}: -i must be specified." + missingRequiredOption="true" fi -if [ -z ${username} ]; then - username="weblogic" -fi - -if [ -z ${password} ]; then - password="Welcome1" +if [ "${missingRequiredOption}" == "true" ]; then + usage 1 fi -if [ -z ${kubeprometheus} ]; then - kubeprometheus="no" +if [ ! 
-f ${valuesInputFile} ]; then + echo "Unable to locate the input parameters file ${valuesInputFile}" + fail 'The error listed above must be resolved before the script can continue' fi +exportValuesFile=$(mktemp /tmp/export-values-XXXXXXXXX.sh) +parseYaml ${valuesInputFile} ${exportValuesFile} -adminServerName="AdminServer" -adminServerPort="7001" -function deletePrometheusGrafana { - - cd ${scriptDir}/kube-prometheus-0.5.0 - kubectl delete --ignore-not-found=true -f manifests/ - kubectl delete --ignore-not-found=true -f manifests/setup/ -} +source ${exportValuesFile} +rm ${exportValuesFile} # Setting up the WebLogic Monitoring Exporter echo "Undeploy WebLogic Monitoring Exporter started" -kubectl delete --ignore-not-found=true -f ${scriptDir}/manifests/ +serviceMonitor=${scriptDir}/manifests/wls-exporter-ServiceMonitor.yaml +kubectl delete --ignore-not-found=true -f ${serviceMonitor} script=${scriptDir}/scripts/undeploy-weblogic-monitoring-exporter.sh -sh ${script} ${domainType} ${namespace} ${domainUID} ${adminServerName} ${adminServerPort} ${username} ${password} - -echo "Undeploy WebLogic Monitoring Exporter completed" +sh ${script} +if [ "$?" != "0" ]; then + echo "ERROR: $script failed." + echo "Undeploy WebLogic Monitoring Exporter completed with errors. Review the logs and rerun" +else + echo "Undeploy WebLogic Monitoring Exporter completed." 
+fi -if [ "${kubeprometheus}" = "yes" ]; then +if [ "${setupKubePrometheusStack}" = "true" ]; then echo "Deleting Prometheus and grafana started" - deletePrometheusGrafana + deleteKubePrometheusStack echo "Deleting Prometheus and grafana completed" fi cd $OLD_PWD diff --git a/OracleSOASuite/kubernetes/monitoring-service/manifests/wls-exporter-ServiceMonitor.yaml.template b/OracleSOASuite/kubernetes/monitoring-service/manifests/wls-exporter-ServiceMonitor.yaml.template index 9fdd55b21..7fe1972d1 100755 --- a/OracleSOASuite/kubernetes/monitoring-service/manifests/wls-exporter-ServiceMonitor.yaml.template +++ b/OracleSOASuite/kubernetes/monitoring-service/manifests/wls-exporter-ServiceMonitor.yaml.template @@ -5,19 +5,20 @@ apiVersion: v1 kind: Secret metadata: name: basic-auth - namespace: monitoring + namespace: soans data: - password: V2VsY29tZTE= - user: d2VibG9naWM= + password: %PASSWORD% + user: %USERNAME% type: Opaque --- apiVersion: monitoring.coreos.com/v1 kind: ServiceMonitor metadata: name: wls-exporter - namespace: monitoring + namespace: soans labels: k8s-app: wls-exporter + release: monitoring spec: namespaceSelector: matchNames: diff --git a/OracleSOASuite/kubernetes/monitoring-service/monitoring-inputs.yaml b/OracleSOASuite/kubernetes/monitoring-service/monitoring-inputs.yaml new file mode 100755 index 000000000..5e16b9957 --- /dev/null +++ b/OracleSOASuite/kubernetes/monitoring-service/monitoring-inputs.yaml @@ -0,0 +1,64 @@ +# Copyright (c) 2020, 2021, Oracle and/or its affiliates. +# Licensed under the Universal Permissive License v 1.0 as shown at https://oss.oracle.com/licenses/upl. +# +# The version of this inputs file. Do not modify. +version: create-soainfra-monitoring-inputs-v1 + +# Unique ID identifying your domain. +# This ID must not contain an underscope ("_"), and must be lowercase and unique across all domains in a Kubernetes cluster. 
+domainUID: soainfra + +# Name of the domain namespace +domainNamespace: soans + +# Boolean value indicating whether to install kube-prometheus-stack +setupKubePrometheusStack: true + +# Additional parameters for helm install kube-prometheus-stack +# Refer https://github.com/prometheus-community/helm-charts/blob/main/charts/kube-prometheus-stack/values.yaml for additional parameters +# Sample : +# additionalParamForKubePrometheusStack: --set nodeExporter.enabled=false --set prometheusOperator.tls.enabled=false --set prometheusOperator.admissionWebhooks.enabled=false +additionalParamForKubePrometheusStack: + +# Name of the monitoring namespace +monitoringNamespace: monitoring + +# Name of the Admin Server +adminServerName: AdminServer +# +# Port number for admin server +adminServerPort: 7001 + +# Cluster name +soaClusterName: soa_cluster + +# Port number for managed server +soaManagedServerPort: 8001 + +# Boolean value indicating whether to deploy WebLogic Monitoring Exporter to soaCluster +wlsMonitoringExporterTosoaCluster: false + +# Cluster name +osbClusterName: osb_cluster + +# Port number for managed server +osbManagedServerPort: 9001 + +# Boolean value indicating whether to deploy WebLogic Monitoring Exporter to osbCluster +wlsMonitoringExporterToosbCluster: false + +# Boolean to indicate if the Monitoring Services will be exposed +exposeMonitoringNodePort: false + +# NodePort to expose Prometheus +prometheusNodePort: 32101 + +# NodePort to expose Grafana +grafanaNodePort: 32100 + +# NodePort to expose Alertmanager +alertmanagerNodePort: 32102 + +# Name of the Kubernetes secret for the Admin Server's username and password +weblogicCredentialsSecretName: soainfra-domain-credentials + diff --git a/OracleSOASuite/kubernetes/monitoring-service/scripts/deploy-weblogic-monitoring-exporter.py b/OracleSOASuite/kubernetes/monitoring-service/scripts/deploy-weblogic-monitoring-exporter.py index 7fe0e0f8e..880bf5f0d 100755 --- 
a/OracleSOASuite/kubernetes/monitoring-service/scripts/deploy-weblogic-monitoring-exporter.py +++ b/OracleSOASuite/kubernetes/monitoring-service/scripts/deploy-weblogic-monitoring-exporter.py @@ -19,28 +19,40 @@ def newDeploy(appName,target): #======================================================== def usage(): - print sys.arg[0] + '-domainName -domainType -adminServerName -adminURL -username -password ' + argsList = ' -domainName -adminServerName -adminURL -username -password ' + argsList=argsList + ' -soaClusterName ' + ' -wlsMonitoringExporterTosoaCluster ' + argsList=argsList + ' -osbClusterName ' + ' -wlsMonitoringExporterToosbCluster ' + print sys.argv[0] + argsList sys.exit(0) if len(sys.argv) < 1: usage() -#domainName will be passed by command line parameter -domainName. +# domainName will be passed by command line parameter -domainName domainName = "soainfra" -#domaintype will be passed by command line parameter -domaintype -domaintype = "soa" - -#adminServerName will be passed by command line parameter -adminServerName +# adminServerName will be passed by command line parameter -adminServerName adminServerName = "AdminServer" -#adminURL will be passed by command line parameter -adminURL +# adminURL will be passed by command line parameter -adminURL adminURL = "soainfra-adminserver:7001" -#username will be passed by command line parameter -username +# soaClusterName will be passed by command line parameter -soaClusterName +soaClusterName = "soaClusterName" + +# wlsMonitoringExporterTosoaCluster will be passed by command line parameter -wlsMonitoringExporterTosoaCluster +wlsMonitoringExporterTosoaCluster = "false" + +# osbClusterName will be passed by command line parameter -osbClusterName +osbClusterName = "osbClusterName" + +# wlsMonitoringExporterToosbCluster will be passed by command line parameter -wlsMonitoringExporterToosbCluster +wlsMonitoringExporterToosbCluster = "false" + +# username will be passed by command line parameter -username username = 
"weblogic" -#password will be passed by command line parameter -password +# password will be passed by command line parameter -password password = "Welcome1" i=1 @@ -48,9 +60,6 @@ def usage(): if sys.argv[i] == '-domainName': domainName = sys.argv[i+1] i += 2 - elif sys.argv[i] == '-domainType': - domaintype = sys.argv[i+1] - i += 2 elif sys.argv[i] == '-adminServerName': adminServerName = sys.argv[i+1] i += 2 @@ -63,6 +72,18 @@ def usage(): elif sys.argv[i] == '-password': password = sys.argv[i+1] i += 2 + elif sys.argv[i] == '-soaClusterName': + soaClusterName = sys.argv[i+1] + i += 2 + elif sys.argv[i] == '-wlsMonitoringExporterTosoaCluster': + wlsMonitoringExporterTosoaCluster = sys.argv[i+1] + i += 2 + elif sys.argv[i] == '-osbClusterName': + osbClusterName = sys.argv[i+1] + i += 2 + elif sys.argv[i] == '-wlsMonitoringExporterToosbCluster': + wlsMonitoringExporterToosbCluster = sys.argv[i+1] + i += 2 else: print 'Unexpected argument switch at position ' + str(i) + ': ' + str(sys.argv[i]) usage() @@ -72,11 +93,11 @@ def usage(): connect(username, password, 't3://' + adminURL) cd('AppDeployments') newDeploy('wls-exporter-adminserver',adminServerName) -if 'soa' in domaintype: - newDeploy('wls-exporter-soa','soa_cluster') +if 'true' == wlsMonitoringExporterTosoaCluster: + newDeploy('wls-exporter-soa',soaClusterName) -if 'osb' in domaintype: - newDeploy('wls-exporter-osb','osb_cluster') +if 'true' == wlsMonitoringExporterToosbCluster: + newDeploy('wls-exporter-osb',osbClusterName) disconnect() exit() diff --git a/OracleSOASuite/kubernetes/monitoring-service/scripts/deploy-weblogic-monitoring-exporter.sh b/OracleSOASuite/kubernetes/monitoring-service/scripts/deploy-weblogic-monitoring-exporter.sh index 941d449be..684521d87 100755 --- a/OracleSOASuite/kubernetes/monitoring-service/scripts/deploy-weblogic-monitoring-exporter.sh +++ b/OracleSOASuite/kubernetes/monitoring-service/scripts/deploy-weblogic-monitoring-exporter.sh @@ -6,15 +6,10 @@ script="${BASH_SOURCE[0]}" 
scriptDir="$( cd "$( dirname "${script}" )" && pwd )" warDir=$PWD +source ${scriptDir}/utils.sh -domainType=${1:-soa} -namespace=${2:-soans} -domainUID=${3:-soainfra} -adminServerName=${4:-AdminServer} -adminServerPort=${5:-7001} -username=${6:-weblogic} -password=${7:Welcome1} - +# Setting default values +initialize # Function to lowercase a value and make it a legal DNS1123 name # $1 - value to convert to lowercase function toDNS1123Legal { @@ -23,13 +18,22 @@ function toDNS1123Legal { echo "$val" } +# username and password from Kubernetes secret +username=`kubectl get secrets ${weblogicCredentialsSecretName} -n ${domainNamespace} -o=jsonpath='{.data.username}'|base64 --decode` +password=`kubectl get secrets ${weblogicCredentialsSecretName} -n ${domainNamespace} -o=jsonpath='{.data.password}'|base64 --decode` + + adminServerPodName="${domainUID}-$(toDNS1123Legal ${adminServerName})" -echo "Deploying WebLogic Monitoring Exporter with namespace[$namespace], domainUID[$domainUID], domainType[$domainType]" -. 
$scriptDir/get-wls-exporter.sh $domainType -kubectl cp $scriptDir/wls-exporter-deploy ${namespace}/${adminServerPodName}:/u01/oracle -kubectl cp $scriptDir/deploy-weblogic-monitoring-exporter.py ${namespace}/${adminServerPodName}:/u01/oracle/wls-exporter-deploy -EXEC_DEPLOY="kubectl exec -it -n ${namespace} ${adminServerPodName} -- /u01/oracle/oracle_common/common/bin/wlst.sh /u01/oracle/wls-exporter-deploy/deploy-weblogic-monitoring-exporter.py -domainType ${domainType} -domainName ${domainUID} -adminServerName ${adminServerName} -adminURL ${adminServerPodName}:${adminServerPort} -username ${username} -password ${password}" +InputParameterList=" -domainName ${domainUID} -adminServerName ${adminServerName} -adminURL ${adminServerPodName}:${adminServerPort} -username ${username} -password ${password}" +InputParameterList="${InputParameterList} -soaClusterName ${soaClusterName} -wlsMonitoringExporterTosoaCluster ${wlsMonitoringExporterTosoaCluster}" +InputParameterList="${InputParameterList} -osbClusterName ${osbClusterName} -wlsMonitoringExporterToosbCluster ${wlsMonitoringExporterToosbCluster}" + +echo "Deploying WebLogic Monitoring Exporter with domainNamespace[$domainNamespace], domainUID[$domainUID], adminServerPodName[$adminServerPodName]" +. 
$scriptDir/get-wls-exporter.sh +kubectl cp $scriptDir/wls-exporter-deploy ${domainNamespace}/${adminServerPodName}:/u01/oracle +kubectl cp $scriptDir/deploy-weblogic-monitoring-exporter.py ${domainNamespace}/${adminServerPodName}:/u01/oracle/wls-exporter-deploy +EXEC_DEPLOY="kubectl exec -it -n ${domainNamespace} ${adminServerPodName} -- /u01/oracle/oracle_common/common/bin/wlst.sh /u01/oracle/wls-exporter-deploy/deploy-weblogic-monitoring-exporter.py ${InputParameterList}" eval ${EXEC_DEPLOY} diff --git a/OracleSOASuite/kubernetes/monitoring-service/scripts/get-wls-exporter.sh b/OracleSOASuite/kubernetes/monitoring-service/scripts/get-wls-exporter.sh index b958b1e99..b59bb42c3 100755 --- a/OracleSOASuite/kubernetes/monitoring-service/scripts/get-wls-exporter.sh +++ b/OracleSOASuite/kubernetes/monitoring-service/scripts/get-wls-exporter.sh @@ -5,7 +5,7 @@ # Initialize script="${BASH_SOURCE[0]}" scriptDir="$( cd "$( dirname "${script}" )" && pwd )" -domainType=$1 +source ${scriptDir}/utils.sh warDir=$scriptDir/../bin mkdir -p $warDir curl -L -o $warDir/wls-exporter.war https://github.com/oracle/weblogic-monitoring-exporter/releases/download/v2.0.0/wls-exporter.war @@ -32,19 +32,15 @@ function update_wls_exporter_war { popd } -if [[ -z ${domainType} ]]; -then - domainType="soa" -fi - +initialize -update_wls_exporter_war adminserver 7001 -if [[ $domainType =~ "soa" ]]; +update_wls_exporter_war adminserver ${adminServerPort} +if [[ ${wlsMonitoringExporterTosoaCluster} == "true" ]]; then - update_wls_exporter_war soa 8001 + update_wls_exporter_war soa ${soaManagedServerPort} fi -if [[ $domainType =~ "osb" ]]; +if [[ ${wlsMonitoringExporterToosbCluster} == "true" ]]; then - update_wls_exporter_war osb 9001 + update_wls_exporter_war osb ${osbManagedServerPort} fi diff --git a/OracleSOASuite/kubernetes/monitoring-service/scripts/undeploy-weblogic-monitoring-exporter.py b/OracleSOASuite/kubernetes/monitoring-service/scripts/undeploy-weblogic-monitoring-exporter.py index 
46da9db97..51767c5d8 100755 --- a/OracleSOASuite/kubernetes/monitoring-service/scripts/undeploy-weblogic-monitoring-exporter.py +++ b/OracleSOASuite/kubernetes/monitoring-service/scripts/undeploy-weblogic-monitoring-exporter.py @@ -18,28 +18,40 @@ def unDeploy(appName,target): # Target you can change as per your need #======================================================== def usage(): - print sys.arg[0] + '-domainName -domainType -adminServerName -adminURL -username -password ' + argsList = ' -domainName -adminServerName -adminURL -username -password ' + argsList=argsList + ' -soaClusterName ' + ' -wlsMonitoringExporterTosoaCluster ' + argsList=argsList + ' -osbClusterName ' + ' -wlsMonitoringExporterToosbCluster ' + print sys.argv[0] + argsList sys.exit(0) if len(sys.argv) < 1: usage() -#domainName will be passed by command line parameter -domainName. +# domainName will be passed by command line parameter -domainName. domainName = "soainfra" -#domaintype will be passed by command line parameter -domaintype -domaintype = "soa" - # adminServerName will be passed by command line parameter -adminServerName adminServerName = "AdminServer" # adminURL will be passed by command line parameter -adminURL adminURL = "soainfra-adminserver:7001" -#username will be passed by command line parameter -username +# soaClusterName will be passed by command line parameter -soaClusterName +soaClusterName = "soaClusterName" + +# wlsMonitoringExporterTosoaCluster will be passed by command line parameter -wlsMonitoringExporterTosoaCluster +wlsMonitoringExporterTosoaCluster = "false" + +# osbClusterName will be passed by command line parameter -osbClusterName +osbClusterName = "osbClusterName" + +# wlsMonitoringExporterToosbCluster will be passed by command line parameter -wlsMonitoringExporterToosbCluster +wlsMonitoringExporterToosbCluster = "false" + +# username will be passed by command line parameter -username username = "weblogic" -#password will be passed by command line parameter 
-password +# password will be passed by command line parameter -password password = "Welcome1" @@ -48,9 +60,6 @@ def usage(): if sys.argv[i] == '-domainName': domainName = sys.argv[i+1] i += 2 - elif sys.argv[i] == '-domainType': - domaintype = sys.argv[i+1] - i += 2 elif sys.argv[i] == '-adminServerName': adminServerName = sys.argv[i+1] i += 2 @@ -63,20 +72,32 @@ def usage(): elif sys.argv[i] == '-password': password = sys.argv[i+1] i += 2 + elif sys.argv[i] == '-soaClusterName': + soaClusterName = sys.argv[i+1] + i += 2 + elif sys.argv[i] == '-wlsMonitoringExporterTosoaCluster': + wlsMonitoringExporterTosoaCluster = sys.argv[i+1] + i += 2 + elif sys.argv[i] == '-osbClusterName': + osbClusterName = sys.argv[i+1] + i += 2 + elif sys.argv[i] == '-wlsMonitoringExporterToosbCluster': + wlsMonitoringExporterToosbCluster = sys.argv[i+1] + i += 2 + else: print 'Unexpected argument switch at position ' + str(i) + ': ' + str(sys.argv[i]) usage() sys.exit(1) -#Undeploy +# Undeploy connect(username, password, 't3://' + adminURL) unDeploy('wls-exporter-adminserver',adminServerName) -if 'soa' in domaintype: - unDeploy('wls-exporter-soa','soa_cluster') +if 'true' == wlsMonitoringExporterTosoaCluster: + unDeploy('wls-exporter-soa',soaClusterName) -if 'osb' in domaintype: - unDeploy('wls-exporter-osb','osb_cluster') +if 'true' == wlsMonitoringExporterToosbCluster: + unDeploy('wls-exporter-osb',osbClusterName) disconnect() exit() - diff --git a/OracleSOASuite/kubernetes/monitoring-service/scripts/undeploy-weblogic-monitoring-exporter.sh b/OracleSOASuite/kubernetes/monitoring-service/scripts/undeploy-weblogic-monitoring-exporter.sh index 991c0c8d0..7bb45a42a 100755 --- a/OracleSOASuite/kubernetes/monitoring-service/scripts/undeploy-weblogic-monitoring-exporter.sh +++ b/OracleSOASuite/kubernetes/monitoring-service/scripts/undeploy-weblogic-monitoring-exporter.sh @@ -5,15 +5,7 @@ # Initialize script="${BASH_SOURCE[0]}" scriptDir="$( cd "$( dirname "${script}" )" && pwd )" - 
-domainType=${1:-soa} -namespace=${2:-soans} -domainUID=${3:-soainfra} -adminServerName=${4:-AdminServer} -adminServerPort=${5:-7001} -username=${6:-weblogic} -password=${7:Welcome1} - +source ${scriptDir}/utils.sh # Function to lowercase a value and make it a legal DNS1123 name # $1 - value to convert to lowercase @@ -23,16 +15,24 @@ function toDNS1123Legal { echo "$val" } +initialize + +# username and password from Kubernetes secret +username=`kubectl get secrets ${weblogicCredentialsSecretName} -n ${domainNamespace} -o=jsonpath='{.data.username}'|base64 --decode` +password=`kubectl get secrets ${weblogicCredentialsSecretName} -n ${domainNamespace} -o=jsonpath='{.data.password}'|base64 --decode` + adminServerPodName="${domainUID}-$(toDNS1123Legal ${adminServerName})" +InputParameterList="-domainName ${domainUID} -adminServerName ${adminServerName} -adminURL ${adminServerPodName}:${adminServerPort} -username ${username} -password ${password}" +InputParameterList="${InputParameterList} -soaClusterName ${soaClusterName} -wlsMonitoringExporterTosoaCluster ${wlsMonitoringExporterTosoaCluster}" +InputParameterList="${InputParameterList} -osbClusterName ${osbClusterName} -wlsMonitoringExporterToosbCluster ${wlsMonitoringExporterToosbCluster}" + # Copy weblogic monitoring exporter jars for deployment -echo "Undeploying WebLogic Monitoring Exporter: namespace[$namespace], domainUID[$domainUID], domainType[$domainType]" +echo "Undeploying WebLogic Monitoring Exporter: domainNamespace[$domainNamespace], domainUID[$domainUID], adminServerPodName[$adminServerPodName]" -kubectl cp $scriptDir/undeploy-weblogic-monitoring-exporter.py ${namespace}/${adminServerPodName}:/u01/oracle/undeploy-weblogic-monitoring-exporter.py -EXEC_UNDEPLOY="kubectl exec -it -n ${namespace} ${adminServerPodName} -- /u01/oracle/oracle_common/common/bin/wlst.sh /u01/oracle/undeploy-weblogic-monitoring-exporter.py -domainType ${domainType} -domainName ${domainUID} -adminServerName ${adminServerName} 
-adminURL ${adminServerPodName}:${adminServerPort} -username ${username} -password ${password}" +kubectl cp $scriptDir/undeploy-weblogic-monitoring-exporter.py ${domainNamespace}/${adminServerPodName}:/u01/oracle/undeploy-weblogic-monitoring-exporter.py +EXEC_UNDEPLOY="kubectl exec -it -n ${domainNamespace} ${adminServerPodName} -- /u01/oracle/oracle_common/common/bin/wlst.sh /u01/oracle/undeploy-weblogic-monitoring-exporter.py ${InputParameterList}" eval ${EXEC_UNDEPLOY} # Cleanup the local wars -rm -f ${scriptDir}/wls-exporter-deploy/* -rmdir -f ${scriptDir}/wls-exporter-deploy - +rm -rf ${scriptDir}/wls-exporter-deploy diff --git a/OracleSOASuite/kubernetes/monitoring-service/scripts/utils.sh b/OracleSOASuite/kubernetes/monitoring-service/scripts/utils.sh new file mode 100755 index 000000000..58e880ba1 --- /dev/null +++ b/OracleSOASuite/kubernetes/monitoring-service/scripts/utils.sh @@ -0,0 +1,61 @@ +#!/bin/bash +# Copyright (c) 2021, Oracle and/or its affiliates. +# Licensed under the Universal Permissive License v 1.0 as shown at https://oss.oracle.com/licenses/upl. 
+# + +function initialize { + if [ -z ${domainNamespace} ]; then + echo "domainNamespace is empty, setting to default soans" + domainNamespace="soans" + fi + + if [ -z ${domainUID} ]; then + echo "domainUID is empty, setting to default soainfra" + domainUID="soainfra" + fi + + if [ -z ${weblogicCredentialsSecretName} ]; then + echo "weblogicCredentialsSecretName is empty, setting to default \"soainfra-domain-credentials\"" + weblogicCredentialsSecretName="soainfra-domain-credentials" + fi + + if [ -z ${adminServerName} ]; then + echo "adminServerName is empty, setting to default \"AdminServer\"" + adminServerName="AdminServer" + fi + + if [ -z ${adminServerPort} ]; then + echo "adminServerPort is empty, setting to default \"7001\"" + adminServerPort="7001" + fi + + if [ -z ${soaClusterName} ]; then + echo "soaClusterName is empty, setting to default \"soa_cluster\"" + soaClusterName="soa_cluster" + fi + + if [ -z ${soaManagedServerPort} ]; then + echo "soaManagedServerPort is empty, setting to default \"8001\"" + soaManagedServerPort="8001" + fi + + if [ -z ${wlsMonitoringExporterTosoaCluster} ]; then + echo "wlsMonitoringExporterTosoaCluster is empty, setting to default \"false\"" + wlsMonitoringExporterTosoaCluster="false" + fi + if [ -z ${osbClusterName} ]; then + echo "osbClusterName is empty, setting to default \"osb_cluster\"" + osbClusterName="osb_cluster" + fi + + if [ -z ${osbManagedServerPort} ]; then + echo "osbManagedServerPort is empty, setting to default \"9001\"" + osbManagedServerPort="9001" + fi + + if [ -z ${wlsMonitoringExporterToosbCluster} ]; then + echo "wlsMonitoringExporterToosbCluster is empty, setting to default \"false\"" + wlsMonitoringExporterToosbCluster="false" + fi +} + diff --git a/OracleSOASuite/kubernetes/monitoring-service/setup-monitoring.sh b/OracleSOASuite/kubernetes/monitoring-service/setup-monitoring.sh index 330466365..c36b4bb82 100755 --- a/OracleSOASuite/kubernetes/monitoring-service/setup-monitoring.sh +++ 
b/OracleSOASuite/kubernetes/monitoring-service/setup-monitoring.sh @@ -10,98 +10,6 @@ scriptDir="$( cd "$( dirname "${script}" )" && pwd )" OLD_PWD=`pwd` -function usage { - echo "usage: ${script} -t -n -d -u -p -k -l -m -n [-h]" - echo " -t Domain Type. (required)" - echo " -n Domain namespace (optional)" - echo " (default: soans)" - echo " -d domainUID of Domain. (optional)" - echo " (default: soainfra)" - echo " -u username. (optional)" - echo " (default: weblogic)" - echo " -p password. (optional)" - echo " (default: Welcome1)" - echo " -k Setup Prometheus and Grafana in monitoring namespace? If \"no\", script assumes setup is already available in monitoring namespace. (optional)" - echo " (default: yes)" - echo " -l Prometheus NodePort. (optional)" - echo " (default: 32101)" - echo " -m Grafana NodePort. (optional)" - echo " (default: 32100)" - echo " -n Altermanager NodePort. (optional)" - echo " (default: 32102)" - echo " -h Help" - exit $1 -} - - -while getopts ":h:t:n:d:u:p:k:l:m:n:" opt; do - case $opt in - t) domainType="${OPTARG}" - ;; - n) namespace="${OPTARG}" - ;; - d) domainUID="${OPTARG}" - ;; - u) username="${OPTARG}" - ;; - p) password="${OPTARG}" - ;; - k) kubeprometheus=`echo "${OPTARG}" | tr "[:upper:]" "[:lower:]"` - ;; - l) prometheusNodePort="${OPTARG}" - ;; - m) grafanaNodePort="${OPTARG}" - ;; - n) alertmanagerNodePort="${OPTARG}" - ;; - h) usage 0 - ;; - *) usage 1 - ;; - esac -done - -# Setting default values -if [ -z ${domainType} ]; then - echo "${script}: -t must be specified." 
- usage 1 -fi - -if [ -z ${namespace} ]; then - namespace="soans" -fi - - -if [ -z ${domainUID} ]; then - domainUID="soainfra" -fi - -if [ -z ${username} ]; then - username="weblogic" -fi - -if [ -z ${password} ]; then - password="Welcome1" -fi - -if [ -z ${kubeprometheus} ]; then - kubeprometheus="yes" -fi - -if [ -z ${prometheusNodePort} ]; then - prometheusNodePort="32101" -fi - -if [ -z ${grafanaNodePort} ]; then - grafanaNodePort="32100" -fi - -if [ -z ${alertmanagerNodePort} ]; then - alertmanagerNodePort="32102" -fi - -adminServerName="AdminServer" -adminServerPort="7001" # # Function to exit and print an error message @@ -116,6 +24,17 @@ function printError { echo [ERROR] $* } + +# +# Function to remove a file if it exists +# +function removeFileIfExists { + echo "input is $1" + if [ -f $1 ]; then + rm -f $1 + fi +} + function exitIfError { if [ "$1" != "0" ]; then echo "$2" @@ -123,112 +42,151 @@ function exitIfError { fi } -function getKubernetesClusterIP { +# +# Function to parse a yaml file and generate the bash exports +# $1 - Input filename +# $2 - Output filename +function parseYaml { + local s='[[:space:]]*' w='[a-zA-Z0-9_]*' fs=$(echo @|tr @ '\034') + sed -ne "s|^\($s\):|\1|" \ + -e "s|^\($s\)\($w\)$s:$s[\"']\(.*\)[\"']$s\$|\1$fs\2$fs\3|p" \ + -e "s|^\($s\)\($w\)$s:$s\(.*\)$s\$|\1$fs\2$fs\3|p" $1 | + awk -F$fs '{ + if (length($3) > 0) { + # javaOptions may contain tokens that are not allowed in export command + # we need to handle it differently. + if ($2=="javaOptions") { + printf("%s=%s\n", $2, $3); + } else { + printf("export %s=\"%s\"\n", $2, $3); + } + } + }' > $2 +} + +function usage { + echo usage: ${script} -i file [-v] [-h] + echo " -i Parameter inputs file, must be specified." 
+ echo " -h Help" + exit $1 +} - # Get name of the current context - local CUR_CTX=`kubectl config current-context | awk ' { print $1; } '` +function installKubePrometheusStack { + helm repo add prometheus-community https://prometheus-community.github.io/helm-charts + helm repo update + echo "Setup prometheus-community/kube-prometheus-stack in progress" + if [ ${exposeMonitoringNodePort} == "true" ]; then + + helm install ${monitoringNamespace} prometheus-community/kube-prometheus-stack \ + --namespace ${monitoringNamespace} \ + --set prometheus.service.type=NodePort --set prometheus.service.nodePort=${prometheusNodePort} \ + --set alertmanager.service.type=NodePort --set alertmanager.service.nodePort=${alertmanagerNodePort} \ + --set grafana.adminPassword=admin --set grafana.service.type=NodePort --set grafana.service.nodePort=${grafanaNodePort} \ + --version "16.5.0" ${additionalParamForKubePrometheusStack} \ + --atomic --wait + else + helm install ${monitoringNamespace} prometheus-community/kube-prometheus-stack \ + --namespace ${monitoringNamespace} \ + --set grafana.adminPassword=admin \ + --version "16.5.0" ${additionalParamForKubePrometheusStack} \ + --atomic --wait + fi + exitIfError $? "ERROR: prometheus-community/kube-prometheus-stack install failed." +} +#Parse the inputs +while getopts "hi:" opt; do + case $opt in + i) valuesInputFile="${OPTARG}" + ;; + h) usage 0 + ;; + *) usage 1 + ;; + esac +done - # Get the name of the current cluster - local CUR_CLUSTER_CMD="kubectl config view -o jsonpath='{.contexts[?(@.name == \"${CUR_CTX}\")].context.cluster}' | awk ' { print $1; } '" - local CUR_CLUSTER=`eval ${CUR_CLUSTER_CMD}` +if [ -z ${valuesInputFile} ]; then + echo "${script}: -i must be specified." 
+ missingRequiredOption="true" +fi - # Get the server address for the current cluster - local SVR_ADDR_CMD="kubectl config view -o jsonpath='{.clusters[?(@.name == \"${CUR_CLUSTER}\")].cluster.server}' | awk ' { print $1; } '" - local SVR_ADDR=`eval ${SVR_ADDR_CMD}` +if [ "${missingRequiredOption}" == "true" ]; then + usage 1 +fi - # Server address is expected to be of the form http://address:port. Delimit - # string on the colon to obtain the address. - local array=(${SVR_ADDR//:/ }) - K8S_IP="${array[1]/\/\//}" +if [ ! -f ${valuesInputFile} ]; then + echo "Unable to locate the input parameters file ${valuesInputFile}" + fail 'The error listed above must be resolved before the script can continue' +fi -} +exportValuesFile=$(mktemp /tmp/export-values-XXXXXXXXX.sh) +parseYaml ${valuesInputFile} ${exportValuesFile} -function setupPrometheusGrafana { - cd ${scriptDir} - rm -rf kube-prometheus-0.5.0 - wget -q -c https://github.com/prometheus-operator/kube-prometheus/archive/refs/tags/v0.5.0.tar.gz -O -| tar -zx - cd kube-prometheus-0.5.0 - sh scripts/monitoring-deploy.sh -} +source ${exportValuesFile} +rm ${exportValuesFile} -if [ "${kubeprometheus}" = "yes" ]; then - echo -e "Prometheus and Grafana setup in monitoring namespace in progress.......\n" +if [ "${setupKubePrometheusStack}" = "true" ]; then + if test "$(kubectl get namespace ${monitoringNamespace} --ignore-not-found | wc -l)" = 0; then + echo "The namespace ${monitoringNamespace} for install prometheus-community/kube-prometheus-stack does not exist. 
Creating the namespace ${monitoringNamespace}" + kubectl create namespace ${monitoringNamespace} + fi + echo -e "Monitoring setup in ${monitoringNamespace} in progress\n" # Create the namespace and CRDs, and then wait for them to be availble before creating the remaining resources kubectl label nodes --all kubernetes.io/os=linux --overwrite=true - echo "Seting up Prometheus and grafana started" - setupPrometheusGrafana + echo "Setup prometheus-community/kube-prometheus-stack started" + installKubePrometheusStack cd $OLD_PWD - # Wait for resources to be available - kubectl -n monitoring rollout status --watch --timeout=600s daemonset.apps/node-exporter - kubectl -n monitoring rollout status --watch --timeout=600s deployment.apps/grafana - kubectl -n monitoring rollout status --watch --timeout=600s deployment.apps/kube-state-metrics - kubectl -n monitoring rollout status --watch --timeout=600s deployment.apps/prometheus-adapter - kubectl -n monitoring rollout status --watch --timeout=600s deployment.apps/prometheus-operator - kubectl -n monitoring rollout status --watch --timeout=600s statefulset.apps/alertmanager-main - kubectl -n monitoring rollout status --watch --timeout=600s statefulset.apps/prometheus-k8s - - echo "Seting up Prometheus and grafana completed" - - # Expose the monitoring service using NodePort - - SET_NODEPORT_GRAFANA="kubectl patch svc grafana -n monitoring --type=json -p '[{"op": "replace", "path": "/spec/type", "value": "NodePort" },{"op": "replace", "path": "/spec/ports/0/nodePort", "value": ${grafanaNodePort} }]'" - SET_NODEPORT_PROMETHEUS="kubectl patch svc prometheus-k8s -n monitoring --type=json -p '[{"op": "replace", "path": "/spec/type", "value": "NodePort" },{"op": "replace", "path": "/spec/ports/0/nodePort", "value": ${prometheusNodePort} }]'" - SET_NODEPORT_ALERTMANAGER="kubectl patch svc alertmanager-main -n monitoring --type=json -p '[{"op": "replace", "path": "/spec/type", "value": "NodePort" },{"op": "replace", "path": 
"/spec/ports/0/nodePort", "value": ${alertmanagerNodePort} }]'" - eval ${SET_NODEPORT_GRAFANA} - eval ${SET_NODEPORT_PROMETHEUS} - eval ${SET_NODEPORT_ALERTMANAGER} -else - if test "$(kubectl get namespace monitoring --ignore-not-found | wc -l)" = 0; then - fail "The monitoring namespace does not exist. Run ${script} with \"-k yes\" to setup monitoring" - fi + echo "Setup prometheus-community/kube-prometheus-stack completed" fi +username=`kubectl get secrets ${weblogicCredentialsSecretName} -n ${domainNamespace} -o=jsonpath='{.data.username}'|base64 --decode` +password=`kubectl get secrets ${weblogicCredentialsSecretName} -n ${domainNamespace} -o=jsonpath='{.data.password}'|base64 --decode` + # Setting up the WebLogic Monitoring Exporter echo "Deploy WebLogic Monitoring Exporter started" script=${scriptDir}/scripts/deploy-weblogic-monitoring-exporter.sh -sh ${script} ${domainType} ${namespace} ${domainUID} ${adminServerName} ${adminServerPort} ${username} ${password} +sh ${script} exitIfError $? "ERROR: $script failed." 
echo "Deploy WebLogic Monitoring Exporter completed" # Deploy servicemonitors serviceMonitor=${scriptDir}/manifests/wls-exporter-ServiceMonitor.yaml -cp ${serviceMonitor}.template ${serviceMonitor} -sed -i -e "s/user:.*/user: `echo -n $username|base64 -w0`/g" ${serviceMonitor} -sed -i -e "s/password: V2VsY29tZTE=/password: `echo -n $password|base64 -w0`/g" ${serviceMonitor} +cp "${serviceMonitor}.template" "${serviceMonitor}" +sed -i -e "s/release: monitoring/release: ${monitoringNamespace}/g" ${serviceMonitor} +sed -i -e "s/user: %USERNAME%/user: `echo -n $username|base64 -w0`/g" ${serviceMonitor} +sed -i -e "s/password: %PASSWORD%/password: `echo -n $password|base64 -w0`/g" ${serviceMonitor} +sed -i -e "s/namespace:.*/namespace: ${domainNamespace}/g" ${serviceMonitor} sed -i -e "s/weblogic.domainName:.*/weblogic.domainName: ${domainUID}/g" ${serviceMonitor} -sed -i -e "$!N;s/matchNames:\n -.*/matchNames:\n - ${namespace}/g;P;D" ${serviceMonitor} -kubectl apply -f ${serviceMonitor} - -roleBinding=${scriptDir}/manifests/prometheus-roleBinding-domain-namespace.yaml -sed -i -e "s/namespace: soans/namespace: ${namespace}/g" ${roleBinding} -kubectl apply -f ${roleBinding} +sed -i -e "$!N;s/matchNames:\n -.*/matchNames:\n - ${domainNamespace}/g;P;D" ${serviceMonitor} -roleSpecific=${scriptDir}/manifests/prometheus-roleSpecific-domain-namespace.yaml -sed -i -e "s/namespace: soans/namespace: ${namespace}/g" ${roleSpecific} -kubectl apply -f ${roleSpecific} - -# get the Master IP to access Grafana -getKubernetesClusterIP +kubectl apply -f ${serviceMonitor} -if [[ ($K8S_IP != "") && ("${kubeprometheus}" = "yes") ]]; then +if [ "${setupKubePrometheusStack}" = "true" ]; then # Deploying WebLogic Server Grafana Dashboard echo "Deploying WebLogic Server Grafana Dashboard...." 
- curl --noproxy "*" -X POST -H "Content-Type: application/json" -d @config/weblogic-server-dashboard.json http://admin:admin@$K8S_IP:${grafanaNodePort}/api/dashboards/db - echo "Deployed WebLogic Server Grafana Dashboard successfully" + grafanaEndpointIP=$(kubectl get endpoints ${monitoringNamespace}-grafana -n ${monitoringNamespace} -o=jsonpath="{.subsets[].addresses[].ip}") + grafanaEndpointPort=$(kubectl get endpoints ${monitoringNamespace}-grafana -n ${monitoringNamespace} -o=jsonpath="{.subsets[].ports[].port}") + grafanaEndpoint="${grafanaEndpointIP}:${grafanaEndpointPort}" + curl --noproxy "*" -X POST -H "Content-Type: application/json" -d @config/weblogic-server-dashboard.json http://admin:admin@${grafanaEndpoint}/api/dashboards/db echo "" - echo "Grafana is available at NodePort: ${grafanaNodePort}" - echo "Prometheus is available at NodePort: ${prometheusNodePort}" - echo "Altermanager is available at NodePort: ${alertmanagerNodePort}" + echo "Deployed WebLogic Server Grafana Dashboard successfully" echo "" - echo "=======================================================" + if [ ${exposeMonitoringNodePort} == "true" ]; then + echo "Grafana is available at NodePort: ${grafanaNodePort}" + echo "Prometheus is available at NodePort: ${prometheusNodePort}" + echo "Alertmanager is available at NodePort: ${alertmanagerNodePort}" + echo "==============================================================" + fi else - echo "WARNING !!!! - Could not import WebLogic Server Grafana Dashboard as Grafana details not available" - echo "WARNING !!!! 
- Please import config/weblogic-server-dashboard.json manually into Grafana" + echo "Please import config/weblogic-server-dashboard.json manually into Grafana" fi + +echo "" + diff --git a/OracleSOASuite/kubernetes/rest/README.md b/OracleSOASuite/kubernetes/rest/README.md old mode 100644 new mode 100755 diff --git a/OracleSOASuite/kubernetes/scaling/scalingAction.sh b/OracleSOASuite/kubernetes/scaling/scalingAction.sh new file mode 100755 index 000000000..0da098e68 --- /dev/null +++ b/OracleSOASuite/kubernetes/scaling/scalingAction.sh @@ -0,0 +1,504 @@ +#!/bin/bash +# Copyright (c) 2017, 2021, Oracle and/or its affiliates. +# Licensed under the Universal Permissive License v 1.0 as shown at https://oss.oracle.com/licenses/upl. + +# script parameters +scaling_action="" +wls_domain_uid="" +wls_cluster_name="" +wls_domain_namespace="default" +operator_service_name="internal-weblogic-operator-svc" +operator_namespace="weblogic-operator" +operator_service_account="weblogic-operator" +scaling_size=1 +access_token="" +no_op="" +kubernetes_master="https://${KUBERNETES_SERVICE_HOST}:${KUBERNETES_SERVICE_PORT}" +log_file_name="scalingAction.log" + +# timestamp +# purpose: echo timestamp in the form yyyy-mm-ddThh:mm:ss.nnnnnnZ +# example: 2018-10-01T14:00:00.000001Z +function timestamp() { + local timestamp="`date --utc '+%Y-%m-%dT%H:%M:%S.%NZ' 2>&1`" + if [ ! 
"${timestamp/illegal/xyz}" = "${timestamp}" ]; then + # old shell versions don't support %N or --utc + timestamp="`date -u '+%Y-%m-%dT%H:%M:%S.000000Z' 2>&1`" + fi + echo "${timestamp}" +} + +function trace() { + echo "@[$(timestamp)][$wls_domain_namespace][$wls_domain_uid][$wls_cluster_name][INFO]" "$@" >> ${log_file_name} +} + +function print_usage() { + echo "Usage: scalingAction.sh --action=[scaleUp | scaleDown] --domain_uid= --cluster_name= [--kubernetes_master=https://${KUBERNETES_SERVICE_HOST}:${KUBERNETES_SERVICE_PORT}] [--access_token=] [--wls_domain_namespace=default] [--operator_namespace=weblogic-operator] [--operator_service_name=weblogic-operator] [--scaling_size=1] [--no_op]" + echo " where" + echo " action - scaleUp or scaleDown" + echo " domain_uid - WebLogic Domain Unique Identifier" + echo " cluster_name - WebLogic Cluster Name" + echo " kubernetes_master - Kubernetes master URL, default=https://${KUBERNETES_SERVICE_HOST}:${KUBERNETES_SERVICE_PORT}" + echo " access_token - Service Account Bearer token for authentication and authorization for access to REST Resources" + echo " wls_domain_namespace - Kubernetes name space WebLogic Domain is defined in, default=default" + echo " operator_service_name - WebLogic Operator Service name, default=internal-weblogic-operator-svc" + echo " operator_service_account - Kubernetes Service Account for WebLogic Operator, default=weblogic-operator" + echo " operator_namespace - WebLogic Operator Namespace, default=weblogic-operator" + echo " scaling_size - number of WebLogic server instances by which to scale up or down, default=1" + echo " no_op - if specified, returns without doing anything. 
For use by unit test to include methods in the script" + exit 1 +} + +# Retrieve WebLogic Operator Service Account Token for Authorization +function initialize_access_token() { + if [ -z "$access_token" ] + then + access_token=`cat /var/run/secrets/kubernetes.io/serviceaccount/token` + fi +} + +function logScalingParameters() { + trace "scaling_action: $scaling_action" + trace "wls_domain_uid: $wls_domain_uid" + trace "wls_cluster_name: $wls_cluster_name" + trace "wls_domain_namespace: $wls_domain_namespace" + trace "operator_service_name: $operator_service_name" + trace "operator_service_account: $operator_service_account" + trace "operator_namespace: $operator_namespace" + trace "scaling_size: $scaling_size" +} + +function jq_available() { + if [ -x "$(command -v jq)" ] && [ -z "$DONT_USE_JQ" ]; then + return; + fi + false +} + +# Query WebLogic Operator Service Port +function get_operator_internal_rest_port() { + local STATUS=$(curl \ + -v \ + --cacert /var/run/secrets/kubernetes.io/serviceaccount/ca.crt \ + -H "Authorization: Bearer $(cat /var/run/secrets/kubernetes.io/serviceaccount/token)" \ + -X GET $kubernetes_master/api/v1/namespaces/$operator_namespace/services/$operator_service_name/status) + if [ $? 
-ne 0 ] + then + trace "Failed to retrieve status of $operator_service_name in name space: $operator_namespace" + trace "STATUS: $STATUS" + exit 1 + fi + + local port + if jq_available; then + local extractPortCmd="(.spec.ports[] | select (.name == \"rest\") | .port)" + port=$(echo "${STATUS}" | jq "${extractPortCmd}" 2>> ${log_file_name}) + else +cat > cmds-$$.py << INPUT +import sys, json +for i in json.load(sys.stdin)["spec"]["ports"]: + if i["name"] == "rest": + print(i["port"]) +INPUT +port=$(echo "${STATUS}" | python cmds-$$.py 2>> ${log_file_name}) + fi + echo "$port" +} + +# Retrieve the api version of the deployed Custom Resource Domain +function get_domain_api_version() { + # Retrieve Custom Resource Definition for WebLogic domain + local APIS=$(curl \ + -v \ + --cacert /var/run/secrets/kubernetes.io/serviceaccount/ca.crt \ + -H "Authorization: Bearer $(cat /var/run/secrets/kubernetes.io/serviceaccount/token)" \ + -X GET \ + $kubernetes_master/apis) + if [ $? -ne 0 ] + then + trace "Failed to retrieve list of APIs from Kubernetes cluster" + trace "APIS: $APIS" + exit 1 + fi + +# Find domain version + local domain_api_version + if jq_available; then + local extractVersionCmd="(.groups[] | select (.name == \"weblogic.oracle\") | .preferredVersion.version)" + domain_api_version=$(echo "${APIS}" | jq -r "${extractVersionCmd}" 2>> ${log_file_name}) + else +cat > cmds-$$.py << INPUT +import sys, json +for i in json.load(sys.stdin)["groups"]: + if i["name"] == "weblogic.oracle": + print(i["preferredVersion"]["version"]) +INPUT +domain_api_version=`echo ${APIS} | python cmds-$$.py 2>> ${log_file_name}` + fi + echo "$domain_api_version" +} + +# Retrieve Custom Resource Domain +function get_custom_resource_domain() { + local DOMAIN=$(curl \ + -v \ + --cacert /var/run/secrets/kubernetes.io/serviceaccount/ca.crt \ + -H "Authorization: Bearer $(cat /var/run/secrets/kubernetes.io/serviceaccount/token)" \ + 
$kubernetes_master/apis/weblogic.oracle/$domain_api_version/namespaces/$wls_domain_namespace/domains/$wls_domain_uid) + if [ $? -ne 0 ]; then + trace "Failed to retrieve WebLogic Domain Custom Resource Definition" + exit 1 + fi + echo "$DOMAIN" +} + +# Verify if cluster is defined in clusters of the Custom Resource Domain +# args: +# $1 Custom Resource Domain +function is_defined_in_clusters() { + local DOMAIN="$1" + local in_cluster_startup="False" + + if jq_available; then + local inClusterStartupCmd="(.spec.clusters[] | select (.clusterName == \"${wls_cluster_name}\"))" + local clusterDefinedInCRD=$(echo "${DOMAIN}" | jq "${inClusterStartupCmd}" 2>> ${log_file_name}) + if [ "${clusterDefinedInCRD}" != "" ]; then + in_cluster_startup="True" + fi + else +cat > cmds-$$.py << INPUT +import sys, json +outer_loop_must_break = False +for j in json.load(sys.stdin)["spec"]["clusters"]: + if j["clusterName"] == "$wls_cluster_name": + outer_loop_must_break = True + print (True) + break +if outer_loop_must_break == False: + print (False) +INPUT +in_cluster_startup=`echo ${DOMAIN} | python cmds-$$.py 2>> ${log_file_name}` + fi + echo "$in_cluster_startup" +} + +# Gets the current replica count of the cluster +# args: +# $1 Custom Resource Domain +function get_num_ms_in_cluster() { + local DOMAIN="$1" + local num_ms + if jq_available; then + local numManagedServersCmd="(.spec.clusters[] | select (.clusterName == \"${wls_cluster_name}\") | .replicas)" + num_ms=$(echo "${DOMAIN}" | jq "${numManagedServersCmd}" 2>> ${log_file_name}) + else +cat > cmds-$$.py << INPUT +import sys, json +for j in json.load(sys.stdin)["spec"]["clusters"]: + if j["clusterName"] == "$wls_cluster_name": + print (j["replicas"]) +INPUT + num_ms=`echo ${DOMAIN} | python cmds-$$.py 2>> ${log_file_name}` + fi + + if [ "${num_ms}" == "null" ] || [ "${num_ms}" == '' ] ; then + num_ms=0 + fi + + echo "$num_ms" +} + +# Gets the replica count at the Domain level +# args: +# $1 Custom Resource Domain +function 
get_num_ms_domain_scope() { + local DOMAIN="$1" + local num_ms + if jq_available; then + num_ms=$(echo "${DOMAIN}" | jq -r '.spec.replicas' 2>> ${log_file_name}) + else +cat > cmds-$$.py << INPUT +import sys, json +print (json.load(sys.stdin)["spec"]["replicas"]) +INPUT + num_ms=`echo ${DOMAIN} | python cmds-$$.py 2>> ${log_file_name}` + fi + + if [ "${num_ms}" == "null" ] || [ "${num_ms}" == '' ] ; then + # if not defined then default to 0 + num_ms=0 + fi + + echo "$num_ms" +} + +# +# Function to get minimum replica count for cluster +# $1 - Domain resource in json format +# $2 - Name of the cluster +# $3 - Return value containing minimum replica count +# +function get_min_replicas { + local domainJson=$1 + local clusterName=$2 + local __result=$3 + + eval $__result=0 + if jq_available; then + minReplicaCmd="(.status.clusters[] | select (.clusterName == \"${clusterName}\")) \ + | .minimumReplicas" + minReplicas=$(echo ${domainJson} | jq "${minReplicaCmd}" 2>> ${log_file_name}) + else +cat > cmds-$$.py << INPUT +import sys, json +for j in json.load(sys.stdin)["status"]["clusters"]: + if j["clusterName"] == "$clusterName": + print (j["minimumReplicas"]) +INPUT + minReplicas=`echo ${DOMAIN} | python cmds-$$.py 2>> ${log_file_name}` + fi + eval $__result=${minReplicas} +} + +# Get the current replica count for the WLS cluster if defined in the CRD's Cluster +# configuration. If WLS cluster is not defined in the CRD then return the Domain +# scoped replica value, if present. Returns replica count = 0 if no replica count found. 
+# args: +# $1 "True" if WLS cluster configuration defined in CRD, "False" otherwise +# $2 Custom Resource Domain +function get_replica_count() { + local in_cluster_startup="$1" + local DOMAIN="$2" + local num_ms + if [ "$in_cluster_startup" == "True" ] + then + trace "$wls_cluster_name defined in clusters" + num_ms=$(get_num_ms_in_cluster "$DOMAIN") + else + trace "$wls_cluster_name NOT defined in clusters" + num_ms=$(get_num_ms_domain_scope "$DOMAIN") + fi + + get_min_replicas "${DOMAIN}" "${wls_cluster_name}" minReplicas + if [[ "${num_ms}" -lt "${minReplicas}" ]]; then + # Reset managed server count to minimum replicas + num_ms=${minReplicas} + fi + + echo "$num_ms" +} + +# Determine the number of managed servers to scale +# args: +# $1 scaling action (scaleUp or scaleDown) +# $2 current replica count +# $3 scaling increment value +function calculate_new_ms_count() { + local scaling_action="$1" + local current_replica_count="$2" + local scaling_size="$3" + local new_ms + if [ "$scaling_action" == "scaleUp" ]; + then + # Scale up by specified scaling size + # shellcheck disable=SC2004 + new_ms=$(($current_replica_count + $scaling_size)) + else + # Scale down by specified scaling size + new_ms=$(($current_replica_count - $scaling_size)) + fi + echo "$new_ms" +} + +# Verify if requested managed server scaling count is less than the configured +# minimum replica count for the cluster. 
+# args: +# $1 Managed server count +# $2 Custom Resource Domain +# $3 Cluster name +function verify_minimum_ms_count_for_cluster() { + local new_ms="$1" + local domainJson="$2" + local clusterName="$3" + # check if replica count is less than minimum replicas + get_min_replicas "${domainJson}" "${clusterName}" minReplicas + if [ "${new_ms}" -lt "${minReplicas}" ]; then + trace "Scaling request to new managed server count $new_ms is less than configured minimum \ + replica count $minReplicas" + exit 1 + fi +} + +# Create the REST endpoint CA certificate in PEM format +# args: +# $1 certificate file name to create +function create_ssl_certificate_file() { + local pem_filename="$1" + if [ ${INTERNAL_OPERATOR_CERT} ]; + then + echo ${INTERNAL_OPERATOR_CERT} | base64 --decode > $pem_filename + else + trace "Operator Cert File not found" + exit 1 + fi +} + +# Create request body for scaling request +# args: +# $1 replica count +function get_request_body() { +local new_ms="$1" +local request_body=$(cat <}}) for recent changes and known issues for Oracle Access Management domain deployment on Kubernetes. + ### Limitations See [here]({{< relref "/oam/prerequisites#limitations">}}) for limitations in this release. @@ -31,8 +42,12 @@ For detailed information about deploying Oracle Access Management domains, start If performing an Enterprise Deployment, refer to the [Enterprise Deployment Guide for Oracle Identity and Access Management in a Kubernetes Cluster](https://docs.oracle.com/en/middleware/fusion-middleware/12.2.1.4/ikedg/index.html) instead. +### Documentation for earlier releases + +To view documentation for an earlier release, see: + +* [Version 21.4.1](https://oracle.github.io/fmw-kubernetes/21.4.1/oam/) + -### Current release -The current supported release of the WebLogic Kubernetes Operator, for Oracle Access Management domains deployment is [3.0.1](https://github.com/oracle/weblogic-kubernetes-operator/releases/tag/v3.0.1). 
diff --git a/docs-source/content/oam/configure-ingress/_index.md b/docs-source/content/oam/configure-ingress/_index.md index a2941ca1a..90aa5ff3c 100644 --- a/docs-source/content/oam/configure-ingress/_index.md +++ b/docs-source/content/oam/configure-ingress/_index.md @@ -5,7 +5,447 @@ pre = "5. " description= "This document provides steps to configure an Ingress to direct traffic to the OAM domain." +++ -Choose one of the following supported methods to configure an Ingress to direct traffic for your OAM domain. +### Setting up an ingress for NGINX for the OAM Domain + +The instructions below explain how to set up NGINX as an ingress for the OAM domain with SSL termination. + +**Note**: All the steps below should be performed on the **master** node. + +1. [Generate a SSL Certificate](#generate-a-ssl-certificate) +2. [Install NGINX](#install-nginx) +3. [Create an Ingress for the Domain](#create-an-ingress-for-the-domain) +4. [Verify that you can access the domain URL](#verify-that-you-can-access-the-domain-url) + + +#### Generate a SSL Certificate + +1. Generate a private key and certificate signing request (CSR) using a tool of your choice. Send the CSR to your certificate authority (CA) to generate the certificate. + + If you want to use a certificate for testing purposes you can generate a self signed certificate using openssl: + + ```bash + $ mkdir /ssl + $ cd /ssl + $ openssl req -x509 -nodes -days 365 -newkey rsa:2048 -keyout tls.key -out tls.crt -subj "/CN=" + ``` + + For example: + + ```bash + $ mkdir /scratch/OAMK8S/ssl + $ cd /scratch/OAMK8S/ssl + $ openssl req -x509 -nodes -days 365 -newkey rsa:2048 -keyout tls.key -out tls.crt -subj "/CN=masternode.example.com" + ``` + + **Note**: The `CN` should match the host.domain of the master node in order to prevent hostname problems during certificate verification. 
+ + The output will look similar to the following: + + ``` + Generating a 2048 bit RSA private key + ..........................................+++ + .......................................................................................................+++ + writing new private key to 'tls.key' + ----- + ``` + +2. Create a secret for SSL by running the following command: + + ```bash + $ kubectl -n oamns create secret tls -tls-cert --key /tls.key --cert /tls.crt + ``` + + For example: + + ```bash + $ kubectl -n oamns create secret tls accessdomain-tls-cert --key /scratch/OAMK8S/ssl/tls.key --cert /scratch/OAMK8S/ssl/tls.crt + ``` + + The output will look similar to the following: + + ``` + secret/accessdomain-tls-cert created + ``` + + +#### Install NGINX + +Use helm to install NGINX. + +1. Add the helm chart repository for NGINX using the following command: + + ```bash + $ helm repo add stable https://kubernetes.github.io/ingress-nginx + ``` + + The output will look similar to the following: + + ``` + "stable" has been added to your repositories + ``` + + +1. Update the repository using the following command: + + ```bash + $ helm repo update + ``` + + The output will look similar to the following: + + ``` + Hang tight while we grab the latest from your chart repositories... + ...Successfully got an update from the "stable" chart repository + Update Complete. ⎈ Happy Helming!⎈ + ``` + +##### Install NGINX using helm + +If you can connect directly to the master node IP address from a browser, then install NGINX with the `--set controller.service.type=NodePort` parameter. + +If you are using a Managed Service for your Kubernetes cluster, for example Oracle Kubernetes Engine (OKE) on Oracle Cloud Infrastructure (OCI), and connect from a browser to the Load Balancer IP address, then use the `--set controller.service.type=LoadBalancer` parameter. This instructs the Managed Service to setup a Load Balancer to direct traffic to the NGINX ingress. + +1. 
To install NGINX use the following helm command depending on if you are using `NodePort` or `LoadBalancer`: + + a) Using NodePort + + ```bash + $ helm install nginx-ingress -n --set controller.extraArgs.default-ssl-certificate=/ --set controller.service.type=NodePort --set controller.admissionWebhooks.enabled=false stable/ingress-nginx + ``` + + For example: + + ```bash + $ helm install nginx-ingress -n oamns --set controller.extraArgs.default-ssl-certificate=oamns/accessdomain-tls-cert --set controller.service.type=NodePort --set controller.admissionWebhooks.enabled=false stable/ingress-nginx + ``` + + **Note**: If using Kubernetes 1.18 then add `--version=3.34.0` to the end of command. + + + The output will look similar to the following: + + ``` + NAME: nginx-ingress + LAST DEPLOYED: Mon Nov 1 07:34:25 2021 + + NAMESPACE: oamns + STATUS: deployed + REVISION: 1 + TEST SUITE: None + NOTES: + The nginx-ingress controller has been installed. + Get the application URL by running these commands: + export HTTP_NODE_PORT=$(kubectl --namespace oamns get services -o jsonpath="{.spec.ports[0].nodePort}" nginx-ingress-controller) + export HTTPS_NODE_PORT=$(kubectl --namespace oamns get services -o jsonpath="{.spec.ports[1].nodePort}" nginx-ingress-controller) + export NODE_IP=$(kubectl --namespace oamns get nodes -o jsonpath="{.items[0].status.addresses[1].address}") + + echo "Visit http://$NODE_IP:$HTTP_NODE_PORT to access your application via HTTP." + echo "Visit https://$NODE_IP:$HTTPS_NODE_PORT to access your application via HTTPS." 
+ + An example Ingress that makes use of the controller: + + apiVersion: networking.k8s.io/v1 + kind: Ingress + metadata: + annotations: + kubernetes.io/ingress.class: nginx + name: example + namespace: foo + spec: + ingressClassName: example-class + rules: + - host: www.example.com + http: + paths: + - path: / + pathType: Prefix + backend: + service: + name: exampleService + port: 80 + # This section is only required if TLS is to be enabled for the Ingress + tls: + - hosts: + - www.example.com + secretName: example-tls + + + If TLS is enabled for the Ingress, a Secret containing the certificate and key must also be provided: + + apiVersion: v1 + kind: Secret + metadata: + name: example-tls + namespace: foo + data: + tls.crt: + tls.key: + type: kubernetes.io/tls + ``` + + b) Using LoadBalancer + + ``` + $ helm install nginx-ingress -n oamns --set controller.extraArgs.default-ssl-certificate=oamns/accessdomain-tls-cert --set controller.service.type=LoadBalancer --set controller.admissionWebhooks.enabled=false stable/ingress-nginx + ``` + + **Note**: If using Kubernetes 1.18 then add `--version=3.34.0` to the end of command. + + The output will look similar to the following: + + ``` + $ helm install nginx-ingress -n oamns --set controller.extraArgs.default-ssl-certificate=oamns/accessdomain-tls-cert --set controller.service.type=LoadBalancer --set controller.admissionWebhooks.enabled=false stable/ingress-nginx + + NAME: nginx-ingress + LAST DEPLOYED: Mon Nov 1 07:34:25 2021 + NAMESPACE: nginxssl + STATUS: deployed + REVISION: 1 + TEST SUITE: None + NOTES: + The ingress-nginx controller has been installed. + It may take a few minutes for the LoadBalancer IP to be available. 
+ You can watch the status by running 'kubectl --namespace oamns get services -o wide -w nginx-ingress-ingress-nginx-controller' + + An example Ingress that makes use of the controller: + + apiVersion: networking.k8s.io/v1 + kind: Ingress + metadata: + annotations: + kubernetes.io/ingress.class: nginx + name: example + namespace: foo + spec: + ingressClassName: example-class + rules: + - host: www.example.com + http: + paths: + - path: / + pathType: Prefix + backend: + service: + name: exampleService + port: 80 + # This section is only required if TLS is to be enabled for the Ingress + tls: + - hosts: + - www.example.com + secretName: example-tls + + + If TLS is enabled for the Ingress, a Secret containing the certificate and key must also be provided: + + apiVersion: v1 + kind: Secret + metadata: + name: example-tls + namespace: foo + data: + tls.crt: + tls.key: + type: kubernetes.io/tls + ``` + +#### Create an Ingress for the Domain + +1. Navigate to the following directory: + + ``` + $ cd $WORKDIR/kubernetes/charts/ingress-per-domain + ``` + + +1. Edit the `values.yaml` and change the `domainUID:` parameter to match your `domainUID`, for example `domainUID: accessdomain`. The file should look as follows: + + ``` + # Load balancer type. Supported values are: NGINX + type: NGINX + + # Type of Configuration Supported Values are : SSL and NONSSL + sslType: SSL + + # domainType Supported values are soa,osb and soaosb. + + #WLS domain as backend to the load balancer + wlsDomain: + domainUID: accessdomain + adminServerName: AdminServer + adminServerPort: 7001 + adminServerSSLPort: + oamClusterName: oam_cluster + oamManagedServerPort: 14100 + oamManagedServerSSLPort: + policyClusterName: policy_cluster + policyManagedServerPort: 15100 + policyManagedServerSSLPort: + ``` + +1. 
Run the following helm command to install the ingress: + + ```bash + $ cd $WORKDIR + $ helm install oam-nginx kubernetes/charts/ingress-per-domain --namespace --values kubernetes/charts/ingress-per-domain/values.yaml + ``` + + For example: + + ```bash + $ cd $WORKDIR + $ helm install oam-nginx kubernetes/charts/ingress-per-domain --namespace oamns --values kubernetes/charts/ingress-per-domain/values.yaml + ``` + + The output will look similar to the following: + + ``` + NAME: oam-nginx + LAST DEPLOYED: Mon Nov 1 07:57:30 2021 + NAMESPACE: oamns + STATUS: deployed + REVISION: 1 + TEST SUITE: None + ``` + +1. Run the following command to show the ingress is created successfully: + + ```bash + $ kubectl get ing -n + ``` + + For example: + + ```bash + $ kubectl get ing -n oamns + ``` + + The output will look similar to the following: + + ``` + NAME CLASS HOSTS ADDRESS PORTS AGE + access-ingress * 10.101.132.251 80 2m53s + ``` + +1. Find the node port of NGINX using the following command: + + ```bash + $ kubectl --namespace get services -o jsonpath="{.spec.ports[1].nodePort}" nginx-ingress-ingress-nginx-controller + ``` + + For example: + + ```bash + $ kubectl --namespace oamns get services -o jsonpath="{.spec.ports[1].nodePort}" nginx-ingress-ingress-nginx-controller + ``` + + The output will look similar to the following: + + ``` + 31051 + ``` + +1. 
Run the following command to check the ingress: + + ```bash + $ kubectl describe ing access-ingress -n + ``` + + For example: + + ```bash + $ kubectl describe ing access-ingress -n oamns + ``` + + The output will look similar to the following: + + ``` + Name: access-ingress + Namespace: oamns + Address: 10.101.132.251 + Default backend: default-http-backend:80 () + Rules: + Host Path Backends + ---- ---- -------- + * + /console accessdomain-adminserver:7001 (10.244.6.63:7001) + /rreg/rreg accessdomain-adminserver:7001 (10.244.6.63:7001) + /em accessdomain-adminserver:7001 (10.244.6.63:7001) + /oamconsole accessdomain-adminserver:7001 (10.244.6.63:7001) + /dms accessdomain-adminserver:7001 (10.244.6.63:7001) + /oam/services/rest accessdomain-adminserver:7001 (10.244.6.63:7001) + /iam/admin/config accessdomain-adminserver:7001 (10.244.6.63:7001) + /iam/admin/diag accessdomain-adminserver:7001 (10.244.6.63:7001) + /iam/access accessdomain-cluster-oam-cluster:14100 (10.244.5.12:14100,10.244.6.64:14100) + /oam/admin/api accessdomain-adminserver:7001 (10.244.6.63:7001) + /oam/services/rest/access/api accessdomain-cluster-oam-cluster:14100 (10.244.5.12:14100,10.244.6.64:14100) + /access accessdomain-cluster-policy-cluster:15100 (10.244.5.13:15100,10.244.6.65:15100) + / accessdomain-cluster-oam-cluster:14100 (10.244.5.12:14100,10.244.6.64:14100) + Annotations: kubernetes.io/ingress.class: nginx + meta.helm.sh/release-name: oam-nginx + meta.helm.sh/release-namespace: oamns + nginx.ingress.kubernetes.io/configuration-snippet: + more_set_input_headers "X-Forwarded-Proto: https"; + more_set_input_headers "WL-Proxy-SSL: true"; + nginx.ingress.kubernetes.io/enable-access-log: false + nginx.ingress.kubernetes.io/ingress.allow-http: false + nginx.ingress.kubernetes.io/proxy-buffer-size: 2000k + Events: + Type Reason Age From Message + ---- ------ ---- ---- ------- + Normal Sync 6m22s (x2 over 6m31s) nginx-ingress-controller Scheduled for sync + ``` + + +1. 
To confirm that the new ingress is successfully routing to the domain's server pods, run the following command to send a request to the URL for the `WebLogic ReadyApp framework`: + + ```bash + $ curl -v -k https://${MASTERNODE-HOSTNAME}:${MASTERNODE-PORT}/weblogic/ready + ``` + + For example: + + a) For NodePort + + ```bash + $ curl -v -k https://masternode.example.com:31051/weblogic/ready + ``` + + b) For LoadBalancer: + + ```bash + $ curl -v -k https://masternode.example.com/weblogic/ready + ``` + + The output will look similar to the following: + + ``` + * Trying 12.345.67.89... + * Connected to 12.345.67.89 (12.345.67.89) port 31051 (#0) + * Initializing NSS with certpath: sql:/etc/pki/nssdb + * skipping SSL peer certificate verification + * SSL connection using TLS_ECDHE_RSA_WITH_AES_128_GCM_SHA256 + * Server certificate: + * subject: CN=masternode.example.com + * start date: Nov 01 14:31:07 2021 GMT + * expire date: Nov 01 14:31:07 2022 GMT + * common name: masternode.example.com + * issuer: CN=masternode.example.com + > GET /weblogic/ready HTTP/1.1 + > User-Agent: curl/7.29.0 + > Host: masternode.example.com:31051 + > Accept: */* + > + < HTTP/1.1 200 OK + < Date: Mon, 01 Nov 2021 15:06:12 GMT + < Content-Length: 0 + < Connection: keep-alive + < Strict-Transport-Security: max-age=15724800; includeSubDomains + < + * Connection #0 to host 12.345.67.89 left intact + ``` + +#### Verify that you can access the domain URL + +After setting up the NGINX ingress, verify that the domain applications are accessible through the NGINX ingress port (for example 31051) as per [Validate Domain URLs ](../validate-domain-urls) -{{% children style="h4" description="true" %}} diff --git a/docs-source/content/oam/configure-ingress/ingress-Voyager-setup-for-oam-domain-setup-on-K8S.md b/docs-source/content/oam/configure-ingress/ingress-Voyager-setup-for-oam-domain-setup-on-K8S.md deleted file mode 100644 index 5af1be821..000000000 --- 
a/docs-source/content/oam/configure-ingress/ingress-Voyager-setup-for-oam-domain-setup-on-K8S.md +++ /dev/null @@ -1,335 +0,0 @@ ---- -title: "b. Using an Ingress with Voyager" -description: "Steps to set up an Ingress for Voyager to direct traffic to the OAM domain." ---- - -### Setting Up an Ingress for Voyager for the OAM Domain on K8S - -The instructions below explain how to set up Voyager as an Ingress for the OAM domain with SSL termination. - -**Note**: All the steps below should be performed on the **master** node. - -1. [Generate a SSL Certificate](#generate-a-ssl-certificate) -2. [Install Voyager](#install-voyager) -3. [Create an Ingress for the Domain](#create-an-ingress-for-the-domain) -4. [Verify that you can access the domain URL](#verify-that-you-can-access-the-domain-url) - - -#### Generate a SSL Certificate - -1. Generate a private key and certificate signing request (CSR) using a tool of your choice. Send the CSR to your certificate authority (CA) to generate the certificate. - - If you want to use a certificate for testing purposes you can generate a self signed certificate using openssl: - - ```bash - $ mkdir /ssl - $ cd /ssl - $ openssl req -x509 -nodes -days 365 -newkey rsa:2048 -keyout tls.key -out tls.crt -subj "/CN=" - ``` - - For example: - - ```bash - $ mkdir /scratch/OAMDockerK8S/ssl - $ cd /scratch/OAMDockerK8S/ssl - $ openssl req -x509 -nodes -days 365 -newkey rsa:2048 -keyout tls.key -out tls.crt -subj "/CN=masternode.example.com" - ``` - - **Note**: The CN should match the host.domain of the master node in order to prevent hostname problems during certificate verification. - - The output will look similar to the following: - - ```bash - Generating a 2048 bit RSA private key - ..........................................+++ - .......................................................................................................+++ - writing new private key to 'tls.key' - ----- - ``` - -2. 
Create a secret for SSL by running the following command: - - ```bash - $ kubectl -n oamns create secret tls -tls-cert --key /tls.key --cert /tls.crt - ``` - - For example: - - ```bash - $ kubectl -n oamns create secret tls accessdomain-tls-cert --key /scratch/OAMDockerK8S/ssl/tls.key --cert /scratch/OAMDockerK8S/ssl/tls.crt - ``` - - The output will look similar to the following: - - ```bash - secret/accessdomain-tls-cert created - ``` - - -#### Install Voyager - -Use helm to install Voyager. - -1. Add the appscode chart repository using the following command: - - ```bash - $ helm repo add appscode https://charts.appscode.com/stable/ - ``` - - The output will look similar to the following: - - ```bash - "appscode" has been added to your repositories - ``` - -1. Update the repository using the following command: - - ```bash - $ helm repo update - ``` - - The output will look similar to the following: - - ```bash - Hang tight while we grab the latest from your chart repositories... - ...Successfully got an update from the "appscode" chart repository - ...Successfully got an update from the "stable" chart repository - Update Complete. ⎈ Happy Helming!⎈ - ``` - -1. Run the following command to show the voyager chart was added successfully. - - ```bash - $ helm search repo appscode/voyager - ``` - - The output will look similar to the following: - - ```bash - NAME CHART VERSION APP VERSION DESCRIPTION - appscode/voyager v12.0.0 v12.0.0 Voyager by AppsCode - Secure HAProxy Ingress Co... - ``` - -1. Create a namespace for the voyager: - - ```bash - $ kubectl create namespace voyager - ``` - - The output will look similar to the following: - - ```bash - namespace/voyager created - ``` - - -1. 
Install Voyager using the following helm command: - - ```bash - $ helm install voyager-operator appscode/voyager --version 12.0.0 --namespace voyager --set cloudProvider=baremetal --set apiserver.enableValidatingWebhook=false - ``` - - **Note**: For bare metal Kubernetes use `--set cloudProvider=baremetal`. If using a managed Kubernetes service then the value should be set for your specific service as per the [Voyager](https://voyagermesh.com/docs/6.0.0/setup/install/) install guide. - - The output will look similar to the following: - - ```bash - NAME: voyager-operator - LAST DEPLOYED: Fri Sep 25 01:15:31 2020 - NAMESPACE: voyager - STATUS: deployed - REVISION: 1 - TEST SUITE: None - NOTES: - Set cloudProvider for installing Voyager - - To verify that Voyager has started, run: - - kubectl get deployment --namespace voyager -l "app.kubernetes.io/name=voyager,app.kubernetes.io/instance=voyager-operator" - ``` - -#### Create an Ingress for the Domain - -1. Edit the `values.yaml` and change domainUID to the domainUID you created previously: - - ```bash - $ cd /weblogic-kubernetes-operator/kubernetes/samples/charts/ingress-per-domain - ``` - - For example: - - ```bash - $ cd /scratch/OAMDockerK8S/weblogic-kubernetes-operator/kubernetes/samples/charts/ingress-per-domain - ``` - - Edit `values.yaml` and change `Namespace: `, for example `Namespace: oamns`. Also change `domainUID: `, for example `domainUID: accessdomain`. - -1. Navigate to the following directory: - - ```bash - $ cd /weblogic-kubernetes-operator/kubernetes/samples/charts/ingress-per-domain/templates - ``` - - For example: - - ```bash - $ cd /scratch/OAMDockerK8S/weblogic-kubernetes-operator/kubernetes/samples/charts/ingress-per-domain/templates - ``` - - Edit the `voyager-ingress.yaml` and change the `secretName` to the value created earlier, for example: - - ```bash - # Copyright (c) 2020, Oracle Corporation and/or its affiliates. 
- # Licensed under the Universal Permissive License v 1.0 as shown at https://oss.oracle.com/licenses/upl. - - - {{- if eq .Values.type "VOYAGER" }} - --- - apiVersion: voyager.appscode.com/v1beta1 - kind: Ingress - metadata: - name: {{ .Values.wlsDomain.domainUID }}-voyager - namespace: {{ .Release.Namespace }} - annotations: - ingress.appscode.com/type: 'NodePort' - kubernetes.io/ingress.class: 'voyager' - ingress.appscode.com/stats: 'true' - ingress.appscode.com/default-timeout: '{"connect": "1800s", "server": "1800s"}' - ingress.appscode.com/proxy-body-size: "2000000" - labels: - weblogic.resourceVersion: domain-v2 - spec: - {{- if eq .Values.tls "SSL" }} - frontendRules: - - port: 443 - rules: - - http-request set-header WL-Proxy-SSL true - tls: - - secretName: accessdomain-tls-cert - hosts: - - '*' - {{- end }} - ... - ``` - - -1. Create an Ingress for the domain (`oam-voyager-ingress`), in the domain namespace by using the sample Helm chart. - - ```bash - $ cd /weblogic-kubernetes-operator - $ helm install oam-voyager-ingress kubernetes/samples/charts/ingress-per-domain --namespace --values kubernetes/samples/charts/ingress-per-domain/values.yaml - ``` - - For example: - - ```bash - $ cd /scratch/OAMDockerK8S/weblogic-kubernetes-operator - $ helm install oam-voyager-ingress kubernetes/samples/charts/ingress-per-domain --namespace oamns --values kubernetes/samples/charts/ingress-per-domain/values.yaml - ``` - - The output will look similar to the following: - ```bash - NAME: oam-voyager-ingress - Fri Sep 25 01:18:01 2020 - NAMESPACE: oamns - STATUS: deployed - REVISION: 1 - TEST SUITE: None - ``` - -1. Run the following command to show the ingress is created successfully: - - ```bash - $ kubectl get ingress.voyager.appscode.com --all-namespaces - ``` - - The output will look similar to the following: - ```bash - NAMESPACE NAME HOSTS LOAD_BALANCER_IP AGE - oamns accessdomain-voyager * 80s - ``` - -1. 
Find the node port of the ingress using the following command: - - ```bash - $ kubectl describe svc voyager-accessdomain-voyager -n - ``` - - For example: - - ```bash - $ kubectl describe svc voyager-accessdomain-voyager -n oamns - ``` - - The output will look similar to the following: - ```bash - Name: voyager-accessdomain-voyager - Namespace: oamns - Labels: app.kubernetes.io/managed-by=Helm - origin=voyager - origin-api-group=voyager.appscode.com - origin-name=accessdomain-voyager - weblogic.resourceVersion=domain-v2 - Annotations: ingress.appscode.com/last-applied-annotation-keys: - ingress.appscode.com/origin-api-schema: voyager.appscode.com/v1beta1 - ingress.appscode.com/origin-name: accessdomain-voyager - Selector: origin-api-group=voyager.appscode.com,origin-name=accessdomain-voyager,origin=voyager - Type: NodePort - IP: 10.105.242.191 - Port: tcp-443 443/TCP - TargetPort: 443/TCP - NodePort: tcp-443 30305/TCP - Endpoints: 10.244.2.4:443 - Port: tcp-80 80/TCP - TargetPort: 80/TCP - NodePort: tcp-80 32064/TCP - Endpoints: 10.244.2.4:80 - Session Affinity: None - External Traffic Policy: Cluster - Events: - ``` - - In the above example the `NodePort` for `tcp-443` is `30305`. - -1. To confirm that the new Ingress is successfully routing to the domain's server pods, run the following command to send a request to the URL for the "WebLogic ReadyApp framework": - - ```bash - $ curl -v https://${MASTERNODE-HOSTNAME}:${MASTERNODE-PORT}/weblogic/ready - ``` - - For example: - - ```bash - $ curl -v -k https://masternode.example.com:30305/weblogic/ready - ``` - - The output will look similar to the following: - - ```bash - * Trying 12.345.67.89... 
- * Connected to 12.345.67.89 (12.345.67.89) port 30305 (#0) - * Initializing NSS with certpath: sql:/etc/pki/nssdb - * skipping SSL peer certificate verification - * SSL connection using TLS_ECDHE_RSA_WITH_AES_128_GCM_SHA256 - * Server certificate: - * subject: CN=masternode.example.com - * start date: Sep 24 14:30:46 2020 GMT - * expire date: Sep 24 14:30:46 2021 GMT - * common name: masternode.example.com - * issuer: CN=masternode.example.com - > GET /weblogic/ready HTTP/1.1 - > User-Agent: curl/7.29.0 - > Host: masternode.example.com:30305 - > Accept: */* - > - < HTTP/1.1 200 OK - < Date: 25 Sep 2020 08:22:11 GMT - < Content-Length: 0 - < Strict-Transport-Security: max-age=15768000 - < - * Connection #0 to host 12.345.67.89 left intact - ``` - -#### Verify that you can access the domain URL - -After setting up the Voyager ingress, verify that the domain applications are accessible through the Voyager ingress port (for example 30305) as per [Validate Domain URLs ]({{< relref "/oam/validate-domain-urls" >}}) diff --git a/docs-source/content/oam/create-oam-domains/_index.md b/docs-source/content/oam/create-oam-domains/_index.md index fe49b0c4a..7f7a88eec 100644 --- a/docs-source/content/oam/create-oam-domains/_index.md +++ b/docs-source/content/oam/create-oam-domains/_index.md @@ -18,21 +18,14 @@ Before you begin, perform the following steps: #### Prepare to use the create domain script -The sample scripts for Oracle Access Management domain deployment are available at `/kubernetes/samples/scripts/create-access-domain`. +The sample scripts for Oracle Access Management domain deployment are available at `$WORKDIR/kubernetes/create-access-domain`. -1. Make a copy of the create-domain-inputs.yaml file: +1. 
Make a copy of the `create-domain-inputs.yaml` file: ```bash - $ cd /weblogic-kubernetes-operator/kubernetes/samples/scripts/create-access-domain/domain-home-on-pv + $ cd $WORKDIR/kubernetes/create-access-domain/domain-home-on-pv $ cp create-domain-inputs.yaml create-domain-inputs.yaml.orig ``` - - For example: - - ```bash - $ cd /scratch/OAMDockerK8S/weblogic-kubernetes-operator/kubernetes/samples/scripts/create-access-domain/domain-home-on-pv - $ cp create-domain-inputs.yaml create-domain-inputs.yaml.orig - ``` You must edit `create-domain-inputs.yaml` (or a copy of it) to provide the details for your domain. Please refer to the configuration parameters below to understand the information that you must @@ -40,16 +33,16 @@ provide in this file. #### Edit configuration parameters -1. Edit the create-domain-inputs.yaml and modify the following parameters. Save the file when complete: +1. Edit the `create-domain-inputs.yaml` and modify the following parameters. Save the file when complete: ```bash domainUID: domainHome: /u01/oracle/user_projects/domains/ image: - namespace: weblogicCredentialsSecretName: - persistentVolumeClaimName: logHome: /u01/oracle/user_projects/domains/logs/ + namespace: + persistentVolumeClaimName: rcuSchemaPrefix: rcuDatabaseURL: :/ rcuCredentialsSecret: @@ -60,18 +53,18 @@ provide in this file. 
```bash domainUID: accessdomain domainHome: /u01/oracle/user_projects/domains/accessdomain - image: oracle/oam:12.2.1.4.0 + image: oracle/oam:12.2.1.4.0-8-ol7-210721.0755 + weblogicCredentialsSecretName: accessdomain-credentials + logHome: /u01/oracle/user_projects/domains/logs/accessdomain namespace: oamns - weblogicCredentialsSecretName: accessdomain-domain-credentials persistentVolumeClaimName: accessdomain-domain-pvc - logHome: /u01/oracle/user_projects/domains/logs/accessdomain rcuSchemaPrefix: OAMK8S rcuDatabaseURL: mydatabasehost.example.com:1521/orcl.example.com rcuCredentialsSecret: accessdomain-rcu-credentials ``` -A full list of parameters in the create-domain-inputs.yaml file are shown below: +A full list of parameters in the `create-domain-inputs.yaml` file are shown below: | Parameter | Definition | Default | | --- | --- | --- | @@ -128,20 +121,20 @@ to create the domain home for other use cases. The generated domain YAML file co generated artifacts: ```bash - cd /weblogic-kubernetes-operator/kubernetes/samples/scripts/create-access-domain/domain-home-on-pv + cd $WORKDIR/kubernetes/create-access-domain/domain-home-on-pv $ ./create-domain.sh -i create-domain-inputs.yaml -o / ``` For example: ```bash - $ cd /scratch/OAMDockerK8S/weblogic-kubernetes-operator/kubernetes/samples/scripts/create-access-domain/domain-home-on-pv + $ cd $WORKDIR/kubernetes/create-access-domain/domain-home-on-pv $ ./create-domain.sh -i create-domain-inputs.yaml -o output ``` The output will look similar to the following: - ```bash + ``` Input parameters being used export version="create-weblogic-sample-domain-inputs-v1" export adminPort="7001" @@ -155,12 +148,13 @@ generated artifacts: export initialManagedServerReplicas="2" export managedServerNameBase="oam_server" export managedServerPort="14100" - export image="oracle/oam:12.2.1.4.0" + export image="oracle/oam:12.2.1.4.0-8-ol7-210721.0755" export imagePullPolicy="IfNotPresent" export productionModeEnabled="true" - export 
weblogicCredentialsSecretName="accessdomain-domain-credentials" + export weblogicCredentialsSecretName="accessdomain-credentials" export includeServerOutInPodLog="true" export logHome="/u01/oracle/user_projects/domains/logs/accessdomain" + export httpAccessLogInLogHome="true" export t3ChannelPort="30012" export exposeAdminT3Channel="false" export adminNodePort="30701" @@ -176,10 +170,12 @@ generated artifacts: export rcuDatabaseURL="mydatabasehost.example.com:1521/orcl.example.com" export rcuCredentialsSecret="accessdomain-rcu-credentials" + createFiles - valuesInputFile is create-domain-inputs.yaml + createDomainScriptName is create-domain-job.sh Generating output/weblogic-domains/accessdomain/create-domain-job.yaml Generating output/weblogic-domains/accessdomain/delete-domain-job.yaml Generating output/weblogic-domains/accessdomain/domain.yaml - Checking to see if the secret accessdomain-domain-credentials exists in namespace oamns + Checking to see if the secret accessdomain-credentials exists in namespace oamns configmap/accessdomain-create-oam-infra-domain-job-cm created Checking the configmap accessdomain-create-oam-infra-domain-job-cm was created configmap/accessdomain-create-oam-infra-domain-job-cm labeled @@ -189,64 +185,91 @@ generated artifacts: job.batch/accessdomain-create-oam-infra-domain-job created Waiting for the job to complete... 
status on iteration 1 of 20 - pod accessdomain-create-oam-infra-domain-job-vj69h status is Running + pod accessdomain-create-oam-infra-domain-job-6tgw4 status is Running status on iteration 2 of 20 - pod accessdomain-create-oam-infra-domain-job-vj69h status is Running + pod accessdomain-create-oam-infra-domain-job-6tgw4 status is Running status on iteration 3 of 20 - pod accessdomain-create-oam-infra-domain-job-vj69h status is Running + pod accessdomain-create-oam-infra-domain-job-6tgw4 status is Running status on iteration 4 of 20 - pod accessdomain-create-oam-infra-domain-job-vj69h status is Running + pod accessdomain-create-oam-infra-domain-job-6tgw4 status is Running status on iteration 5 of 20 - pod accessdomain-create-oam-infra-domain-job-vj69h status is Completed + pod accessdomain-create-oam-infra-domain-job-6tgw4 status is Running + status on iteration 6 of 20 + pod accessdomain-create-oam-infra-domain-job-6tgw4 status is Completed - Domain accessdomain was created and will be started by the Oracle WebLogic Kubernetes Operator - - The following files were generated: - output/weblogic-domains/accessdomain/create-domain-inputs.yaml - output/weblogic-domains/accessdomain/create-domain-job.yaml - output/weblogic-domains/accessdomain/domain.yaml + Domain accessdomain was created and will be started by the WebLogic Kubernetes Operator - Completed + The following files were generated: + output/weblogic-domains/accessdomain/create-domain-inputs.yaml + output/weblogic-domains/accessdomain/create-domain-job.yaml + output/weblogic-domains/accessdomain/domain.yaml ``` **Note**: If the domain creation fails, refer to the [Troubleshooting](../troubleshooting) section. The command creates a `domain.yaml` file required for domain creation. - + +#### Set the OAM server memory parameters + +By default, the java memory parameters assigned to the oam_server cluster are very small. The minimum recommended values are `-Xms4096m -Xmx8192m`. 
However, Oracle recommends you to set these to `-Xms8192m -Xmx8192m` in a production environment. + 1. Navigate to the `/output/weblogic-domains/` directory: ```bash - $ cd /weblogic-kubernetes-operator/kubernetes/samples/scripts/create-access-domain/domain-home-on-pv/output/weblogic-domains/ + $ cd $WORKDIR/kubernetes/create-access-domain/domain-home-on-pv/output/weblogic-domains/ ``` For example: ```bash - $ cd /scratch/OAMDockerK8S/weblogic-kubernetes-operator/kubernetes/samples/scripts/create-access-domain/domain-home-on-pv/output/weblogic-domains/accessdomain + $ cd $WORKDIR/kubernetes/create-access-domain/domain-home-on-pv/output/weblogic-domains/accessdomain ``` - Edit the `domain.yaml` file, increase the min and max heap size save the file. Change the following value from: +1. Edit the `domain.yaml` file and locate the section of the file starting with: `- clusterName: oam_cluster`. Immediately after the line: `topologyKey: "kubernetes.io/hostname"`, add the following lines: ``` - - name: USER_MEM_ARGS" - value: "-Djava.security.egd=file:/dev/./urandom -Xms256m -Xmx1024m " + env: + - name: USER_MEM_ARGS + value: "-XX:+UseContainerSupport -Djava.security.egd=file:/dev/./urandom -Xms8192m -Xmx8192m" ``` - to: + For example: ``` - - name: USER_MEM_ARGS" - value: "-XX:+UseContainerSupport -Djava.security.egd=file:/dev/./urandom -Xms8192m -Xmx8192m" + - clusterName: oam_cluster + serverService: + precreateService: true + serverStartState: "RUNNING" + serverPod: + # Instructs Kubernetes scheduler to prefer nodes for new cluster members where there are not + # already members of the same cluster. 
+ affinity: + podAntiAffinity: + preferredDuringSchedulingIgnoredDuringExecution: + - weight: 100 + podAffinityTerm: + labelSelector: + matchExpressions: + - key: "weblogic.clusterName" + operator: In + values: + - $(CLUSTER_NAME) + topologyKey: "kubernetes.io/hostname" + env: + - name: USER_MEM_ARGS + value: "-XX:+UseContainerSupport -Djava.security.egd=file:/dev/./urandom -Xms8192m -Xmx8192m" + replicas: 2 ``` + - Add the following environment variable to the domain.yaml file for the Admin Server under "env" tag. This is needed for running the `idmconfigtool` from the AdminServer. +1. In the `domain.yaml` locate the section of the file starting with `adminServer:`. Under the `env:` tag add the following `CLASSPATH` entries. This is required for running the `idmconfigtool` from the Administration Server. ``` - name: CLASSPATH value: "/u01/oracle/wlserver/server/lib/weblogic.jar" ``` - Sample below: + For example: ``` adminServer: @@ -293,22 +316,20 @@ generated artifacts: image: "oracle/oam:12.2.1.4.0" .... ``` - +1. Save the changes to `domain.yaml` + + +#### Initializing the domain + 1. 
Create the Kubernetes resource using the following command: ```bash - $ kubectl apply -f /weblogic-kubernetes-operator/kubernetes/samples/scripts/create-access-domain/domain-home-on-pv/output/weblogic-domains/accessdomain/domain.yaml - ``` - - For example: - - ```bash - $ kubectl apply -f /scratch/OAMDockerK8S/weblogic-kubernetes-operator/kubernetes/samples/scripts/create-access-domain/domain-home-on-pv/output/weblogic-domains/accessdomain/domain.yaml + $ kubectl apply -f $WORKDIR/kubernetes/create-access-domain/domain-home-on-pv/output/weblogic-domains/accessdomain/domain.yaml ``` The output will look similar to the following: - ```bash + ``` domain.weblogic.oracle/accessdomain created ``` @@ -329,35 +350,36 @@ generated artifacts: The output will look similar to the following: - ```bash - NAME READY STATUS RESTARTS AGE - pod/accessdomain-adminserver 1/1 Running 0 17m - pod/accessdomain-create-oam-infra-domain-job-vj69h 0/1 Completed 0 42m - pod/accessdomain-oam-policy-mgr1 1/1 Running 0 9m7s - pod/accessdomain-oam-server1 1/1 Running 0 9m7s - pod/accessdomain-oam-server2 1/1 Running 0 9m7s - pod/helper 1/1 Running 0 23h - - NAME TYPE CLUSTER-IP EXTERNAL-IP PORT(S) AGE - service/accessdomain-adminserver ClusterIP None 7001/TCP 17m - service/accessdomain-cluster-oam-cluster ClusterIP 10.110.50.168 14100/TCP 9m8s - service/accessdomain-cluster-policy-cluster ClusterIP 10.102.32.247 15100/TCP 9m8s - service/accessdomain-oam-policy-mgr1 ClusterIP None 15100/TCP 9m8s - service/accessdomain-oam-policy-mgr2 ClusterIP 10.104.147.108 15100/TCP 9m8s - service/accessdomain-oam-policy-mgr3 ClusterIP 10.108.233.86 15100/TCP 9m8s - service/accessdomain-oam-policy-mgr4 ClusterIP 10.105.15.228 15100/TCP 9m7s - service/accessdomain-oam-policy-mgr5 ClusterIP 10.99.66.92 15100/TCP 9m8s - service/accessdomain-oam-server1 ClusterIP None 14100/TCP 9m8s - service/accessdomain-oam-server2 ClusterIP None 14100/TCP 9m8s - service/accessdomain-oam-server3 ClusterIP 10.111.231.33 14100/TCP 
9m8s - service/accessdomain-oam-server4 ClusterIP 10.110.10.183 14100/TCP 9m7s - service/accessdomain-oam-server5 ClusterIP 10.103.192.174 14100/TCP 9m8s - - NAME COMPLETIONS DURATION AGE - job.batch/accessdomain-create-oam-infra-domain-job 1/1 2m14s 42m - - NAME AGE - domain.weblogic.oracle/accessdomain 25m + ``` + NAME READY STATUS RESTARTS AGE + pod/accessdomain-adminserver 1/1 Running 0 11m + pod/accessdomain-create-oam-infra-domain-job-7c9r9 0/1 Completed 0 18m + pod/accessdomain-oam-policy-mgr1 1/1 Running 0 3m31s + pod/accessdomain-oam-policy-mgr2 1/1 Running 0 3m31s + pod/accessdomain-oam-server1 1/1 Running 0 3m31s + pod/accessdomain-oam-server2 1/1 Running 0 3m31s + pod/helper 1/1 Running 0 33m + + NAME TYPE CLUSTER-IP EXTERNAL-IP PORT(S) AGE + service/accessdomain-adminserver ClusterIP None 7001/TCP 11m + service/accessdomain-cluster-oam-cluster ClusterIP 10.101.59.154 14100/TCP 3m31s + service/accessdomain-cluster-policy-cluster ClusterIP 10.98.236.51 15100/TCP 3m31s + service/accessdomain-oam-policy-mgr1 ClusterIP None 15100/TCP 3m31s + service/accessdomain-oam-policy-mgr2 ClusterIP None 15100/TCP 3m31s + service/accessdomain-oam-policy-mgr3 ClusterIP 10.96.244.37 15100/TCP 3m31s + service/accessdomain-oam-policy-mgr4 ClusterIP 10.105.201.23 15100/TCP 3m31s + service/accessdomain-oam-policy-mgr5 ClusterIP 10.110.12.227 15100/TCP 3m31s + service/accessdomain-oam-server1 ClusterIP None 14100/TCP 3m31s + service/accessdomain-oam-server2 ClusterIP None 14100/TCP 3m31s + service/accessdomain-oam-server3 ClusterIP 10.103.178.35 14100/TCP 3m31s + service/accessdomain-oam-server4 ClusterIP 10.97.254.78 14100/TCP 3m31s + service/accessdomain-oam-server5 ClusterIP 10.105.65.104 14100/TCP 3m31s + + NAME COMPLETIONS DURATION AGE + job.batch/accessdomain-create-oam-infra-domain-job 1/1 2m6s 18m + + NAME AGE + domain.weblogic.oracle/accessdomain 12m ``` **Note**: It will take several minutes before all the services listed above show. 
When a pod has a `STATUS` of `0/1` the pod is started but the OAM server associated with it is currently starting. While the pods are starting you can check the startup status in the pod logs, by running the following command: @@ -376,7 +398,7 @@ generated artifacts: * A configured OAM cluster named `oam_cluster` of size 5. * A configured Policy Manager cluster named `policy_cluster` of size 5. * Two started OAM managed Servers, named `oam_server1` and `oam_server2`, listening on port `14100`. - * One started Policy Manager managed server named `oam-policy-mgr1`, listening on port `15100`. + * Two started Policy Manager managed servers named `oam-policy-mgr1` and `oam-policy-mgr2`, listening on port `15100`. * Log files that are located in `/logs/`. @@ -393,16 +415,30 @@ generated artifacts: The output will look similar to the following: - ```bash + ``` Name: accessdomain Namespace: oamns Labels: weblogic.domainUID=accessdomain - Annotations: API Version: weblogic.oracle/v8 + Annotations: + API Version: weblogic.oracle/v8 Kind: Domain Metadata: - Creation Timestamp: 2020-09-24T14:00:34Z + Creation Timestamp: 2021-11-01T11:59:51Z Generation: 1 Managed Fields: + API Version: weblogic.oracle/v8 + Fields Type: FieldsV1 + fieldsV1: + f:status: + .: + f:clusters: + f:conditions: + f:introspectJobFailureCount: + f:servers: + f:startTime: + Manager: Kubernetes Java Client + Operation: Update + Time: 2021-11-01T11:59:51Z API Version: weblogic.oracle/v8 Fields Type: FieldsV1 fieldsV1: @@ -413,36 +449,23 @@ generated artifacts: f:labels: .: f:weblogic.domainUID: - Manager: kubectl - Operation: Update - Time: 2020-09-24T14:00:34Z - API Version: weblogic.oracle/v8 - Fields Type: FieldsV1 - fieldsV1: - f:status: - .: - f:clusters: - f:conditions: - f:servers: - f:startTime: - Manager: OpenAPI-Generator + Manager: kubectl-client-side-apply Operation: Update - Time: 2020-09-24T14:12:51Z - Resource Version: 244336 - Self Link: 
/apis/weblogic.oracle/v8/namespaces/oamns/domains/accessdomain - UID: 0edf8266-4419-45f1-bd50-e26ac41340e5 + Time: 2021-11-01T11:59:51Z + Resource Version: 1495179 + UID: a90107d5-dbaf-4d86-9439-d5369faabd35 Spec: Admin Server: Server Pod: Env: Name: USER_MEM_ARGS - Value: -XX:+UseContainerSupport -Djava.security.egd=file:/dev/./urandom -Xms8192m -Xmx8192m + Value: -Djava.security.egd=file:/dev/./urandom -Xms512m -Xmx1024m Name: CLASSPATH Value: /u01/oracle/wlserver/server/lib/weblogic.jar Server Start State: RUNNING Clusters: Cluster Name: policy_cluster - Replicas: 1 + Replicas: 2 Server Pod: Affinity: Pod Anti Affinity: @@ -472,16 +495,19 @@ generated artifacts: Operator: In Values: $(CLUSTER_NAME) - Topology Key: kubernetes.io/hostname - Weight: 100 - Server Service: - Precreate Service: true - Server Start State: RUNNING + Topology Key: kubernetes.io/hostname + Weight: 100 + Env: + Name: USER_MEM_ARGS + Value: -XX:+UseContainerSupport -Djava.security.egd=file:/dev/./urandom -Xms8192m -Xmx8192m + Server Service: + Precreate Service: true + Server Start State: RUNNING Data Home: Domain Home: /u01/oracle/user_projects/domains/accessdomain Domain Home Source Type: PersistentVolume Http Access Log In Log Home: true - Image: oracle/oam:12.2.1.4.0 + Image: oracle/oam:12.2.1.4.0-8-ol7-210721.0755 Image Pull Policy: IfNotPresent Include Server Out In Pod Log: true Log Home: /u01/oracle/user_projects/domains/logs/accessdomain @@ -498,12 +524,12 @@ generated artifacts: Volumes: Name: weblogic-domain-storage-volume Persistent Volume Claim: - Claim Name: accessdomain-domain-pvc - Server Start Policy: IF_NEEDED - Web Logic Credentials Secret: - Name: accessdomain-domain-credentials - Status: - Clusters: + Claim Name: accessdomain-domain-pvc + Server Start Policy: IF_NEEDED + Web Logic Credentials Secret: + Name: accessdomain-credentials + Status: + Clusters: Cluster Name: oam_cluster Maximum Replicas: 5 Minimum Replicas: 0 @@ -513,45 +539,46 @@ generated artifacts: Cluster 
Name: policy_cluster Maximum Replicas: 5 Minimum Replicas: 0 - Ready Replicas: 1 - Replicas: 1 - Replicas Goal: 1 + Ready Replicas: 2 + Replicas: 2 + Replicas Goal: 2 Conditions: - Last Transition Time: 2020-09-24T14:12:02.037Z - Reason: ServersReady - Status: True - Type: Available + Last Transition Time: 2021-11-01T12:11:52.623959Z + Reason: ServersReady + Status: True + Type: Available + Introspect Job Failure Count: 0 Servers: Desired State: RUNNING Health: - Activation Time: 2020-09-24T14:09:01.164Z + Activation Time: 2021-11-01T12:08:29.271000Z Overall Health: ok Subsystems: Subsystem Name: ServerRuntime Symptoms: - Node Name: 10.250.111.112 + Node Name: 10.250.42.252 Server Name: AdminServer State: RUNNING Cluster Name: oam_cluster Desired State: RUNNING Health: - Activation Time: 2020-09-24T14:11:06.015Z + Activation Time: 2021-11-01T12:11:02.696000Z Overall Health: ok Subsystems: Subsystem Name: ServerRuntime Symptoms: - Node Name: 10.250.111.111 + Node Name: 10.250.42.255 Server Name: oam_server1 State: RUNNING Cluster Name: oam_cluster Desired State: RUNNING Health: - Activation Time: 2020-09-24T14:11:35.454Z + Activation Time: 2021-11-01T12:11:46.175000Z Overall Health: ok Subsystems: Subsystem Name: ServerRuntime Symptoms: - Node Name: 10.250.111.112 + Node Name: 10.250.42.252 Server Name: oam_server2 State: RUNNING Cluster Name: oam_cluster @@ -566,17 +593,25 @@ generated artifacts: Cluster Name: policy_cluster Desired State: RUNNING Health: - Activation Time: 2020-09-24T14:11:54.938Z + Activation Time: 2021-11-01T12:11:20.404000Z Overall Health: ok Subsystems: Subsystem Name: ServerRuntime Symptoms: - Node Name: 10.250.111.112 + Node Name: 10.250.42.255 Server Name: oam_policy_mgr1 State: RUNNING Cluster Name: policy_cluster - Desired State: SHUTDOWN + Desired State: RUNNING + Health: + Activation Time: 2021-11-01T12:11:09.719000Z + Overall Health: ok + Subsystems: + Subsystem Name: ServerRuntime + Symptoms: + Node Name: 10.250.42.252 Server Name: 
oam_policy_mgr2 + State: RUNNING Cluster Name: policy_cluster Desired State: SHUTDOWN Server Name: oam_policy_mgr3 @@ -586,8 +621,13 @@ generated artifacts: Cluster Name: policy_cluster Desired State: SHUTDOWN Server Name: oam_policy_mgr5 - Start Time: 2020-09-24T14:00:34.395Z - Events: + Start Time: 2021-11-01T11:59:51.682687Z + Events: + Type Reason Age From Message + ---- ------ ---- ---- ------- + Normal DomainCreated 13m weblogic.operator Domain resource accessdomain was created + Normal DomainProcessingStarting 5m9s (x2 over 13m) weblogic.operator Creating or updating Kubernetes presence for WebLogic Domain with UID accessdomain + Normal DomainProcessingCompleted 114s weblogic.operator Successfully completed processing domain resource accessdomain ``` In the `Status` section of the output, the available servers and clusters are listed. @@ -608,12 +648,13 @@ generated artifacts: ``` NAME READY STATUS RESTARTS AGE IP NODE NOMINATED NODE READINESS GATES - accessdomain-adminserver 1/1 Running 0 26m 10.244.1.7 10.250.111.112 - accessdomain-create-oam-infra-domain-job-vj69h 0/1 Completed 0 5h55m 10.244.1.5 10.250.111.112 - accessdomain-oam-policy-mgr1 1/1 Running 0 18m 10.244.1.9 10.250.111.112 - accessdomain-oam-server1 1/1 Running 0 18m 10.244.2.3 10.250.111.111 - accessdomain-oam-server2 1/1 Running 0 18m 10.244.1.8 10.250.111.112 - helper 1/1 Running 0 22h 10.244.1.4 10.250.111.112 + accessdomain-adminserver 1/1 Running 0 18m 10.244.6.63 10.250.42.252 + accessdomain-create-oam-infra-domain-job-7c9r9 0/1 Completed 0 25m 10.244.6.61 10.250.42.252 + accessdomain-oam-policy-mgr1 1/1 Running 0 10m 10.244.5.13 10.250.42.255 + accessdomain-oam-policy-mgr2 1/1 Running 0 10m 10.244.6.65 10.250.42.252 + accessdomain-oam-server1 1/1 Running 0 10m 10.244.5.12 10.250.42.255 + accessdomain-oam-server2 1/1 Running 0 10m 10.244.6.64 10.250.42.252 + helper 1/1 Running 0 40m 10.244.6.60 10.250.42.252 ``` You are now ready to configure an Ingress to direct traffic for your OAM 
domain as per [Configure an Ingress for an OAM domain](../configure-ingress/). diff --git a/docs-source/content/oam/create-or-update-image/_index.md b/docs-source/content/oam/create-or-update-image/_index.md new file mode 100644 index 000000000..10698cd91 --- /dev/null +++ b/docs-source/content/oam/create-or-update-image/_index.md @@ -0,0 +1,360 @@ ++++ +title = "Create or update an image" +weight = 10 +pre = "10. " +description= "Create or update an Oracle Access Management (OAM) container image used for deploying OAM domains. An OAM container image can be created using the WebLogic Image Tool or using the Dockerfile approach." ++++ + + +As described in [Prepare Your Environment]({{< relref "/oam/prepare-your-environment" >}}) you can obtain or build OAM container images in the following ways: + +1. Download the latest prebuilt OAM container image from [My Oracle Support](https://support.oracle.com) by referring to the document ID 2723908.1. This image is prebuilt by Oracle and includes Oracle Access Management 12.2.1.4.0 and the latest PSU. + +1. Build your own OAM image using the WebLogic Image Tool or by using the dockerfile, scripts and base images from Oracle Container Registry (OCR). You can also build your own image by using only the dockerfile and scripts. [Building the OAM Image](https://github.com/oracle/docker-images/tree/master/OracleAccessManagement/#building-the-oam-image). + +If you have access to the My Oracle Support (MOS), and there is a need to build a new image with an interim or one off patch, it is recommended to use the WebLogic Image Tool to build an Oracle Access Management image for production deployments. 
+ + +### Create or update an Oracle Access Management image using the WebLogic Image Tool + +Using the WebLogic Image Tool, you can [create]({{< relref "/oam/create-or-update-image/#create-an-image" >}}) a new Oracle Access Management image with PSU's and interim patches or [update]({{< relref "/oam/create-or-update-image/#update-an-image" >}}) an existing image with one or more interim patches. + +> **Recommendations:** +> * Use [create]({{< relref "/oam/create-or-update-image/#create-an-image" >}}) for creating a new Oracle Access Management image containing the Oracle Access Management binaries, bundle patch and interim patches. This is the recommended approach if you have access to the OAM patches because it optimizes the size of the image. +> * Use [update]({{< relref "/oam/create-or-update-image/#update-an-image" >}}) for patching an existing Oracle Access Management image with a single interim patch. Note that the patched image size may increase considerably due to additional image layers introduced by the patch application tool. + +#### Create an image + +#### Set up the WebLogic Image Tool + +* [Prerequisites](#prerequisites) +* [Set up the WebLogic Image Tool](#set-up-the-weblogic-image-tool) +* [Validate setup](#validate-setup) +* [WebLogic Image Tool build directory](#weblogic-image-tool-build-directory) +* [WebLogic Image Tool cache](#weblogic-image-tool-cache) + +##### Prerequisites + +Verify that your environment meets the following prerequisites: + +* Docker client and daemon on the build machine, with minimum Docker version 18.03.1.ce. +* Bash version 4.0 or later, to enable the command complete feature. +* JAVA_HOME environment variable set to the appropriate JDK location e.g: /scratch/export/oracle/product/jdk + +##### Set up the WebLogic Image Tool + +To set up the WebLogic Image Tool: + +1. 
Create a working directory and change to it: + + ```bash + $ mkdir <workdir> + $ cd <workdir> + ``` + + For example: + + ```bash + $ mkdir /scratch/imagetool-setup + $ cd /scratch/imagetool-setup + ``` +1. Download the latest version of the WebLogic Image Tool from the [releases page](https://github.com/oracle/weblogic-image-tool/releases/latest). + + ```bash + $ wget https://github.com/oracle/weblogic-image-tool/releases/download/release-X.X.X/imagetool.zip + ``` + + where X.X.X is the latest release referenced on the [releases page](https://github.com/oracle/weblogic-image-tool/releases/latest). + + +1. Unzip the release ZIP file in the `imagetool-setup` directory. + + ```bash + $ unzip imagetool.zip + ``` + +1. Execute the following commands to set up the WebLogic Image Tool: + + ```bash + $ cd <workdir>/imagetool-setup/imagetool/bin + $ source setup.sh + ``` + + For example: + + ```bash + $ cd /scratch/imagetool-setup/imagetool/bin + $ source setup.sh + ``` + +##### Validate setup +To validate the setup of the WebLogic Image Tool: + +1. Enter the following command to retrieve the version of the WebLogic Image Tool: + + ``` bash + $ imagetool --version + ``` + +2. Enter `imagetool` then press the Tab key to display the available `imagetool` commands: + + ``` bash + $ imagetool + cache create help rebase update + ``` + +##### WebLogic Image Tool build directory + +The WebLogic Image Tool creates a temporary Docker context directory, prefixed by `wlsimgbuilder_temp`, every time the tool runs. Under normal circumstances, this context directory will be deleted. However, if the process is aborted or the tool is unable to remove the directory, it is safe for you to delete it manually. By default, the WebLogic Image Tool creates the Docker context directory under the user's home directory. 
If you prefer to use a different directory for the temporary context, set the environment variable `WLSIMG_BLDDIR`: + +``` bash +$ export WLSIMG_BLDDIR="/path/to/build/dir" +``` + +##### WebLogic Image Tool cache + +The WebLogic Image Tool maintains a local file cache store. This store is used to look up where the Java, WebLogic Server installers, and WebLogic Server patches reside in the local file system. By default, the cache store is located in the user's `$HOME/cache` directory. Under this directory, the lookup information is stored in the `.metadata` file. All automatically downloaded patches also reside in this directory. You can change the default cache store location by setting the environment variable `WLSIMG_CACHEDIR`: + +```bash +$ export WLSIMG_CACHEDIR="/path/to/cachedir" +``` + +##### Set up additional build scripts + +Creating an Oracle Access Management Docker image using the WebLogic Image Tool requires additional container scripts for Oracle Access Management domains. + +1. Clone the [docker-images](https://github.com/oracle/docker-images.git) repository to set up those scripts. In these steps, this directory is `DOCKER_REPO`: + + ```bash + $ cd /imagetool-setup + $ git clone https://github.com/oracle/docker-images.git + ``` + + For example: + + ```bash + $ cd /scratch/imagetool-setup + $ git clone https://github.com/oracle/docker-images.git + ``` + +>Note: If you want to create the image, continue with the following steps. Otherwise, to update the image see [update an image](#update-an-image). + +#### Create an image + +After [setting up the WebLogic Image Tool]({{< relref "/oam/create-or-update-image/#set-up-the-weblogic-image-tool" >}}), follow these steps to use the WebLogic Image Tool to `create` a new Oracle Access Management image. 
+ +##### Download the Oracle Access Management installation binaries and patches + +You must download the required Oracle Access Management installation binaries and patches as listed below from the [Oracle Software Delivery Cloud](https://edelivery.oracle.com/) and save them in a directory of your choice. + +The installation binaries and patches required are: + +* Oracle Identity and Access Management 12.2.1.4.0 + * fmw_12.2.1.4.0_idm.jar + +* Oracle Fusion Middleware 12c Infrastructure 12.2.1.4.0 + * fmw_12.2.1.4.0_infrastructure.jar + +* OAM and FMW Infrastructure Patches: + * View document ID 2723908.1 on [My Oracle Support](https://support.oracle.com). In the `Container Image Download/Patch Details` section, locate the `Oracle Access Management (OAM)` table. For the latest PSU click the `README` link in the `Documentation` column. In the README, locate the "Installed Software" section. All the patch numbers to be downloaded are listed here. Download all these individual patches from My Oracle Support. + +* Oracle JDK v8 + * jdk-8uXXX-linux-x64.tar.gz where XXX is the JDK version referenced in the README above. + +##### Update required build files + +The following files in the code repository location `/docker-images/OracleAccessManagement/imagetool/12.2.1.4.0` are used for creating the image: + +* `additionalBuildCmds.txt` +* `buildArgs` + +1. Edit the `/imagetool-setup/docker-images/OracleAccessManagement/imagetool/12.2.1.4.0/buildArgs` file and change `%DOCKER_REPO%`, `%JDK_VERSION%` and `%BUILDTAG%` appropriately. 
+ + For example: + + ``` + create + --jdkVersion=8u301 + --type oam + --version=12.2.1.4.0 + --tag=oam-latestpsu:12.2.1.4.0 + --pull + --installerResponseFile /scratch/imagetool-setup/docker-images/OracleFMWInfrastructure/dockerfiles/12.2.1.4/install.file,/scratch/imagetool-setup/docker-images/OracleAccessManagement/dockerfiles/12.2.1.4.0/install/iam.response + --additionalBuildCommands /scratch/imagetool-setup/docker-images/OracleAccessManagement/imagetool/12.2.1.4.0/additionalBuildCmds.txt + --additionalBuildFiles /scratch/imagetool-setup/docker-images/OracleAccessManagement/dockerfiles/12.2.1.4.0/container-scripts + ``` + +1. Edit the `/imagetool-setup/docker-images/OracleFMWInfrastructure/dockerfiles/12.2.1.4/install.file` and under the GENERIC section add the line INSTALL_TYPE="Fusion Middleware Infrastructure". For example: + + ``` + [GENERIC] + INSTALL_TYPE="Fusion Middleware Infrastructure" + DECLINE_SECURITY_UPDATES=true + SECURITY_UPDATES_VIA_MYORACLESUPPORT=false + ``` + +##### Create the image + +1. Add a JDK package to the WebLogic Image Tool cache. For example: + + ``` bash + $ imagetool cache addInstaller --type jdk --version 8uXXX --path /jdk-8uXXX-linux-x64.tar.gz + ``` + + where `XXX` is the JDK version downloaded. + +1. Add the downloaded installation binaries to the WebLogic Image Tool cache. For example: + + ``` bash + $ imagetool cache addInstaller --type fmw --version 12.2.1.4.0 --path /fmw_12.2.1.4.0_infrastructure.jar + + $ imagetool cache addInstaller --type OAM --version 12.2.1.4.0 --path /fmw_12.2.1.4.0_idm.jar + ``` + +1. Add the downloaded OPatch patch to the WebLogic Image Tool cache. For example: + + ``` bash + $ imagetool cache addEntry --key 28186730_13.9.4.2.7 --value /p28186730_139427_Generic.zip + ``` + +1. 
Add the rest of the downloaded product patches to the WebLogic Image Tool cache: + + ``` bash + $ imagetool cache addEntry --key _12.2.1.4.0 --value /p_122140_Generic.zip + ``` + + For example: + + ```bash + $ imagetool cache addEntry --key 32971905_12.2.1.4.0 --value /p32971905_122140_Generic.zip + + $ imagetool cache addEntry --key 20812896_12.2.1.4.0 --value /p20812896_122140_Generic.zip + + $ imagetool cache addEntry --key 32880070_12.2.1.4.0 --value /p32880070_122140_Generic.zip + + $ imagetool cache addEntry --key 33059296_12.2.1.4.0 --value /p33059296_122140_Generic.zip + + $ imagetool cache addEntry --key 32905339_12.2.1.4.0 --value /p32905339_122140_Generic.zip + + $ imagetool cache addEntry --key 33084721_12.2.1.4.0 --value /p33084721_122140_Generic.zip + + $ imagetool cache addEntry --key 31544353_12.2.1.4.0 --value /p31544353_122140_Linux-x86-64.zip + + $ imagetool cache addEntry --key 32957281_12.2.1.4.0 --value /p32957281_122140_Generic.zip + + $ imagetool cache addEntry --key 33093748_12.2.1.4.0 --value /p33093748_122140_Generic.zip + ``` + +1. 
Edit the `/imagetool-setup/docker-images/OracleAccessManagement/imagetool/12.2.1.4.0/buildArgs` file and append the product patches and opatch patch as follows: + + ``` + --patches 32971905_12.2.1.4.0,20812896_12.2.1.4.0,32880070_12.2.1.4.0,33059296_12.2.1.4.0,32905339_12.2.1.4.0,33084721_12.2.1.4.0,31544353_12.2.1.4.0,32957281_12.2.1.4.0,33093748_12.2.1.4.0 + --opatchBugNumber=28186730_13.9.4.2.7 + ``` + + An example `buildArgs` file is now as follows: + + ``` + create + --jdkVersion=8u301 + --type oam + --version=12.2.1.4.0 + --tag=oam-latestpsu:12.2.1.4.0 + --pull + --installerResponseFile /scratch/imagetool-setup/docker-images/OracleFMWInfrastructure/dockerfiles/12.2.1.4/install.file,/scratch/imagetool-setup/docker-images/OracleAccessManagement/dockerfiles/12.2.1.4.0/install/iam.response + --additionalBuildCommands /scratch/imagetool-setup/docker-images/OracleAccessManagement/imagetool/12.2.1.4.0/additionalBuildCmds.txt + --additionalBuildFiles /scratch/imagetool-setup/docker-images/OracleAccessManagement/dockerfiles/12.2.1.4.0/container-scripts + --patches 32971905_12.2.1.4.0,20812896_12.2.1.4.0,32880070_12.2.1.4.0,33059296_12.2.1.4.0,32905339_12.2.1.4.0,33084721_12.2.1.4.0,31544353_12.2.1.4.0,32957281_12.2.1.4.0,33093748_12.2.1.4.0 + --opatchBugNumber=28186730_13.9.4.2.7 + ``` + + >Note: In the `buildArgs` file: + > * `--jdkVersion` value must match the `--version` value used in the `imagetool cache addInstaller` command for `--type jdk`. + > * `--version` value must match the `--version` value used in the `imagetool cache addInstaller` command for `--type OAM`. + > * `--pull` always pulls the latest base Linux image `oraclelinux:7-slim` from the Docker registry. + + Refer to [this page](https://oracle.github.io/weblogic-image-tool/userguide/tools/create-image/) for the complete list of options available with the WebLogic Image Tool `create` command. + +1. 
Create the Oracle Access Management image: + + ```bash + $ imagetool @<absolute path to buildArgs file> + ``` + >Note: Make sure that the absolute path to the `buildArgs` file is prepended with a `@` character, as shown in the example above. + + For example: + + ```bash + $ imagetool @/docker-images/OracleAccessManagement/imagetool/12.2.1.4.0/buildArgs + ``` + +1. Check the created image using the `docker images` command: + + ```bash + $ docker images | grep oam + ``` + + The output will look similar to the following: + + ```bash + oam-latestpsu 12.2.1.4.0 ad732fc7c16b About a minute ago 3.35GB + ``` + +#### Update an image + +The steps below show how to update an existing Oracle Access Management image with an interim patch. In the examples below the image `oracle/oam:12.2.1.4.0` is updated with an interim patch. + +```bash +$ docker images + +REPOSITORY TAG IMAGE ID CREATED SIZE +oracle/oam 12.2.1.4.0 b051804ba15f 3 months ago 3.34GB +``` + +1. [Set up the WebLogic Image Tool]({{< relref "/oam/create-or-update-image/#set-up-the-weblogic-image-tool" >}}). + +1. Download the required interim patch and latest OPatch (28186730) from [My Oracle Support](https://support.oracle.com) and save them in a directory of your choice. + +1. Add the OPatch patch to the WebLogic Image Tool cache, for example: + + ```bash + $ imagetool cache addEntry --key 28186730_13.9.4.2.7 --value /p28186730_139427_Generic.zip + ``` + +1. Execute the `imagetool cache addEntry` command for each patch to add the required patch(es) to the WebLogic Image Tool cache. For example, to add patch `p32701831_12214210607_Generic.zip`: + + ```bash wrap + $ imagetool cache addEntry --key=32701831_12.2.1.4.210607 --value /p32701831_12214210607_Generic.zip + ``` + +1. Provide the following arguments to the WebLogic Image Tool `update` command: + + * `--fromImage` - Identify the image that needs to be updated. In the example below, the image to be updated is `oracle/oam:12.2.1.4.0`. 
+ * `--patches` - Multiple patches can be specified as a comma-separated list. + * `--tag` - Specify the new tag to be applied for the image being built. + + Refer [here](https://oracle.github.io/weblogic-image-tool/userguide/tools/update-image/) for the complete list of options available with the WebLogic Image Tool `update` command. + + > Note: The WebLogic Image Tool cache should have the latest OPatch zip. The WebLogic Image Tool will update the OPatch if it is not already updated in the image. + + For example: + + ```bash + $ imagetool update --fromImage oracle/oam:12.2.1.4.0 --tag=oracle/oam-new:12.2.1.4.0 --patches=32701831_12.2.1.4.210607 --opatchBugNumber=28186730_13.9.4.2.7 + ``` + + > Note: If the command fails because the files in the image being upgraded are not owned by `oracle:oracle`, then add the parameter `--chown <userid>:<groupid>` to correspond with the values returned in the error. + +1. Check the built image using the `docker images` command: + + ```bash + $ docker images | grep oam + ``` + + The output will look similar to the following: + + ``` + REPOSITORY TAG IMAGE ID CREATED SIZE + oracle/oam-new 12.2.1.4.0 78ccd1ad67eb 5 minutes ago 3.8GB + oracle/oam 12.2.1.4.0 b051804ba15f 3 months ago 3.34GB + ``` diff --git a/docs-source/content/oam/manage-oam-domains/delete-domain-home.md b/docs-source/content/oam/manage-oam-domains/delete-domain-home.md index c6fef81be..41523599a 100644 --- a/docs-source/content/oam/manage-oam-domains/delete-domain-home.md +++ b/docs-source/content/oam/manage-oam-domains/delete-domain-home.md @@ -1,27 +1,22 @@ --- -title: "Delete the OAM domain home" -draft: false -weight: 5 -pre : "5. " +title: "e. Delete the OAM domain home" description: "Learn about the steps to cleanup the OAM domain home." --- Sometimes in production, but most likely in testing environments, you might want to remove the domain home that is generated using the `create-domain.sh` script. -1. 
Run the following command to delete the jobs, domain, and configmaps: +1. Run the following command to delete the domain: ```bash - $ kubectl delete jobs -n - $ kubectl delete domain -n - $ kubectl delete configmaps -cm -n + $ cd $WORKDIR/kubernetes/delete-domain + $ ./delete-weblogic-domain-resources.sh -d ``` For example: ```bash - $ kubectl delete jobs accessdomain-create-oam-infra-domain-job -n oamns - $ kubectl delete domain accessdomain -n oamns - $ kubectl delete configmaps accessdomain-create-oam-infra-domain-job-cm -n oamns + $ cd $WORKDIR/kubernetes/delete-domain + $ ./delete-weblogic-domain-resources.sh -d accessdomain ``` 1. Drop the RCU schemas as follows: @@ -49,54 +44,76 @@ Sometimes in production, but most likely in testing environments, you might want -component MDS -component IAU -component IAU_APPEND -component IAU_VIEWER -component OPSS \ -component WLS -component STB -component OAM -f < /tmp/pwd.txt ``` - -1. Delete the Persistent Volume and Persistent Volume Claim: +1. Delete the contents of the persistent volume, for example: ```bash - $ kubectl delete pv - $ kubectl delete pvc -n + $ rm -rf /accessdomainpv/* ``` - + For example: ```bash - $ kubectl delete pv accessdomain-domain-pv - $ kubectl delete pvc accessdomain-domain-pvc -n oamns + $ rm -rf /scratch/OAMK8S/accessdomainpv/* ``` - -1. Delete the contents of the persistent volume, for example: + +1. Delete the WebLogic Kubernetes Operator, by running the following command: ```bash - $ rm -rf /accessdomainpv/* + $ helm delete weblogic-kubernetes-operator -n opns ``` +1. Delete the label from the OAM namespace: + + ```bash + $ kubectl label namespaces weblogic-operator- + ``` + For example: ```bash - $ rm -rf /scratch/OAMDockerK8S/accessdomainpv/* + $ kubectl label namespaces oamns weblogic-operator- ``` - -5. Delete the Oracle WebLogic Server Kubernetes Operator, by running the following command: +1. 
Delete the service account for the operator: ```bash - $ helm delete weblogic-kubernetes-operator -n opns + $ kubectl delete serviceaccount -n ``` -6. To delete NGINX: + For example: + ```bash + $ kubectl delete serviceaccount op-sa -n opns + ``` +1. Delete the operator namespace: + + ```bash + $ kubectl delete namespace + ``` + + For example: + ```bash + $ kubectl delete namespace opns + ``` + +1. To delete NGINX: ```bash - cd /weblogic-kubernetes-operator/kubernetes/samples/charts/ingress-per-domain - $ kubectl delete -f ssl-nginx-ingress.yaml + $ helm delete oam-nginx -n + ``` + + For example: + + ```bash + $ helm delete oam-nginx -n oamns ``` Then run: ```bash - $ helm delete nginx-ingress -n + $ helm delete nginx-ingress -n ``` For example: @@ -105,22 +122,13 @@ Sometimes in production, but most likely in testing environments, you might want $ helm delete nginx-ingress -n oamns ``` +1. Delete the OAM namespace: -7. To delete Voyager: - ```bash - helm delete voyager-operator -n voyager - ``` - then: - - ```bash - $ helm delete oam-voyager-ingress -n + $ kubectl delete namespace ``` For example: - ```bash - $ helm delete oam-voyager-ingress -n oamns + $ kubectl delete namespace oamns ``` - - diff --git a/docs-source/content/oam/manage-oam-domains/domain-lifecycle.md b/docs-source/content/oam/manage-oam-domains/domain-lifecycle.md index 2d76d9f70..8023410e5 100644 --- a/docs-source/content/oam/manage-oam-domains/domain-lifecycle.md +++ b/docs-source/content/oam/manage-oam-domains/domain-lifecycle.md @@ -1,9 +1,6 @@ --- -title: "Domain Life Cycle" -draft: false -weight: 1 -pre : "1. " -description: "Learn about the domain life cyle of an OAM domain." +title: "a. Domain Life Cycle" +description: "Learn about the domain life cycle of an OAM domain." --- As OAM domains use the WebLogic Kubernetes Operator, domain lifecyle operations are managed using the WebLogic Kubernetes Operator itself. 
@@ -18,9 +15,9 @@ Do not use the WebLogic Server Administration Console or Oracle Enterprise Manag ### View existing OAM servers -The default OAM deployment starts the AdminServer (`AdminServer`), two OAM Managed Servers (`oam_server1` and `oam_server2`) and one OAM Policy Manager server (`oam_policy_mgr1`). +The default OAM deployment starts the Administration Server (`AdminServer`), two OAM Managed Servers (`oam_server1` and `oam_server2`) and two OAM Policy Manager servers (`oam_policy_mgr1` and `oam_policy_mgr2`). -The deployment also creates, but doesn't start, three extra OAM Managed Servers (`oam-server3` to `oam-server5`) and four more OAM Policy Manager servers (`oam_policy_mgr2` to `oam_policy_mgr5`). +The deployment also creates, but doesn't start, three extra OAM Managed Servers (`oam-server3` to `oam-server5`) and three more OAM Policy Manager servers (`oam_policy_mgr3` to `oam_policy_mgr5`). All these servers are visible in the WebLogic Server Console `https://${MASTERNODE-HOSTNAME}:${MASTERNODE-PORT}/console` by navigating to *Domain Structure* > *oamcluster* > *Environment* > *Servers*. 
@@ -38,14 +35,16 @@ $ kubectl get pods -n oamns The output should look similar to the following: -```bash -NAME READY STATUS RESTARTS AGE -accessdomain-adminserver 1/1 Running 0 18h -accessdomain-create-oam-infra-domain-job-vj69h 0/1 Completed 0 23h -accessdomain-oam-policy-mgr1 1/1 Running 0 18h -accessdomain-oam-server1 1/1 Running 0 18h -accessdomain-oam-server2 1/1 Running 0 18h -helper 1/1 Running 0 40h +``` +NAME READY STATUS RESTARTS AGE +accessdomain-adminserver 1/1 Running 0 3h29m +accessdomain-create-oam-infra-domain-job-7c9r9 0/1 Completed 0 3h36m +accessdomain-oam-policy-mgr1 1/1 Running 0 3h21m +accessdomain-oam-policy-mgr2 1/1 Running 0 3h21m +accessdomain-oam-server1 1/1 Running 0 3h21m +accessdomain-oam-server2 1/1 Running 0 3h21m +helper 1/1 Running 0 3h51m +nginx-ingress-ingress-nginx-controller-76fb7678f-k8rhq 1/1 Running 0 55m ``` ### Starting/Scaling up OAM Managed Servers @@ -126,30 +125,35 @@ The number of OAM Managed Servers running is dependent on the `replicas` paramet The output will look similar to the following: - ```bash - NAME READY STATUS RESTARTS AGE - accessdomain-adminserver 1/1 Running 0 18h - accessdomain-create-oam-infra-domain-job-vj69h 0/1 Completed 0 23h - accessdomain-oam-policy-mgr1 1/1 Running 0 18h - accessdomain-oam-server1 1/1 Running 0 18h - accessdomain-oam-server2 1/1 Running 0 18h - accessdomain-oam-server3 0/1 Running 0 6s - accessdomain-oam-server4 0/1 Running 0 6s - helper 1/1 Running 0 40h + ``` + NAME READY STATUS RESTARTS AGE + accessdomain-adminserver 1/1 Running 0 3h33m + accessdomain-create-oam-infra-domain-job-7c9r9 0/1 Completed 0 3h40m + accessdomain-oam-policy-mgr1 1/1 Running 0 3h25m + accessdomain-oam-policy-mgr2 1/1 Running 0 3h25m + accessdomain-oam-server1 1/1 Running 0 3h25m + accessdomain-oam-server2 1/1 Running 0 3h25m + accessdomain-oam-server3 0/1 Running 0 9s + accessdomain-oam-server4 0/1 Running 0 9s + helper 1/1 Running 0 3h55m + nginx-ingress-ingress-nginx-controller-76fb7678f-k8rhq 1/1 
Running 0 59m ``` Two new pods (`accessdomain-oam-server3` and `accessdomain-oam-server4`) are started, but currently have a `READY` status of `0/1`. This means `oam_server3` and `oam_server4` are not currently running but are in the process of starting. The servers will take several minutes to start so keep executing the command until `READY` shows `1/1`: - ```bash - NAME READY STATUS RESTARTS AGE - accessdomain-adminserver 1/1 Running 0 18h - accessdomain-create-oam-infra-domain-job-vj69h 0/1 Completed 0 24h - accessdomain-oam-policy-mgr1 1/1 Running 0 18h - accessdomain-oam-server1 1/1 Running 0 18h - accessdomain-oam-server2 1/1 Running 0 18h - accessdomain-oam-server3 1/1 Running 0 5m5s - accessdomain-oam-server4 1/1 Running 0 5m5s - helper 1/1 Running 0 41h + ``` + NAME READY STATUS RESTARTS AGE + accessdomain-adminserver 1/1 Running 0 3h37m + accessdomain-create-oam-infra-domain-job-7c9r9 0/1 Completed 0 3h43m + accessdomain-oam-policy-mgr1 1/1 Running 0 3h29m + accessdomain-oam-policy-mgr2 1/1 Running 0 3h29m + accessdomain-oam-server1 1/1 Running 0 3h29m + accessdomain-oam-server2 1/1 Running 0 3h29m + accessdomain-oam-server3 1/1 Running 0 3m45s + accessdomain-oam-server4 1/1 Running 0 3m45s + helper 1/1 Running 0 3h59m + nginx-ingress-ingress-nginx-controller-76fb7678f-k8rhq 1/1 Running 0 63m + ``` **Note**: To check what is happening during server startup when `READY` is `0/1`, run the following command to view the log of the pod that is starting: @@ -164,9 +168,9 @@ The number of OAM Managed Servers running is dependent on the `replicas` paramet $ kubectl logs accessdomain-oam-server3 -n oamns ``` -1. To start more OAM Policy Manager servers, repeat the previous commands but change the `replicas` parameter for the `policy_cluster`. In the example below `replicas` has been increased to "2": +1. To start more OAM Policy Manager servers, repeat the previous commands but change the `replicas` parameter for the `policy_cluster`. 
In the example below `replicas` has been increased to "4": - ```bash + ``` - clusterName: policy_cluster replicas: 2 serverPod: @@ -182,19 +186,22 @@ The number of OAM Managed Servers running is dependent on the `replicas` paramet - $(CLUSTER_NAME) ``` - After saving the changes a new pod will be started. After a few minutes it will have a `READY` status of `1/1`. In the example below `accessdomain-oam-policy-mgr2` is started: + After saving the changes two new pods will be started (`accessdomain-oam-policy-mgr3` and `accessdomain-oam-policy-mgr4`). After a few minutes they will have a `READY` status of `1/1`. In the example below `accessdomain-oam-policy-mgr3` and `accessdomain-oam-policy-mgr4` are started: - ```bash - NAME READY STATUS RESTARTS AGE - accessdomain-adminserver 1/1 Running 0 18h - accessdomain-create-oam-infra-domain-job-vj69h 0/1 Completed 0 24h - accessdomain-oam-policy-mgr1 1/1 Running 0 18h - accessdomain-oam-policy-mgr2 1/1 Running 0 4m3s - accessdomain-oam-server1 1/1 Running 0 18h - accessdomain-oam-server2 1/1 Running 0 18h - accessdomain-oam-server3 1/1 Running 0 10m - accessdomain-oam-server4 1/1 Running 0 10m - helper 1/1 Running 0 41h + ``` + NAME READY STATUS RESTARTS AGE + accessdomain-adminserver 1/1 Running 0 3h43m + accessdomain-create-oam-infra-domain-job-7c9r9 0/1 Completed 0 3h49m + accessdomain-oam-policy-mgr1 1/1 Running 0 3h35m + accessdomain-oam-policy-mgr2 1/1 Running 0 3h35m + accessdomain-oam-policy-mgr3 1/1 Running 0 4m18s + accessdomain-oam-policy-mgr4 1/1 Running 0 4m18s + accessdomain-oam-server1 1/1 Running 0 3h35m + accessdomain-oam-server2 1/1 Running 0 3h35m + accessdomain-oam-server3 1/1 Running 0 9m27s + accessdomain-oam-server4 1/1 Running 0 9m27s + helper 1/1 Running 0 4h4m + nginx-ingress-ingress-nginx-controller-76fb7678f-k8rhq 1/1 Running 0 69m ``` ### Stopping/Scaling down OAM Managed Servers @@ -213,9 +220,9 @@ As mentioned in the previous section, the number of OAM Managed Servers running $ kubectl edit 
domain accessdomain -n oamns ``` -1. In the edit session search for "clusterName: oam_cluster" and look for the `replicas` parameter. In the example below `replicas` is set to "4", hence four OAM Managed Servers are started (oam_server1 - oam_server4): +1. In the edit session search for `clusterName: oam_cluster` and look for the `replicas` parameter. In the example below `replicas` is set to "4", hence four OAM Managed Servers are started (`access-domain-oam_server1` - `access-domain-oam_server4`): - ```bash + ``` clusters: - clusterName: oam_cluster replicas: 4 @@ -267,35 +274,41 @@ As mentioned in the previous section, the number of OAM Managed Servers running The output will look similar to the following: - ```bash - NAME READY STATUS RESTARTS AGE - accessdomain-adminserver 1/1 Running 0 18h - accessdomain-create-oam-infra-domain-job-vj69h 0/1 Completed 0 24h - accessdomain-oam-policy-mgr1 1/1 Running 0 18h - accessdomain-oam-policy-mgr2 1/1 Running 0 5m21s - accessdomain-oam-server1 1/1 Running 0 18h - accessdomain-oam-server2 1/1 Running 0 18h - accessdomain-oam-server3 1/1 Terminating 0 12m - accessdomain-oam-server4 1/1 Terminating 0 12m - helper 1/1 Running 0 41h + ``` + NAME READY STATUS RESTARTS AGE + accessdomain-adminserver 1/1 Running 0 3h45m + accessdomain-create-oam-infra-domain-job-7c9r9 0/1 Completed 0 3h51m + accessdomain-oam-policy-mgr1 1/1 Running 0 3h37m + accessdomain-oam-policy-mgr2 1/1 Running 0 3h37m + accessdomain-oam-policy-mgr3 1/1 Running 0 6m18s + accessdomain-oam-policy-mgr4 1/1 Running 0 6m18s + accessdomain-oam-server1 1/1 Running 0 3h37m + accessdomain-oam-server2 1/1 Running 0 3h37m + accessdomain-oam-server3 1/1 Running 0 11m + accessdomain-oam-server4 1/1 Terminating 0 11m + helper 1/1 Running 0 4h6m + nginx-ingress-ingress-nginx-controller-76fb7678f-k8rhq 1/1 Running 0 71m ``` - Two pods now have a `STATUS` of `Terminating` (accessdomain-oam-server3 and accessdomain-oam-server4). 
The servers will take a minute or two to stop, so keep executing the command until the pods have disappeared: + One pod now has a `STATUS` of `Terminating` (`accessdomain-oam-server4`). The server will take a minute or two to stop. Once terminated the other pod (`accessdomain-oam-server3`) will move to `Terminating` and then stop. Keep executing the command until the pods have disappeared: - ```bash + ``` NAME READY STATUS RESTARTS AGE - accessdomain-adminserver 1/1 Running 0 18h - accessdomain-create-oam-infra-domain-job-vj69h 0/1 Completed 0 24h - accessdomain-oam-policy-mgr1 1/1 Running 0 18h - accessdomain-oam-policy-mgr2 1/1 Running 0 6m3s - accessdomain-oam-server1 1/1 Running 0 18h - accessdomain-oam-server2 1/1 Running 0 18h - helper 1/1 Running 0 41h + accessdomain-adminserver 1/1 Running 0 3h48m + accessdomain-create-oam-infra-domain-job-7c9r9 0/1 Completed 0 3h54m + accessdomain-oam-policy-mgr1 1/1 Running 0 3h40m + accessdomain-oam-policy-mgr2 1/1 Running 0 3h40m + accessdomain-oam-policy-mgr3 1/1 Running 0 9m18s + accessdomain-oam-policy-mgr4 1/1 Running 0 9m18s + accessdomain-oam-server1 1/1 Running 0 3h40m + accessdomain-oam-server2 1/1 Running 0 3h40m + helper 1/1 Running 0 4h9m + nginx-ingress-ingress-nginx-controller-76fb7678f-k8rhq 1/1 Running 0 74m ``` -1. To stop OAM Policy Manager servers, repeat the previous commands but change the `replicas` parameter for the `policy_cluster`. In the example below `replicas` has been decreased from "2" to "1": +1. To stop OAM Policy Manager servers, repeat the previous commands but change the `replicas` parameter for the `policy_cluster`. In the example below `replicas` has been decreased from "4" to "2": - ```bash + ``` - clusterName: policy_cluster replicas: 1 serverPod: @@ -311,34 +324,39 @@ As mentioned in the previous section, the number of OAM Managed Servers running - $(CLUSTER_NAME) ``` - After saving the changes one pod will move to a `STATUS` of `Terminating` (accessdomain-oam-policy-mgr2). 
+ After saving the changes one pod will move to a `STATUS` of `Terminating` (`accessdomain-oam-policy-mgr4`). - ```bash + ``` NAME READY STATUS RESTARTS AGE - accessdomain-adminserver 1/1 Running 0 18h - accessdomain-create-oam-infra-domain-job-vj69h 0/1 Completed 0 24h - accessdomain-oam-policy-mgr1 1/1 Running 0 18h - accessdomain-oam-policy-mgr2 1/1 Terminating 0 7m12s - accessdomain-oam-server1 1/1 Running 0 18h - accessdomain-oam-server2 1/1 Running 0 18h - helper 1/1 Running 0 41h + accessdomain-adminserver 1/1 Running 0 3h49m + accessdomain-create-oam-infra-domain-job-7c9r9 0/1 Completed 0 3h55m + accessdomain-oam-policy-mgr1 1/1 Running 0 3h41m + accessdomain-oam-policy-mgr2 1/1 Running 0 3h41m + accessdomain-oam-policy-mgr3 1/1 Running 0 10m + accessdomain-oam-policy-mgr4 1/1 Terminating 0 10m + accessdomain-oam-server1 1/1 Running 0 3h41m + accessdomain-oam-server2 1/1 Running 0 3h41m + helper 1/1 Running 0 4h11m + nginx-ingress-ingress-nginx-controller-76fb7678f-k8rhq 1/1 Running 0 75m ``` - The server will take a minute or two to stop, so keep executing the command until the pod has disappeared: + The pods will take a minute or two to stop, so keep executing the command until the pods has disappeared: - ```bash - NAME READY STATUS RESTARTS AGE - accessdomain-adminserver 1/1 Running 0 18h - accessdomain-create-oam-infra-domain-job-vj69h 0/1 Completed 0 24h - accessdomain-oam-policy-mgr1 1/1 Running 0 18h - accessdomain-oam-server1 1/1 Running 0 18h - accessdomain-oam-server2 1/1 Running 0 18h - helper 1/1 Running 0 41h + ``` + NAME READY STATUS RESTARTS AGE + accessdomain-adminserver 1/1 Running 0 3h50m + accessdomain-create-oam-infra-domain-job-7c9r9 0/1 Completed 0 3h57m + accessdomain-oam-policy-mgr1 1/1 Running 0 3h42m + accessdomain-oam-policy-mgr2 1/1 Running 0 3h42m + accessdomain-oam-server1 1/1 Running 0 3h42m + accessdomain-oam-server2 1/1 Running 0 3h42m + helper 1/1 Running 0 4h12m + nginx-ingress-ingress-nginx-controller-76fb7678f-k8rhq 1/1 
Running 0 76m ``` -### Stopping and Starting the AdminServer and Managed Servers +### Stopping and Starting the Administration Server and Managed Servers -To stop all the OAM Managed Servers and the AdminServer in one operation: +To stop all the OAM Managed Servers and the Administration Server in one operation: 1. Run the following kubectl command to edit the domain: @@ -361,7 +379,7 @@ To stop all the OAM Managed Servers and the AdminServer in one operation: claimName: accessdomain-domain-pvc serverStartPolicy: IF_NEEDED webLogicCredentialsSecret: - name: accessdomain-domain-credentials + name: accessdomain-credentials ``` 1. Change `serverStartPolicy: IF_NEEDED` to `NEVER` as follows: @@ -373,7 +391,7 @@ To stop all the OAM Managed Servers and the AdminServer in one operation: claimName: accessdomain-domain-pvc serverStartPolicy: NEVER webLogicCredentialsSecret: - name: accessdomain-domain-credentials + name: accessdomain-credentials ``` 1. Save the file and exit (:wq!). @@ -392,25 +410,28 @@ To stop all the OAM Managed Servers and the AdminServer in one operation: The output will look similar to the following: - ```bash - NAME READY STATUS RESTARTS AGE - accessdomain-adminserver 1/1 Terminating 0 18h - accessdomain-create-oam-infra-domain-job-vj69h 0/1 Completed 0 24h - accessdomain-oam-policy-mgr1 1/1 Terminating 0 18h - accessdomain-oam-server1 1/1 Terminating 0 18h - accessdomain-oam-server2 1/1 Terminating 0 18h - helper 1/1 Running 0 41h + ``` + NAME READY STATUS RESTARTS AGE + accessdomain-adminserver 1/1 Terminating 0 3h52m + accessdomain-create-oam-infra-domain-job-7c9r9 0/1 Completed 0 3h59m + accessdomain-oam-policy-mgr1 1/1 Terminating 0 3h44m + accessdomain-oam-policy-mgr2 1/1 Terminating 0 3h44m + accessdomain-oam-server1 1/1 Terminating 0 3h44m + accessdomain-oam-server2 1/1 Terminating 0 3h44m + helper 1/1 Running 0 4h14m + nginx-ingress-ingress-nginx-controller-76fb7678f-k8rhq 1/1 Running 0 78m ``` - The AdminServer pods and Managed Server pods 
will move to a `STATUS` of `Terminating`. After a few minutes, run the command again and the pods should have disappeared: + The Administration Server pods and Managed Server pods will move to a `STATUS` of `Terminating`. After a few minutes, run the command again and the pods should have disappeared: - ```bash - NAME READY STATUS RESTARTS AGE - accessdomain-create-oam-infra-domain-job-vj69h 0/1 Completed 0 24h - helper 1/1 Running 0 41h + ``` + NAME READY STATUS RESTARTS AGE + accessdomain-create-oam-infra-domain-job-7c9r9 0/1 Completed 0 4h + helper 1/1 Running 0 4h15m + nginx-ingress-ingress-nginx-controller-76fb7678f-k8rhq 1/1 Running 0 80m ``` -1. To start the AdminServer and Managed Servers up again, repeat the previous steps but change `serverStartPolicy: NEVER` to `IF_NEEDED` as follows: +1. To start the Administration Server and Managed Servers up again, repeat the previous steps but change `serverStartPolicy: NEVER` to `IF_NEEDED` as follows: ``` volumes: @@ -419,7 +440,7 @@ To stop all the OAM Managed Servers and the AdminServer in one operation: claimName: accessdomain-domain-pvc serverStartPolicy: IF_NEEDED webLogicCredentialsSecret: - name: accessdomain-domain-credentials + name: accessdomain-credentials ``` 1. Run the following kubectl command to view the pods: @@ -435,21 +456,24 @@ To stop all the OAM Managed Servers and the AdminServer in one operation: The output will look similar to the following: - ```bash - NAME READY STATUS RESTARTS AGE - accessdomain-create-oam-infra-domain-job-vj69h 0/1 Completed 0 24h - accessdomain-introspect-domain-job-7qx29 1/1 Running 0 8s - helper 1/1 Running 0 41h + ``` + NAME READY STATUS RESTARTS AGE + accessdomain-create-oam-infra-domain-job-7c9r9 0/1 Completed 0 4h1m + accessdomain-introspector-jwqxw 1/1 Running 0 10s + helper 1/1 Running 0 4h17m + nginx-ingress-ingress-nginx-controller-76fb7678f-k8rhq 1/1 Running 0 81m ``` - The AdminServer pod will start followed by the OAM Managed Servers pods. 
This process will take several minutes, so keep executing the command until all the pods are running with `READY` status `1/1` : + The Administration Server pod will start followed by the OAM Managed Servers pods. This process will take several minutes, so keep executing the command until all the pods are running with `READY` status `1/1` : - ```bash - NAME READY STATUS RESTARTS AGE - accessdomain-adminserver 1/1 Running 0 6m4s - accessdomain-create-oam-infra-domain-job-vj69h 0/1 Completed 0 24h - accessdomain-oam-policy-mgr1 1/1 Running 0 3m5s - accessdomain-oam-server1 1/1 Running 0 3m5s - accessdomain-oam-server2 1/1 Running 0 3m5s - helper 1/1 Running 0 41h + ``` + NAME READY STATUS RESTARTS AGE + accessdomain-adminserver 1/1 Running 0 10m + accessdomain-create-oam-infra-domain-job-7c9r9 0/1 Completed 0 4h12m + accessdomain-oam-policy-mgr1 1/1 Running 0 7m35s + accessdomain-oam-policy-mgr2 1/1 Running 0 7m35s + accessdomain-oam-server1 1/1 Running 0 7m35s + accessdomain-oam-server2 1/1 Running 0 7m35s + helper 1/1 Running 0 4h28m + nginx-ingress-ingress-nginx-controller-76fb7678f-k8rhq 1/1 Running 0 92m ``` diff --git a/docs-source/content/oam/manage-oam-domains/logging-and-visualization.md b/docs-source/content/oam/manage-oam-domains/logging-and-visualization.md index ef4c233e7..0fbf0d6b5 100644 --- a/docs-source/content/oam/manage-oam-domains/logging-and-visualization.md +++ b/docs-source/content/oam/manage-oam-domains/logging-and-visualization.md @@ -1,8 +1,5 @@ --- -title: "Logging and Visualization" -draft: false -weight: 3 -pre : "3. " +title: "c. Logging and Visualization" description: "Describes the steps for logging and visualization with Elasticsearch and Kibana." 
--- @@ -10,7 +7,7 @@ After the OAM domain is set up you can publish operator and WebLogic Server logs In [Prepare your environment](../../prepare-your-environment) if you decided to use the Elasticsearch and Kibana by setting the parameter `elkIntegrationEnabled` to `true`, then the steps below must be followed to complete the setup. -If you did not set `elkIntegrationEnabled` to `true` and want to do so post configuration, run the following command: +If you did not set `elkIntegrationEnabled` to `true` and want to do so post configuration, run the following command from the `$WORKDIR` directory: ```bash $ helm upgrade --reuse-values --namespace operator --set "elkIntegrationEnabled=true" --set "logStashImage=logstash:6.6.0" --set "elasticSearchHost=elasticsearch.default.svc.cluster.local" --set "elasticSearchPort=9200" --wait weblogic-kubernetes-operator kubernetes/charts/weblogic-operator @@ -18,10 +15,10 @@ If you did not set `elkIntegrationEnabled` to `true` and want to do so post conf The output will look similar to the following: - ```bash + ``` Release "weblogic-kubernetes-operator" has been upgraded. Happy Helming! NAME: weblogic-kubernetes-operator - LAST DEPLOYED: Fri Sep 25 09:57:11 2020 + LAST DEPLOYED: Tue Nov 2 03:49:45 2021 NAMESPACE: operator STATUS: deployed REVISION: 3 @@ -33,13 +30,7 @@ If you did not set `elkIntegrationEnabled` to `true` and want to do so post conf 1. 
Create the Kubernetes resource using the following command: ```bash - $ kubectl apply -f /weblogic-kubernetes-operator/kubernetes/samples/scripts/elasticsearch-and-kibana/elasticsearch_and_kibana.yaml - ``` - - For example: - - ```bash - $ kubectl apply -f /scratch/OAMDockerK8S/weblogic-kubernetes-operator/kubernetes/samples/scripts/elasticsearch-and-kibana/elasticsearch_and_kibana.yaml + $ kubectl apply -f $WORKDIR/kubernetes/elasticsearch-and-kibana/elasticsearch_and_kibana.yaml ``` The output will look similar to the following: @@ -61,19 +52,25 @@ If you did not set `elkIntegrationEnabled` to `true` and want to do so post conf ``` COMPUTED VALUES: - dedicated: false + clusterSizePaddingValidationEnabled: true + domainNamespaceLabelSelector: weblogic-operator=enabled + domainNamespaceSelectionStrategy: LabelSelector domainNamespaces: - - oamns + - default elasticSearchHost: elasticsearch.default.svc.cluster.local elasticSearchPort: 9200 elkIntegrationEnabled: true + enableClusterRoleBinding: true externalDebugHttpPort: 30999 externalRestEnabled: false externalRestHttpsPort: 31001 - image: weblogic-kubernetes-operator:3.0.1 + externalServiceNameSuffix: -ext + image: weblogic-kubernetes-operator:3.3.0 imagePullPolicy: IfNotPresent internalDebugHttpPort: 30999 - istioEnabled: false + introspectorJobNameSuffix: -introspector + javaLoggingFileCount: 10 + javaLoggingFileSizeLimit: 20000000 javaLoggingLevel: FINE logStashImage: logstash:6.6.0 remoteDebugNodePortEnabled: false @@ -89,10 +86,10 @@ If you did not set `elkIntegrationEnabled` to `true` and want to do so post conf The output will look similar to the following: - ```bash + ``` NAME READY STATUS RESTARTS AGE - elasticsearch-857bd5ff6b-h8bxm 1/1 Running 0 67s - kibana-594465687d-84hxz 1/1 Running 0 67s + elasticsearch-f7b7c4c4-tb4pp 1/1 Running 0 85s + kibana-57f6685789-mgwdl 1/1 Running 0 85s ``` @@ -114,9 +111,9 @@ OAM Server logs can be pushed to the Elasticsearch server using the `logstash` p The output will 
look similar to the following: - ```bash - NAME CAPACITY ACCESS MODES RECLAIM POLICY STATUS CLAIM STORAGECLASS REASON AGE - accessdomain-domain-pv 10Gi RWX Retain Bound oamns/accessdomain-domain-pvc accessdomain-domain-storage-class 1h12m + ``` + NAME CAPACITY ACCESS MODES RECLAIM POLICY STATUS CLAIM STORAGECLASS REASON AGE + accessdomain-domain-pv 10Gi RWX Retain Bound oamns/accessdomain-domain-pvc accessdomain-domain-storage-class 23h ``` Make note of the `CLAIM` value, for example in this case `accessdomain-domain-pvc` @@ -135,11 +132,11 @@ OAM Server logs can be pushed to the Elasticsearch server using the `logstash` p The output will look similar to the following: - ```bash + ``` Mount Path: /u01/oracle/user_projects/domains ``` -1. Navigate to the `/weblogic-kubernetes-operator/kubernetes/samples/scripts/elasticsearch-and-kibana` directory and create a `logstash.yaml` file as follows. +1. Navigate to the `$WORKDIR/kubernetes/elasticsearch-and-kibana` directory and create a `logstash.yaml` file as follows. Change the `claimName` and `mountPath` values to match the values returned in the previous commands. Change `namespace` to your domain namespace e.g `oamns`: ``` @@ -182,7 +179,7 @@ OAM Server logs can be pushed to the Elasticsearch server using the `logstash` p 1. In the NFS persistent volume directory that corresponds to the mountPath `/u01/oracle/user_projects/domains`, create a `logstash` directory. For example: ``` - mkdir -p /scratch/OAMDockerK8S/accessdomainpv/logstash + $ mkdir -p /scratch/OAMK8S/accessdomainpv/logstash ``` 1. Create a `logstash.conf` in the newly created `logstash` directory that contains the following. Make sure the paths correspond to your `mountPath` and `domain` name: @@ -250,9 +247,9 @@ OAM Server logs can be pushed to the Elasticsearch server using the `logstash` p 1. 
Deploy the `logstash` pod by executing the following command: ```bash - $ kubectl create -f /weblogic-kubernetes-operator/kubernetes/samples/scripts/elasticsearch-and-kibana/logstash.yaml + $ kubectl create -f $WORKDIR/kubernetes/elasticsearch-and-kibana/logstash.yaml ``` - + The output will look similar to the following: ``` @@ -273,15 +270,17 @@ OAM Server logs can be pushed to the Elasticsearch server using the `logstash` p The output should look similar to the following: - ```bash + ``` NAME READY STATUS RESTARTS AGE - accessdomain-adminserver 1/1 Running 0 36m - accessdomain-create-oam-infra-domain-job-vj69h 0/1 Completed 0 24h - accessdomain-oam-policy-mgr1 1/1 Running 0 33m - accessdomain-oam-server1 1/1 Running 0 33m - accessdomain-oam-server2 1/1 Running 0 33m - helper 1/1 Running 0 41h - logstash-wls-7957897645-67c4k 1/1 Running 0 7s + accessdomain-adminserver 1/1 Running 0 18h + accessdomain-create-oam-infra-domain-job-7c9r9 0/1 Completed 0 23h + accessdomain-oam-policy-mgr1 1/1 Running 0 18h + accessdomain-oam-policy-mgr2 1/1 Running 0 18h + accessdomain-oam-server1 1/1 Running 1 18h + accessdomain-oam-server2 1/1 Running 1 18h + helper 1/1 Running 0 23h + logstash-wls-6687c5bf6-jmmdp 1/1 Running 0 12s + nginx-ingress-ingress-nginx-controller-76fb7678f-k8rhq 1/1 Running 0 20h ``` Then run the following to get the Elasticsearch pod name: @@ -292,10 +291,10 @@ OAM Server logs can be pushed to the Elasticsearch server using the `logstash` p The output should look similar to the following: - ```bash + ``` NAME READY STATUS RESTARTS AGE - elasticsearch-857bd5ff6b-h8bxm 1/1 Running 0 5m45s - kibana-594465687d-84hxz 1/1 Running 0 5m45s + elasticsearch-f7b7c4c4-tb4pp 1/1 Running 0 9m28s + kibana-57f6685789-mgwdl 1/1 Running 0 9m28s ``` ### Verify and access the Kibana console @@ -303,34 +302,33 @@ OAM Server logs can be pushed to the Elasticsearch server using the `logstash` p 1. 
Check if the indices are created correctly in the elasticsearch pod: ```bash - $ kubectl exec -it elasticsearch-857bd5ff6b-h8bxm -- /bin/bash + $ kubectl exec -it elasticsearch-f7b7c4c4-tb4pp -- /bin/bash ``` This will take you into a bash shell in the elasticsearch pod: ```bash - [root@elasticsearch-857bd5ff6b-h8bxm elasticsearch]# + [root@elasticsearch-f7b7c4c4-tb4pp elasticsearch]# ``` 1. In the elasticsearch bash shell, run the following to check the indices: ```bash - [root@elasticsearch-857bd5ff6b-h8bxm elasticsearch]# curl -i "127.0.0.1:9200/_cat/indices?v" + [root@elasticsearch-f7b7c4c4-tb4pp elasticsearch]# curl -i "127.0.0.1:9200/_cat/indices?v" ``` The output will look similar to the following: - ```bash + ``` HTTP/1.1 200 OK content-type: text/plain; charset=UTF-8 content-length: 696 health status index uuid pri rep docs.count docs.deleted store.size pri.store.size - yellow open logstash-2020.09.23 -kVgdpB7TPSwnjvhEDD2RA 5 1 825 0 406.6kb 406.6kb - green open .kibana_1 F6DNmwQ5SZaOM7I2LonEVw 1 0 2 0 7.6kb 7.6kb - yellow open logstash-2020.09.25 9QQA-DwvQay8uOAe3dvKuQ 5 1 149293 0 39.3mb 39.3mb - yellow open logstash-2020.09.24 t5N8O0LxRRabND6StHFgSg 5 1 69748 0 21.1mb 21.1mb - green open .kibana_task_manager kt1uSgpnSGWgWR8nKDuiVA 1 0 2 0 12.5kb 12.5kb + green open .kibana_task_manager -IPDdiajTSyIRjelI2QJIg 1 0 2 0 12.6kb 12.6kb + green open .kibana_1 YI9CZAjsTsCCuAyBb1ho3A 1 0 2 0 7.6kb 7.6kb + yellow open logstash-2021.11.01 4pDJSTGVR3-oOwTtHnnTkQ 5 1 148 0 173.9kb 173.9kb + yellow open logstash-2021.11.02 raOvTDoOTuC49nq241h4wg 5 1 115834 0 31.7mb 31.7mb ``` Exit the bash shell by typing `exit`. @@ -338,19 +336,17 @@ OAM Server logs can be pushed to the Elasticsearch server using the `logstash` p 1. 
Find the Kibana port by running the following command: ```bash - $ kubectl get svc + $ kubectl get svc | grep kibana ``` The output will look similar to the following: - ```bash + ``` NAME TYPE CLUSTER-IP EXTERNAL-IP PORT(S) AGE - elasticsearch ClusterIP 10.97.144.163 9200/TCP,9300/TCP 9m25s - kibana NodePort 10.103.150.116 5601:30707/TCP 9m25s - kubernetes ClusterIP 10.96.0.1 443/TCP 47h + kibana NodePort 10.104.248.203 5601:31394/TCP 11m ``` - In the example above the Kibana port is `30707`. + In the example above the Kibana port is `31394`. 1. Access the Kibana console with `http://${MASTERNODE-HOSTNAME}:${KIBANA-PORT}/app/kibana`. diff --git a/docs-source/content/oam/manage-oam-domains/monitoring-oam-domains.md b/docs-source/content/oam/manage-oam-domains/monitoring-oam-domains.md index 962d34129..d636f240b 100644 --- a/docs-source/content/oam/manage-oam-domains/monitoring-oam-domains.md +++ b/docs-source/content/oam/manage-oam-domains/monitoring-oam-domains.md @@ -1,8 +1,5 @@ --- -title: "Monitoring an OAM domain" -draft: false -weight: 4 -pre : "4. " +title: "d. Monitoring an OAM domain" description: "Describes the steps for Monitoring the OAM domain." --- @@ -10,22 +7,289 @@ After the OAM domain is set up you can monitor the OAM instance using Prometheus The WebLogic Monitoring Exporter uses the WLS RESTful Management API to scrape runtime information and then exports Prometheus-compatible metrics. It is deployed as a web application in a WebLogic Server (WLS) instance, version 12.2.1 or later, typically, in the instance from which you want to get metrics. -### Deploy the Prometheus operator +There are two ways to setup monitoring and you should choose one method or the other: -1. Clone Prometheus by running the following commands: +1. [Setup automatically using setup-monitoring.sh](#setup-automatically-using-setup-monitoring.sh) +1. 
[Setup using manual configuration](#setup-using-manual-configuration) + +### Setup automatically using setup-monitoring.sh + +The `$WORKDIR/kubernetes/monitoring-service/setup-monitoring.sh` script sets up the monitoring for the OAM domain. It installs Prometheus, Grafana, WebLogic Monitoring Exporter and deploys the web applications to the OAM domain. It also deploys the WebLogic Server Grafana dashboard. + +For usage details execute `./setup-monitoring.sh -h`. + +1. Edit the `$WORKDIR/kubernetes/monitoring-service/monitoring-inputs.yaml` and change the `domainUID`, `domainNamespace`, and `weblogicCredentialsSecretName` to correspond to your deployment. For example: + + ``` + version: create-accessdomain-monitoring-inputs-v1 + + # Unique ID identifying your domain. + # This ID must not contain an underscore ("_"), and must be lowercase and unique across all domains in a Kubernetes cluster. + domainUID: accessdomain + + # Name of the domain namespace + domainNamespace: oamns + + # Boolean value indicating whether to install kube-prometheus-stack + setupKubePrometheusStack: true + + # Additional parameters for helm install kube-prometheus-stack + # Refer https://github.com/prometheus-community/helm-charts/blob/main/charts/kube-prometheus-stack/values.yaml for additional parameters + # Sample : + # additionalParamForKubePrometheusStack: --set nodeExporter.enabled=false --set prometheusOperator.tls.enabled=false --set prometheusOperator.admissionWebhooks.enabled=false + additionalParamForKubePrometheusStack: + + # Name of the monitoring namespace + monitoringNamespace: monitoring + + # Name of the Admin Server + adminServerName: AdminServer + # + # Port number for admin server + adminServerPort: 7001 + + # Cluster name + oamClusterName: oam_cluster + + # Port number for managed server + oamManagedServerPort: 14100 + + # WebLogic Monitoring Exporter to Cluster + wlsMonitoringExporterTooamCluster: true + + # Cluster name + policyClusterName: policy_cluster + + # Port number
for managed server + policyManagedServerPort: 15100 + + # WebLogic Monitoring Exporter to Cluster + wlsMonitoringExporterTopolicyCluster: true + + + # Boolean to indicate if the adminNodePort will be exposed + exposeMonitoringNodePort: true + + # NodePort to expose Prometheus + prometheusNodePort: 32101 + + # NodePort to expose Grafana + grafanaNodePort: 32100 + + # NodePort to expose Alertmanager + alertmanagerNodePort: 32102 + + # Name of the Kubernetes secret for the Admin Server's username and password + weblogicCredentialsSecretName: accessdomain-credentials + ``` + +1. Run the following command to setup monitoring. ```bash - $ cd - $ git clone https://github.com/coreos/kube-prometheus.git + $ cd $WORKDIR/kubernetes/monitoring-service + $ ./setup-monitoring.sh -i monitoring-inputs.yaml + ``` + + The output should be similar to the following: + + ``` + Monitoring setup in monitoring in progress + + node/worker-node1 not labeled + node/worker-node2 not labeled + node/master-node not labeled + Setup prometheus-community/kube-prometheus-stack started + "prometheus-community" has been added to your repositories + Hang tight while we grab the latest from your chart repositories... + ...Successfully got an update from the "stable" chart repository + ...Successfully got an update from the "prometheus" chart repository + ...Successfully got an update from the "prometheus-community" chart repository + ...Successfully got an update from the "appscode" chart repository + Update Complete. ⎈ Happy Helming!⎈ + Setup prometheus-community/kube-prometheus-stack in progress + NAME: monitoring + LAST DEPLOYED: Thu Nov 18 14:13:49 2021 + NAMESPACE: monitoring + STATUS: deployed + REVISION: 1 + NOTES: + kube-prometheus-stack has been installed. 
Check its status by running: + kubectl --namespace monitoring get pods -l "release=monitoring" + + Visit https://github.com/prometheus-operator/kube-prometheus for instructions on how to create & configure Alertmanager and Prometheus instances using the Operator. + Setup prometheus-community/kube-prometheus-stack completed + Deploy WebLogic Monitoring Exporter started + Deploying WebLogic Monitoring Exporter with domainNamespace[oamns], domainUID[accessdomain], adminServerPodName[accessdomain-adminserver] + % Total % Received % Xferd Average Speed Time Time Time Current + Dload Upload Total Spent Left Speed + 100 655 100 655 0 0 1564 0 --:--:-- --:--:-- --:--:-- 1566 + 100 2196k 100 2196k 0 0 2025k 0 0:00:01 0:00:01 --:--:-- 5951k + created $WORKDIR/kubernetes/monitoring-service/scripts/wls-exporter-deploy dir + created /tmp/ci-EHhB7bP847 + /tmp/ci-EHhB7bP847 $WORKDIR/kubernetes/monitoring-service + in temp dir + adding: WEB-INF/weblogic.xml (deflated 61%) + adding: config.yml (deflated 60%) + $WORKDIR/kubernetes/monitoring-service + created /tmp/ci-e7wPrlLlud + 14:26 + /tmp/ci-e7wPrlLlud $WORKDIR/kubernetes/monitoring-service + in temp dir + adding: WEB-INF/weblogic.xml (deflated 61%) + adding: config.yml (deflated 60%) + $WORKDIR/kubernetes/monitoring-service + created /tmp/ci-U38XXs6d06 + /tmp/ci-U38XXs6d06 $WORKDIR/kubernetes/monitoring-service + in temp dir + adding: WEB-INF/weblogic.xml (deflated 61%) + adding: config.yml (deflated 60%) + $WORKDIR/kubernetes/monitoring-service + + Initializing WebLogic Scripting Tool (WLST) ... + + Welcome to WebLogic Server Administration Scripting Shell + + Type help() for help on available commands + + Connecting to t3://accessdomain-adminserver:7001 with userid weblogic ... + Successfully connected to Admin Server "AdminServer" that belongs to domain "accessdomain". + + Warning: An insecure protocol was used to connect to the server. + To ensure on-the-wire security, the SSL port or Admin port should be used instead. 
+ + Deploying ......... + Deploying application from /u01/oracle/wls-exporter-deploy/wls-exporter-adminserver.war to targets AdminServer (upload=true) ... + + .Completed the deployment of Application with status completed + Current Status of your Deployment: + Deployment command type: deploy + Deployment State : completed + Deployment Message : no message + Starting application wls-exporter-adminserver. + + Completed the start of Application with status completed + Current Status of your Deployment: + Deployment command type: start + Deployment State : completed + 14:27 + Deployment command type: start + Deployment State : completed + Deployment Message : no message + Deploying ......... + Deploying application from /u01/oracle/wls-exporter-deploy/wls-exporter-oam.war to targets oam_cluster (upload=true) ... + + .Completed the deployment of Application with status completed + Current Status of your Deployment: + Deployment command type: deploy + Deployment State : completed + Deployment Message : no message + Starting application wls-exporter-oam. + + .Completed the start of Application with status completed + Current Status of your Deployment: + Deployment command type: start + Deployment State : completed + Deployment Message : no message + Deploying ......... + Deploying application from /u01/oracle/wls-exporter-deploy/wls-exporter-policy.war to targets policy_cluster (upload=true) ... + + .Completed the deployment of Application with status completed + Current Status of your Deployment: + Deployment command type: deploy + Deployment State : completed + Deployment Message : no message + Starting application wls-exporter-policy. + + .Completed the start of Application with status completed + Current Status of your Deployment: + Deployment command type: start + Deployment State : completed + Deployment Message : no message + Disconnected from weblogic server: AdminServer + + + Exiting WebLogic Scripting Tool. 
+ + + 14:27 + Deploy WebLogic Monitoring Exporter completed + secret/basic-auth created + servicemonitor.monitoring.coreos.com/wls-exporter created + Deploying WebLogic Server Grafana Dashboard.... + {"id":25,"slug":"weblogic-server-dashboard","status":"success","uid":"5yUwzbZWz","url":"/d/5yUwzbZWz/weblogic-server-dashboard","version":1} + Deployed WebLogic Server Grafana Dashboard successfully + + Grafana is available at NodePort: 32100 + Prometheus is available at NodePort: 32101 + Altermanager is available at NodePort: 32102 + ============================================================== ``` - **Note**: Please refer the compatibility matrix of [Kube Prometheus](https://github.com/coreos/kube-prometheus#kubernetes-compatibility-matrix). Please download the [release](https://github.com/prometheus-operator/kube-prometheus/releases) of the repository according to the Kubernetes version of your cluster. In the above example the latest release will be downloaded. +#### Prometheus service discovery + +After the ServiceMonitor is deployed, the wls-exporter should be discovered by Prometheus and be able to collect metrics. + +1. Access the following URL to view Prometheus service discovery: `http://${MASTERNODE-HOSTNAME}:32101/service-discovery` + +1. Click on `serviceMonitor/oamns/wls-exporter/0` and then *show more*. Verify all the targets are mentioned. + +**Note** : It may take several minutes for `serviceMonitor/oamns/wls-exporter/0` to appear, so refresh the page until it does. + +#### Grafana dashboard + +1. Access the Grafana dashboard with the following URL: `http://${MASTERNODE-HOSTNAME}:32100` and login with `admin/admin`. Change your password when prompted. + +1. In the `Dashboards` panel, click on `WebLogic Server Dashboard`. The dashboard for your OAM domain should be displayed. If it is not displayed, click the `Search` icon in the left hand menu and search for `WebLogic Server Dashboard`. 
+ +#### Cleanup + +To uninstall the Prometheus, Grafana, WebLogic Monitoring Exporter and the deployments, you can run the `$WORKDIR/kubernetes/monitoring-service/delete-monitoring.sh` script. For usage details execute `./delete-monitoring.sh -h`. + +1. To uninstall, run the following command. For example: ```bash - $ cd /scratch/OAMDockerK8S - $ git clone https://github.com/coreos/kube-prometheus.git + $ cd $WORKDIR/kubernetes/monitoring-service + $ ./delete-monitoring.sh -i monitoring-inputs.yaml + ``` + + +### Setup using manual configuration + +Install Prometheus, Grafana and WebLogic Monitoring Exporter manually. Create the web applications and deploy to the OAM domain. + +#### Deploy the Prometheus operator + +1. Kube-Prometheus requires all nodes to be labelled with `kubernetes.io/os=linux`. To check if your nodes are labelled, run the following: + + ```bash + $ kubectl get nodes --show-labels + ``` + + If the nodes are labelled the output will look similar to the following: + + ``` + NAME STATUS ROLES AGE VERSION LABELS + worker-node1 Ready 42d v1.20.10 beta.kubernetes.io/arch=amd64,beta.kubernetes.io/os=linux,kubernetes.io/arch=amd64,kubernetes.io/hostname=worker-node1,kubernetes.io/os=linux + worker-node2 Ready 42d v1.20.10 beta.kubernetes.io/arch=amd64,beta.kubernetes.io/os=linux,kubernetes.io/arch=amd64,kubernetes.io/hostname=worker-node2,kubernetes.io/os=linux + master-node Ready master 42d v1.20.10 beta.kubernetes.io/arch=amd64,beta.kubernetes.io/os=linux,kubernetes.io/arch=amd64,kubernetes.io/hostname=master-node,kubernetes.io/os=linux,node-role.kubernetes.io/master= + ``` + + If the nodes are not labelled, run the following command: + + ```bash + $ kubectl label nodes --all kubernetes.io/os=linux + ``` + +1. 
Clone Prometheus by running the following commands: + + ```bash + $ cd $WORKDIR/kubernetes/monitoring-service + $ git clone https://github.com/coreos/kube-prometheus.git -b v0.7.0 + ``` + + **Note**: Please refer the compatibility matrix of [Kube Prometheus](https://github.com/coreos/kube-prometheus#kubernetes-compatibility-matrix). Please download the [release](https://github.com/prometheus-operator/kube-prometheus/releases) of the repository according to the Kubernetes version of your cluster. ``` 1. Run the following command to create the namespace and custom resource definitions: @@ -37,8 +301,9 @@ The WebLogic Monitoring Exporter uses the WLS RESTful Management API to scrape r The output will look similar to the following: - ```bash + ``` namespace/monitoring created + customresourcedefinition.apiextensions.k8s.io/alertmanagerconfigs.monitoring.coreos.com created customresourcedefinition.apiextensions.k8s.io/alertmanagers.monitoring.coreos.com created customresourcedefinition.apiextensions.k8s.io/podmonitors.monitoring.coreos.com created customresourcedefinition.apiextensions.k8s.io/probes.monitoring.coreos.com created @@ -61,13 +326,23 @@ The WebLogic Monitoring Exporter uses the WLS RESTful Management API to scrape r The output will look similar to the following: - ```bash + ``` alertmanager.monitoring.coreos.com/main created + prometheusrule.monitoring.coreos.com/alertmanager-main-rules created secret/alertmanager-main created service/alertmanager-main created serviceaccount/alertmanager-main created - servicemonitor.monitoring.coreos.com/alertmanager created + servicemonitor.monitoring.coreos.com/alertmanager-main created + clusterrole.rbac.authorization.k8s.io/blackbox-exporter created + clusterrolebinding.rbac.authorization.k8s.io/blackbox-exporter created + configmap/blackbox-exporter-configuration created + deployment.apps/blackbox-exporter created + service/blackbox-exporter created + serviceaccount/blackbox-exporter created + 
servicemonitor.monitoring.coreos.com/blackbox-exporter created + secret/grafana-config created secret/grafana-datasources created + configmap/grafana-dashboard-alertmanager-overview created configmap/grafana-dashboard-apiserver created configmap/grafana-dashboard-cluster-total created configmap/grafana-dashboard-controller-manager created @@ -89,22 +364,30 @@ The WebLogic Monitoring Exporter uses the WLS RESTful Management API to scrape r configmap/grafana-dashboard-prometheus created configmap/grafana-dashboard-proxy created configmap/grafana-dashboard-scheduler created - configmap/grafana-dashboard-statefulset created configmap/grafana-dashboard-workload-total created configmap/grafana-dashboards created deployment.apps/grafana created service/grafana created serviceaccount/grafana created servicemonitor.monitoring.coreos.com/grafana created + prometheusrule.monitoring.coreos.com/kube-prometheus-rules created clusterrole.rbac.authorization.k8s.io/kube-state-metrics created clusterrolebinding.rbac.authorization.k8s.io/kube-state-metrics created deployment.apps/kube-state-metrics created + prometheusrule.monitoring.coreos.com/kube-state-metrics-rules created service/kube-state-metrics created serviceaccount/kube-state-metrics created servicemonitor.monitoring.coreos.com/kube-state-metrics created + prometheusrule.monitoring.coreos.com/kubernetes-monitoring-rules created + servicemonitor.monitoring.coreos.com/kube-apiserver created + servicemonitor.monitoring.coreos.com/coredns created + servicemonitor.monitoring.coreos.com/kube-controller-manager created + servicemonitor.monitoring.coreos.com/kube-scheduler created + servicemonitor.monitoring.coreos.com/kubelet created clusterrole.rbac.authorization.k8s.io/node-exporter created clusterrolebinding.rbac.authorization.k8s.io/node-exporter created daemonset.apps/node-exporter created + prometheusrule.monitoring.coreos.com/node-exporter-rules created service/node-exporter created serviceaccount/node-exporter created 
servicemonitor.monitoring.coreos.com/node-exporter created @@ -122,8 +405,10 @@ The WebLogic Monitoring Exporter uses the WLS RESTful Management API to scrape r servicemonitor.monitoring.coreos.com/prometheus-adapter created clusterrole.rbac.authorization.k8s.io/prometheus-k8s created clusterrolebinding.rbac.authorization.k8s.io/prometheus-k8s created + prometheusrule.monitoring.coreos.com/prometheus-operator-rules created servicemonitor.monitoring.coreos.com/prometheus-operator created prometheus.monitoring.coreos.com/k8s created + prometheusrule.monitoring.coreos.com/prometheus-k8s-prometheus-rules created rolebinding.rbac.authorization.k8s.io/prometheus-k8s-config created rolebinding.rbac.authorization.k8s.io/prometheus-k8s created rolebinding.rbac.authorization.k8s.io/prometheus-k8s created @@ -132,38 +417,15 @@ The WebLogic Monitoring Exporter uses the WLS RESTful Management API to scrape r role.rbac.authorization.k8s.io/prometheus-k8s created role.rbac.authorization.k8s.io/prometheus-k8s created role.rbac.authorization.k8s.io/prometheus-k8s created - prometheusrule.monitoring.coreos.com/prometheus-k8s-rules created service/prometheus-k8s created serviceaccount/prometheus-k8s created - servicemonitor.monitoring.coreos.com/prometheus created - servicemonitor.monitoring.coreos.com/kube-apiserver created - servicemonitor.monitoring.coreos.com/coredns created - servicemonitor.monitoring.coreos.com/kube-controller-manager created - servicemonitor.monitoring.coreos.com/kube-scheduler created - servicemonitor.monitoring.coreos.com/kubelet created + servicemonitor.monitoring.coreos.com/prometheus-k8s created + unable to recognize "manifests/alertmanager-podDisruptionBudget.yaml": no matches for kind "PodDisruptionBudget" in version "policy/v1" + unable to recognize "manifests/prometheus-adapter-podDisruptionBudget.yaml": no matches for kind "PodDisruptionBudget" in version "policy/v1" + unable to recognize "manifests/prometheus-podDisruptionBudget.yaml": no matches 
for kind "PodDisruptionBudget" in version "policy/v1" ``` -1. Kube-Prometheus requires all nodes to be labelled with `kubernetes.io/os=linux`. To check if your nodes are labelled, run the following: - ```bash - $ kubectl get nodes --show-labels - ``` - - If the nodes are labelled the output will look similar to the following: - - ```bash - NAME STATUS ROLES AGE VERSION LABELS - worker-node1 Ready 42d v1.18.4 beta.kubernetes.io/arch=amd64,beta.kubernetes.io/os=linux,kubernetes.io/arch=amd64,kubernetes.io/hostname=worker-node1,kubernetes.io/os=linux - worker-node2 Ready 42d v1.18.4 beta.kubernetes.io/arch=amd64,beta.kubernetes.io/os=linux,kubernetes.io/arch=amd64,kubernetes.io/hostname=worker-node2,kubernetes.io/os=linux - master-node Ready master 42d v1.18.4 beta.kubernetes.io/arch=amd64,beta.kubernetes.io/os=linux,kubernetes.io/arch=amd64,kubernetes.io/hostname=master-node,kubernetes.io/os=linux,node-role.kubernetes.io/master= - ``` - - If the nodes are not labelled, run the following command: - - ```bash - $ kubectl label nodes --all kubernetes.io/os=linux - ``` - 1. 
Provide external access for Grafana, Prometheus, and Alertmanager, by running the following commands: ```bash @@ -178,7 +440,7 @@ The WebLogic Monitoring Exporter uses the WLS RESTful Management API to scrape r The output will look similar to the following: - ```bash + ``` service/grafana patched service/prometheus-k8s patched service/alertmanager-main patched @@ -192,360 +454,207 @@ The WebLogic Monitoring Exporter uses the WLS RESTful Management API to scrape r The output should look similar to the following: - ```bash - NAME READY STATUS RESTARTS AGE IP NODE NOMINATED NODE READINESS GATES - pod/alertmanager-main-0 2/2 Running 0 62s 10.244.2.10 worker-node2 - pod/alertmanager-main-1 2/2 Running 0 62s 10.244.1.19 worker-node1 - pod/alertmanager-main-2 2/2 Running 0 62s 10.244.2.11 worker-node2 - pod/grafana-86445dccbb-xz5d5 1/1 Running 0 62s 10.244.1.20 worker-node1 - pod/kube-state-metrics-b5b74495f-8bglg 3/3 Running 0 62s 10.244.1.21 worker-node2 - pod/node-exporter-wj4jw 2/2 Running 0 62s 10.196.4.112 master-node - pod/node-exporter-wl2jv 2/2 Running 0 62s 10.250.111.112 worker-node2 - pod/node-exporter-wt88k 2/2 Running 0 62s 10.250.111.111 worker-node1 - pod/prometheus-adapter-66b855f564-4pmwk 1/1 Running 0 62s 10.244.2.12 worker-node1 - pod/prometheus-k8s-0 3/3 Running 1 62s 10.244.2.13 worker-node1 - pod/prometheus-k8s-1 3/3 Running 1 62s 10.244.1.22 worker-node2 - pod/prometheus-operator-8ff9cc68-6q9lc 2/2 Running 0 69s 10.244.2.18 worker-node1 - - NAME TYPE CLUSTER-IP EXTERNAL-IP PORT(S) AGE SELECTOR - service/alertmanager-main NodePort 10.106.217.213 9093:32102/TCP 62s alertmanager=main,app=alertmanager - service/alertmanager-operated ClusterIP None 9093/TCP,9094/TCP,9094/UDP 62s app=alertmanager - service/grafana NodePort 10.97.246.92 3000:32100/TCP 62s app=grafana - service/kube-state-metrics ClusterIP None 8443/TCP,9443/TCP 62s app.kubernetes.io/name=kube-state-metrics - service/node-exporter ClusterIP None 9100/TCP 62s 
app.kubernetes.io/name=node-exporter - service/prometheus-adapter ClusterIP 10.109.14.232 443/TCP 62s name=prometheus-adapter - service/prometheus-k8s NodePort 10.101.68.142 9090:32101/TCP 62s app=prometheus,prometheus=k8s - service/prometheus-operated ClusterIP None 9090/TCP 62s app=prometheus - service/prometheus-operator ClusterIP None 8443/TCP 70 app.kubernetes.io/component=controller,app.kubernetes.io/name=prometheus-operator - ``` - - - -### Deploy WebLogic Monitoring Exporter - -1. Download WebLogic Monitoring Exporter: - - ```bash - $ mkdir -p /wls_exporter - $ cd /wls_exporter - $ wget https://github.com/oracle/weblogic-monitoring-exporter/releases/download//wls-exporter.war - $ wget https://github.com/oracle/weblogic-monitoring-exporter/releases/download//get.sh ``` - - **Note**: see [WebLogic Monitoring Exporter Releases](https://github.com/oracle/weblogic-monitoring-exporter/releases) for latest releases. - - For example: - - ```bash - $ mkdir -p /scratch/OAMDockerK8S/wls_exporter - $ cd /scratch/OAMDockerK8S/wls_exporter - $ wget https://github.com/oracle/weblogic-monitoring-exporter/releases/download/v1.2.0/wls-exporter.war - $ wget https://github.com/oracle/weblogic-monitoring-exporter/releases/download/v1.2.0/get1.2.0.sh - ``` - -1. Create a configuration file `config-admin.yaml` in the `/wls_exporter` directory that contains the following. 
Modify the `restPort` to match the server port for the OAM Administration Server: - - ``` - metricsNameSnakeCase: true - restPort: 7001 - queries: - - key: name - keyName: location - prefix: wls_server_ - applicationRuntimes: - key: name - keyName: app - componentRuntimes: - prefix: wls_webapp_config_ - type: WebAppComponentRuntime - key: name - values: [deploymentState, contextRoot, sourceInfo, openSessionsHighCount, openSessionsCurrentCount, sessionsOpenedTotalCount, sessionCookieMaxAgeSecs, sessionInvalidationIntervalSecs, sessionTimeoutSecs, singleThreadedServletPoolSize, sessionIDLength, servletReloadCheckSecs, jSPPageCheckSecs] - servlets: - prefix: wls_servlet_ - key: servletName - - JVMRuntime: - prefix: wls_jvm_ - key: name - - executeQueueRuntimes: - prefix: wls_socketmuxer_ - key: name - values: [pendingRequestCurrentCount] - - workManagerRuntimes: - prefix: wls_workmanager_ - key: name - values: [stuckThreadCount, pendingRequests, completedRequests] - - threadPoolRuntime: - prefix: wls_threadpool_ - key: name - values: [executeThreadTotalCount, queueLength, stuckThreadCount, hoggingThreadCount] - - JMSRuntime: - key: name - keyName: jmsruntime - prefix: wls_jmsruntime_ - JMSServers: - prefix: wls_jms_ - key: name - keyName: jmsserver - destinations: - prefix: wls_jms_dest_ - key: name - keyName: destination - - persistentStoreRuntimes: - prefix: wls_persistentstore_ - key: name - - JDBCServiceRuntime: - JDBCDataSourceRuntimeMBeans: - prefix: wls_datasource_ - key: name - - JTARuntime: - prefix: wls_jta_ - key: name - ``` - - - -1. Create a configuration file `config-oamserver.yaml` in the `/wls_exporter` directory that contains the following. 
Modify the `restPort` to match the server port for the OAM Managed Servers: - - ``` - metricsNameSnakeCase: true - restPort: 14100 - queries: - - key: name - keyName: location - prefix: wls_server_ - applicationRuntimes: - key: name - keyName: app - componentRuntimes: - prefix: wls_webapp_config_ - type: WebAppComponentRuntime - key: name - values: [deploymentState, contextRoot, sourceInfo, openSessionsHighCount, openSessionsCurrentCount, sessionsOpenedTotalCount, sessionCookieMaxAgeSecs, sessionInvalidationIntervalSecs, sessionTimeoutSecs, singleThreadedServletPoolSize, sessionIDLength, servletReloadCheckSecs, jSPPageCheckSecs] - servlets: - prefix: wls_servlet_ - key: servletName - - JVMRuntime: - prefix: wls_jvm_ - key: name - - executeQueueRuntimes: - prefix: wls_socketmuxer_ - key: name - values: [pendingRequestCurrentCount] - - workManagerRuntimes: - prefix: wls_workmanager_ - key: name - values: [stuckThreadCount, pendingRequests, completedRequests] - - threadPoolRuntime: - prefix: wls_threadpool_ - key: name - values: [executeThreadTotalCount, queueLength, stuckThreadCount, hoggingThreadCount] - - JMSRuntime: - key: name - keyName: jmsruntime - prefix: wls_jmsruntime_ - JMSServers: - prefix: wls_jms_ - key: name - keyName: jmsserver - destinations: - prefix: wls_jms_dest_ - key: name - keyName: destination - - persistentStoreRuntimes: - prefix: wls_persistentstore_ - key: name - - JDBCServiceRuntime: - JDBCDataSourceRuntimeMBeans: - prefix: wls_datasource_ - key: name - - JTARuntime: - prefix: wls_jta_ - key: name - ``` - -1. Create a configuration file `config-policyserver.yaml` in the `/wls_exporter` directory that contains the following. 
Modify the `restPort` to match the server port for the OAM Policy Manager Servers: - - ``` - metricsNameSnakeCase: true - restPort: 15100 - queries: - - key: name - keyName: location - prefix: wls_server_ - applicationRuntimes: - key: name - keyName: app - componentRuntimes: - prefix: wls_webapp_config_ - type: WebAppComponentRuntime - key: name - values: [deploymentState, contextRoot, sourceInfo, openSessionsHighCount, openSessionsCurrentCount, sessionsOpenedTotalCount, sessionCookieMaxAgeSecs, sessionInvalidationIntervalSecs, sessionTimeoutSecs, singleThreadedServletPoolSize, sessionIDLength, servletReloadCheckSecs, jSPPageCheckSecs] - servlets: - prefix: wls_servlet_ - key: servletName - - JVMRuntime: - prefix: wls_jvm_ - key: name - - executeQueueRuntimes: - prefix: wls_socketmuxer_ - key: name - values: [pendingRequestCurrentCount] - - workManagerRuntimes: - prefix: wls_workmanager_ - key: name - values: [stuckThreadCount, pendingRequests, completedRequests] - - threadPoolRuntime: - prefix: wls_threadpool_ - key: name - values: [executeThreadTotalCount, queueLength, stuckThreadCount, hoggingThreadCount] - - JMSRuntime: - key: name - keyName: jmsruntime - prefix: wls_jmsruntime_ - JMSServers: - prefix: wls_jms_ - key: name - keyName: jmsserver - destinations: - prefix: wls_jms_dest_ - key: name - keyName: destination - - persistentStoreRuntimes: - prefix: wls_persistentstore_ - key: name - - JDBCServiceRuntime: - JDBCDataSourceRuntimeMBeans: - prefix: wls_datasource_ - key: name - - JTARuntime: - prefix: wls_jta_ - key: name - ``` - -1. 
Generate the deployment package for the OAM Administration Server: - - ```bash - $ chmod 777 get.sh - $ ./get config-admin.yaml + NAME READY STATUS RESTARTS AGE IP NODE NOMINATED NODE READINESS GATES + pod/alertmanager-main-0 2/2 Running 0 67s 10.244.1.7 worker-node1 + pod/alertmanager-main-1 2/2 Running 0 67s 10.244.2.26 worker-node2 + pod/alertmanager-main-2 2/2 Running 0 67s 10.244.1.8 worker-node1 + pod/grafana-f8cd57fcf-tmlqt 1/1 Running 0 65s 10.244.2.28 worker-node2 + pod/kube-state-metrics-587bfd4f97-l8knh 3/3 Running 0 65s 10.244.1.9 worker-node1 + pod/node-exporter-2ztpd 2/2 Running 0 65s 10.247.95.26 worker-node1 + pod/node-exporter-92sxb 2/2 Running 0 65s 10.250.40.59 worker-node2 + pod/node-exporter-d77tl 2/2 Running 0 65s 10.196.54.36 master-node + pod/prometheus-adapter-69b8496df6-6gqrz 1/1 Running 0 65s 10.244.2.29 worker-node2 + pod/prometheus-k8s-0 2/2 Running 1 66s 10.244.2.27 worker-node2 + pod/prometheus-k8s-1 2/2 Running 1 66s 10.244.1.10 worker-node1 + pod/prometheus-operator-7649c7454f-9p747 2/2 Running 0 2m 10.244.2.25 worker-node2 + + NAME TYPE CLUSTER-IP EXTERNAL-IP PORT(S) AGE SELECTOR + service/alertmanager-main NodePort 10.104.92.62 9093:32102/TCP 67s alertmanager=main,app=alertmanager + service/alertmanager-operated ClusterIP None 9093/TCP,9094/TCP,9094/UDP 67s app=alertmanager + service/grafana NodePort 10.100.171.3 3000:32100/TCP 66s app=grafana + service/kube-state-metrics ClusterIP None 8443/TCP,9443/TCP 66s app.kubernetes.io/name=kube-state-metrics + service/node-exporter ClusterIP None 9100/TCP 66s app.kubernetes.io/name=node-exporter + service/prometheus-adapter ClusterIP 10.109.248.92 443/TCP 66s name=prometheus-adapter + service/prometheus-k8s NodePort 10.98.212.247 9090:32101/TCP 66s app=prometheus,prometheus=k8s + service/prometheus-operated ClusterIP None 9090/TCP 66s app=prometheus + service/prometheus-operator ClusterIP None 8443/TCP 2m1s app.kubernetes.io/component=controller,app.kubernetes.io/name=prometheus-operator 
``` - - For example: - + + +#### Deploy WebLogic Monitoring Exporter + + +1. Generate the WebLogic Monitoring Exporter deployment package. The `wls-exporter.war` package needs to be updated and created for each listening port (Administration Server and Managed Servers) in the domain. Set the environment values below and run the script `get-wls-exporter.sh` to generate the required WAR files at `${WORKDIR}/kubernetes/monitoring-service/scripts/wls-exporter-deploy`: + ```bash - $ chmod 777 get1.2.0.sh - $ ./get1.2.0.sh config-admin.yaml + $ cd $WORKDIR/kubernetes/monitoring-service/scripts + $ export adminServerPort=7001 + $ export wlsMonitoringExporterTopolicyCluster=true + $ export policyManagedServerPort=15100 + $ export wlsMonitoringExporterTooamCluster=true + $ export oamManagedServerPort=14100 + $ sh get-wls-exporter.sh ``` - + The output will look similar to the following: ``` - % Total % Received % Xferd Average Speed Time Time Time Current + % Total % Received % Xferd Average Speed Time Time Time Current Dload Upload Total Spent Left Speed - 100 642 100 642 0 0 1272 0 --:--:-- --:--:-- --:--:-- 1273 - 100 2033k 100 2033k 0 0 1224k 0 0:00:01 0:00:01 --:--:-- 2503k - created /tmp/ci-AcBAO1eTer - /tmp/ci-AcBAO1eTer /scratch/OAMDockerK8S/wls_exporter + 100 655 100 655 0 0 1107 0 --:--:-- --:--:-- --:--:-- 1108 + 100 2196k 100 2196k 0 0 1787k 0 0:00:01 0:00:01 --:--:-- 9248k + created $WORKDIR/kubernetes/monitoring-service/scripts/wls-exporter-deploy dir + domainNamespace is empty, setting to default oamns + domainUID is empty, setting to default accessdomain + weblogicCredentialsSecretName is empty, setting to default "accessdomain-domain-credentials" + adminServerName is empty, setting to default "AdminServer" + oamClusterName is empty, setting to default "oam_cluster" + policyClusterName is empty, setting to default "policy_cluster" + created /tmp/ci-Bu74rCBxwu + /tmp/ci-Bu74rCBxwu $WORKDIR/kubernetes/monitoring-service/scripts in temp dir - adding: config.yml
(deflated 65%) - /scratch/OAMDockerK8S/wls_exporter - + adding: WEB-INF/weblogic.xml (deflated 61%) + adding: config.yml (deflated 60%) + $WORKDIR/kubernetes/monitoring-service/scripts + created /tmp/ci-RQv3rLbLsX + /tmp/ci-RQv3rLbLsX $WORKDIR/kubernetes/monitoring-service/scripts + in temp dir + adding: WEB-INF/weblogic.xml (deflated 61%) + adding: config.yml (deflated 60%) + $WORKDIR/kubernetes/monitoring-service/scripts + created /tmp/ci-DWIYlocP5e + /tmp/ci-DWIYlocP5e $WORKDIR/kubernetes/monitoring-service/scripts + in temp dir + adding: WEB-INF/weblogic.xml (deflated 61%) + adding: config.yml (deflated 60%) + $WORKDIR/kubernetes/monitoring-service/scripts ``` - This will generate a `wls-exporter.war` file in the same directory. This war file contains a `config.yml` that corresponds to `config-admin.yaml`. Rename the file as follows: - +1. Deploy the WebLogic Monitoring Exporter WAR files into the Oracle Access Management domain: + ```bash - mv wls-exporter.war wls-exporter-admin.war + $ cd $WORKDIR/kubernetes/monitoring-service/scripts + $ kubectl cp wls-exporter-deploy /-adminserver:/u01/oracle + $ kubectl cp deploy-weblogic-monitoring-exporter.py /-adminserver:/u01/oracle/wls-exporter-deploy + $ kubectl exec -it -n -adminserver -- /u01/oracle/oracle_common/common/bin/wlst.sh /u01/oracle/wls-exporter-deploy/deploy-weblogic-monitoring-exporter.py -domainName -adminServerName AdminServer -adminURL -adminserver:7001 -username weblogic -password -oamClusterName oam_cluster -wlsMonitoringExporterTooamCluster true -policyClusterName policy_cluster -wlsMonitoringExporterTopolicyCluster true ``` - -1. 
Generate the deployment package for the OAM Managed Server and Policy Manager Server, for example: + For example: ```bash - $ ./get1.2.0.sh config-oamserver.yaml - $ mv wls-exporter.war wls-exporter-oamserver.war - $ ./get1.2.0.sh config-policyserver.yaml - $ mv wls-exporter.war wls-exporter-policyserver.war + $ cd $WORKDIR/kubernetes/monitoring-service/scripts + $ kubectl cp wls-exporter-deploy oamns/accessdomain-adminserver:/u01/oracle + $ kubectl cp deploy-weblogic-monitoring-exporter.py oamns/accessdomain-adminserver:/u01/oracle/wls-exporter-deploy + $ kubectl exec -it -n oamns accessdomain-adminserver -- /u01/oracle/oracle_common/common/bin/wlst.sh /u01/oracle/wls-exporter-deploy/deploy-weblogic-monitoring-exporter.py -domainName accessdomain -adminServerName AdminServer -adminURL accessdomain-adminserver:7001 -username weblogic -password -oamClusterName oam_cluster -wlsMonitoringExporterTooamCluster true -policyClusterName policy_cluster -wlsMonitoringExporterTopolicyCluster true ``` -1. Copy the war files to the persistent volume directory: + The output will look similar to the following: + + ``` + Initializing WebLogic Scripting Tool (WLST) ... + + Welcome to WebLogic Server Administration Scripting Shell + + Type help() for help on available commands + + Connecting to t3://accessdomain-adminserver:7001 with userid weblogic ... + Successfully connected to Admin Server "AdminServer" that belongs to domain "accessdomain". + + Warning: An insecure protocol was used to connect to the server. + To ensure on-the-wire security, the SSL port or Admin port should be used instead. + + Deploying ......... + Deploying application from /u01/oracle/wls-exporter-deploy/wls-exporter-adminserver.war to targets AdminServer (upload=true) ... 
+ + ..Completed the deployment of Application with status completed + Current Status of your Deployment: + Deployment command type: deploy + Deployment State : completed + Deployment Message : no message + Starting application wls-exporter-adminserver. + + .Completed the start of Application with status completed + Current Status of your Deployment: + Deployment command type: start + Deployment State : completed + Deployment Message : no message + Deploying ......... + Deploying application from /u01/oracle/wls-exporter-deploy/wls-exporter-oam.war to targets oam_cluster (upload=true) ... + + .Completed the deployment of Application with status completed + Current Status of your Deployment: + Deployment command type: deploy + Deployment State : completed + Deployment Message : no message + Starting application wls-exporter-oam. + + .Completed the start of Application with status completed + Current Status of your Deployment: + Deployment command type: start + Deployment State : completed + Deployment Message : no message + Deploying ......... + Deploying application from /u01/oracle/wls-exporter-deploy/wls-exporter-policy.war to targets policy_cluster (upload=true) ... + + .Completed the deployment of Application with status completed + Current Status of your Deployment: + Deployment command type: deploy + Deployment State : completed + Deployment Message : no message + Starting application wls-exporter-policy. + + .Completed the start of Application with status completed + Current Status of your Deployment: + Deployment command type: start + Deployment State : completed + Deployment Message : no message + Disconnected from weblogic server: AdminServer + + Exiting WebLogic Scripting Tool. + + + ``` + +#### Configure Prometheus Operator + +Prometheus enables you to collect metrics from the WebLogic Monitoring Exporter. The Prometheus Operator identifies the targets using service discovery. 
To get the WebLogic Monitoring Exporter endpoint discovered as a target, you must create a service monitor pointing to the service. + +The exporting of metrics from wls-exporter requires basicAuth, so a Kubernetes Secret is created with the user name and password that are base64 encoded. This Secret is used in the ServiceMonitor deployment. The `wls-exporter-ServiceMonitor.yaml` has basicAuth with credentials as username: `weblogic` and password: `<password>`, base64 encoded. + +1. Run the following command to get the base64 encoded version of the weblogic password: ```bash - $ cp wls-exporter*.war // + $ echo -n "<password>" | base64 ``` - For example: + The output will look similar to the following: - ```bash - $ cp wls-exporter*.war /scratch/OAMDockerK8S/accessdomainpv/ + ``` + V2VsY29tZTE= ``` - -### Deploy the wls-exporter war files in OAM WebLogic server - -1. Login to the Oracle Enterprise Manager Console using the URL `https://${MASTERNODE-HOSTNAME}:${MASTERNODE-PORT}/em`. - -1. Navigate to *WebLogic Domain* > *Deployments*. Click on the padlock in the upper right hand corner and select *Lock and Edit*. - -1. From the 'Deployment' drop down menu select *Deploy*. - -1. In the *Select Archive* screen, under *Archive or exploded directory is on the server where Enterprise Manager is running*, click *Browse*. Navigate to the `/u01/oracle/user_projects/domains` -directory and select `wls-exporter-admin.war`. Click *OK* and then *Next*. - -1. In *Select Target* check *AdminServer* and click *Next*. - -1. In *Application Attributes* set the following and click *Next*: - - * Application Name: `wls-exporter-admin` - * Context Root: `wls-exporter` - * Distribution: `Install and start application (servicing all requests)` - -1. In *Deployment Settings* click *Deploy*. - -1. Once you see the message *Deployment Succeeded*, click *Close*. - -1. Click on the padlock in the upper right hand corner and select *Activate Changes*. - -1.
Repeat the above steps to deploy `wls-exporter-oamserver.war` with the following caveats: - - * In *Select Target* choose *oam_cluster* - * In *Application Attributes* set Application Name: `wls-exporter-oamserver`, Context Root: `wls-exporter` - * In *Distribution* select `Install and start application (servicing all requests)` - -1. Repeat the above steps to deploy `wls-exporter-policyserver.war` with the following caveats: - - * In *Select Target* choose *policy_cluster* - * In *Application Attributes* set Application Name: `wls-exporter-policyserver`, Context Root: `wls-exporter` - * In *Distribution* select `Install and start application (servicing all requests)` - -1. Check the wls-exporter is accessible using the URL: `https://${MASTERNODE-HOSTNAME}:${MASTERNODE-PORT}/wls-exporter`. - - You should see a page saying *This is the WebLogic Monitoring Exporter*. - - -### Prometheus Operator Configuration - -Prometheus has to be configured to collect the metrics from the weblogic-monitor-exporter. The Prometheus operator identifies the targets using service discovery. To get the weblogic-monitor-exporter end point discovered as a target, you will need to create a service monitor to point to the service. - -1. Create a `wls-exporter-service-monitor.yaml` in the `/wls_exporter` directory with the following contents: +1. Update the `$WORKDIR/kubernetes/monitoring-service/manifests/wls-exporter-ServiceMonitor.yaml` and change the `password:` value to the value returned above. 
Also change the `namespace:` and `weblogic.domainName:` values to match your OAM namespace and domain name: ``` apiVersion: v1 kind: Secret metadata: name: basic-auth - namespace: monitoring + namespace: oamns data: - password: V2VsY29tZTE= ## base64 - user: d2VibG9naWM= ## weblogic base64 + password: V2VsY29tZTE= + user: d2VibG9naWM= type: Opaque --- apiVersion: monitoring.coreos.com/v1 kind: ServiceMonitor metadata: - name: wls-exporter-accessdomain - namespace: monitoring + name: wls-exporter + namespace: oamns labels: k8s-app: wls-exporter + release: monitoring spec: namespaceSelector: matchNames: @@ -568,41 +677,33 @@ Prometheus has to be configured to collect the metrics from the weblogic-monitor interval: 10s honorLabels: true path: /wls-exporter/metrics - ``` - - **Note**: In the above example, change the `password: V2VsY29tZTE=` value to the base64 encoded version of your weblogic password. To find the base64 value run the following: - - ```bash - $ echo -n "" | base64 ``` - - If using a different namespace from `oamns` or a different domain_UID from `accessdomain`, then change accordingly. - -1. Add Rolebinding for the WebLogic OAM domain namespace: - ```bash - $ cd /kube-prometheus/manifests - ``` - - Edit the `prometheus-roleBindingSpecificNamespaces.yaml` file and add the following to the file for your OAM domain namespace, for example `oamns`. - +1. Update the `$WORKDIR/kubernetes/monitoring-service/manifests/prometheus-roleSpecific-domain-namespace.yaml` and change the `namespace` to match your OAM namespace. 
For example: + ``` + apiVersion: rbac.authorization.k8s.io/v1 + items: - apiVersion: rbac.authorization.k8s.io/v1 - kind: RoleBinding + kind: Role metadata: name: prometheus-k8s namespace: oamns - roleRef: - apiGroup: rbac.authorization.k8s.io - kind: Role - name: prometheus-k8s - subjects: - - kind: ServiceAccount - name: prometheus-k8s - namespace: monitoring + rules: + - apiGroups: + - "" + resources: + - services + - endpoints + - pods + verbs: + - get + - list + - watch + kind: RoleList ``` - For example the file should now read: +1. Update the `$WORKDIR/kubernetes/monitoring-service/manifests/prometheus-roleBinding-domain-namespace.yaml` and change the `namespace` to match your OAM namespace. For example: ``` apiVersion: rbac.authorization.k8s.io/v1 @@ -620,95 +721,69 @@ Prometheus has to be configured to collect the metrics from the weblogic-monitor - kind: ServiceAccount name: prometheus-k8s namespace: monitoring - - apiVersion: rbac.authorization.k8s.io/v1 - kind: RoleBinding - metadata: - name: prometheus-k8s - namespace: default - .... - ``` - -1. Add the Role for WebLogic OAM domain namespace. Edit the `prometheus-roleSpecificNamespaces.yaml` and change the namespace to your OAM domain namespace, for example `oamns`. - - ``` - - apiVersion: rbac.authorization.k8s.io/v1 - kind: Role - metadata: - name: prometheus-k8s - namespace: oamns - rules: - - apiGroups: - - "" - resources: - - services - - endpoints - - pods - verbs: - - get - - list - - watch - .... - ``` - -1. Apply the yaml files as follows: + kind: RoleBindingList + ``` + + +1. Run the following command to enable Prometheus: - ```bash - $ kubectl apply -f prometheus-roleBindingSpecificNamespaces.yaml - $ kubectl apply -f prometheus-roleSpecificNamespaces.yaml + ```bash + $ kubectl apply -f .
``` - The output should look similar to the following: - + The output will look similar to the following: + ``` - kubectl apply -f prometheus-roleBindingSpecificNamespaces.yaml rolebinding.rbac.authorization.k8s.io/prometheus-k8s created - Warning: kubectl apply should be used on resource created by either kubectl create --save-config or kubectl apply - rolebinding.rbac.authorization.k8s.io/prometheus-k8s configured - Warning: kubectl apply should be used on resource created by either kubectl create --save-config or kubectl apply - rolebinding.rbac.authorization.k8s.io/prometheus-k8s configured - Warning: kubectl apply should be used on resource created by either kubectl create --save-config or kubectl apply - rolebinding.rbac.authorization.k8s.io/prometheus-k8s configured - - $ kubectl apply -f prometheus-roleSpecificNamespaces.yaml role.rbac.authorization.k8s.io/prometheus-k8s created - Warning: kubectl apply should be used on resource created by either kubectl create --save-config or kubectl apply - role.rbac.authorization.k8s.io/prometheus-k8s configured - Warning: kubectl apply should be used on resource created by either kubectl create --save-config or kubectl apply - role.rbac.authorization.k8s.io/prometheus-k8s configured - Warning: kubectl apply should be used on resource created by either kubectl create --save-config or kubectl apply - role.rbac.authorization.k8s.io/prometheus-k8s configured + secret/basic-auth created + servicemonitor.monitoring.coreos.com/wls-exporter created ``` -### Deploy the ServiceMonitor - -1. 
Run the following command to create the ServiceMonitor: - - ```bash - $ cd /wls_exporter - $ kubectl create -f wls-exporter-service-monitor.yaml - ``` - The output will look similar to the following: - ``` - servicemonitor.monitoring.coreos.com/wls-exporter-accessdomain created - ``` -### Prometheus Service Discovery +#### Prometheus Service Discovery -After ServiceMonitor is deployed, the wls-exporter should be discovered by Prometheus and be able to scrape metrics. +After the ServiceMonitor is deployed, the wls-exporter should be discovered by Prometheus and be able to collect metrics. 1. Access the following URL to view Prometheus service discovery: `http://${MASTERNODE-HOSTNAME}:32101/service-discovery` -1. Click on `monitoring/wls-exporter-accessdomain/0 ` and then *show more*. Verify all the targets are mentioned. +1. Click on `oamns/wls-exporter/0 ` and then *show more*. Verify all the targets are mentioned. -### Grafana Dashboard +#### Grafana Dashboard 1. Access the Grafana dashboard with the following URL: `http://${MASTERNODE-HOSTNAME}:32100` and login with `admin/admin`. Change your password when prompted. -1. Import the Grafana dashboard by navigating on the left hand menu to *Create* > *Import*. Copy the content from `/fmw-kubernetes/OracleAccessManagement/kubernetes/3.0.1/grafana/weblogic_dashboard.json` and paste. Then click *Load* and *Import*. +1. Import the Grafana dashboard by navigating on the left hand menu to *Create* > *Import*. Copy the content from `$WORKDIR/kubernetes/monitoring-service/config/weblogic-server-dashboard-import.json` and paste. Then click *Load* and *Import*. The dashboard should be displayed in the Dashboards panel. - + +#### Cleanup + +To clean up a manual installation: + +1. Run the following commands: + + ```bash + $ cd $WORKDIR/kubernetes/monitoring-service/manifests/ + $ kubectl delete -f . + ``` + +1. 
Delete the deployments: + + ```bash + $ cd $WORKDIR/kubernetes/monitoring-service/scripts/ + $ kubectl cp undeploy-weblogic-monitoring-exporter.py /-adminserver:/u01/oracle/wls-exporter-deploy + $ kubectl exec -it -n -adminserver -- /u01/oracle/oracle_common/common/bin/wlst.sh /u01/oracle/wls-exporter-deploy/undeploy-weblogic-monitoring-exporter.py -domainName -adminServerName AdminServer -adminURL -adminserver:7001 -username weblogic -password -oamClusterName oam_cluster -wlsMonitoringExporterTooamCluster true -policyClusterName policy_cluster -wlsMonitoringExporterTopolicyCluster true + ``` + +1. Delete Prometheus: + + ```bash + $ cd $WORKDIR/kubernetes/monitoring-service/kube-prometheus + $ kubectl delete -f manifests + $ kubectl delete -f manifests/setup + ``` diff --git a/docs-source/content/oam/manage-oam-domains/wlst-admin-operations.md b/docs-source/content/oam/manage-oam-domains/wlst-admin-operations.md index d22c94985..2d18ea9ee 100644 --- a/docs-source/content/oam/manage-oam-domains/wlst-admin-operations.md +++ b/docs-source/content/oam/manage-oam-domains/wlst-admin-operations.md @@ -1,43 +1,40 @@ --- -title: "WLST Administration Operations" -draft: false -weight: 2 -pre : "2. " +title: "b. WLST Administration Operations" description: "Describes the steps for WLST administration using helper pod running in the same Kubernetes Cluster as OAM Domain." --- To use WLST to administer the OAM domain, use the helper pod in the same Kubernetes cluster as the OAM Domain. -1. Run the following command to start a bash shell in the helper pod (if one is not already running): +1. 
Run the following command to start a bash shell in the helper pod: ```bash - $ kubectl exec -it helper -n -- /bin/bash - ``` + $ kubectl exec -it helper -n -- /bin/bash + ``` - For example: + For example: - ```bash - $ kubectl exec -it helper -n oamns -- /bin/bash - ``` + ```bash + $ kubectl exec -it helper -n oamns -- /bin/bash + ``` - This will take you into a bash shell in the running helper pod: + This will take you into a bash shell in the running helper pod: - ```bash - [oracle@helper ~]$ - ``` + ```bash + [oracle@helper ~]$ + ``` 1. Connect to WLST using the following command: ```bash - cd $ORACLE_HOME/oracle_common/common/bin - ./wlst.sh + $ cd $ORACLE_HOME/oracle_common/common/bin + $ ./wlst.sh ``` The output will look similar to the following: - ```bash + ``` Initializing WebLogic Scripting Tool (WLST) ... Jython scans all the jar files it can find at first startup. Depending on the system, this process may take a few minutes to complete, and WLST may not return a prompt right away. @@ -52,12 +49,12 @@ To use WLST to administer the OAM domain, use the helper pod in the same Kuberne 1. To access t3 for the Administration Server connect as follows: ```bash - connect('weblogic','','t3://accessdomain-adminserver:7001') + wls:/offline> connect('weblogic','','t3://accessdomain-adminserver:7001') ``` The output will look similar to the following: - ```bash + ``` Connecting to t3://accessdomain-adminserver:7001 with userid weblogic ... Successfully connected to Admin Server "AdminServer" that belongs to domain "accessdomain". @@ -75,7 +72,7 @@ To use WLST to administer the OAM domain, use the helper pod in the same Kuberne The output will look similar to the following: - ```bash + ``` Connecting to t3://accessdomain-cluster-oam-cluster:14100 with userid weblogic ... Successfully connected to managed Server "oam_server1" that belongs to domain "accessdomain". 
@@ -139,7 +136,6 @@ oracle.oam.common.runtimeent | oracle.oam.commonutil | oracle.oam.config | oracle.oam.controller | -oracle.oam.credcollector | oracle.oam.default | oracle.oam.diagnostic | oracle.oam.engine.authn | @@ -148,7 +144,6 @@ oracle.oam.engine.policy | oracle.oam.engine.ptmetadata | oracle.oam.engine.session | oracle.oam.engine.sso | -oracle.oam.engine.token | oracle.oam.esso | oracle.oam.extensibility.lifecycle | oracle.oam.foundation.access | @@ -162,7 +157,6 @@ oracle.oam.plugin | oracle.oam.proxy.oam | oracle.oam.proxy.oam.workmanager | oracle.oam.proxy.opensso | -oracle.oam.proxy.osso | oracle.oam.pswd.service.provider | oracle.oam.replication | oracle.oam.user.identity.provider | @@ -193,7 +187,6 @@ oracle.oam.common.runtimeent | oracle.oam.commonutil | oracle.oam.config | oracle.oam.controller | -oracle.oam.credcollector | oracle.oam.default | oracle.oam.diagnostic | oracle.oam.engine.authn | @@ -202,7 +195,6 @@ oracle.oam.engine.policy | oracle.oam.engine.ptmetadata | oracle.oam.engine.session | oracle.oam.engine.sso | -oracle.oam.engine.token | oracle.oam.esso | oracle.oam.extensibility.lifecycle | oracle.oam.foundation.access | @@ -216,7 +208,6 @@ oracle.oam.plugin | oracle.oam.proxy.oam | oracle.oam.proxy.oam.workmanager | oracle.oam.proxy.opensso | -oracle.oam.proxy.osso | oracle.oam.pswd.service.provider | oracle.oam.replication | oracle.oam.user.identity.provider | @@ -228,17 +219,13 @@ Verify that `TRACE:32` log level is set by connecting to the Administration Serv ```bash $ kubectl exec -it accessdomain-adminserver -n oamns -- /bin/bash [oracle@accessdomain-adminserver oracle]$ - [oracle@accessdomain-adminserver oracle]$ cd /u01/oracle/user_projects/domains/accessdomain/servers/oam_server1/logs [oracle@accessdomain-adminserver logs]$ tail oam_server1-diagnostic.log -[2020-09-25T09:02:19.492+00:00] [oam_server1] [TRACE:32] [] [oracle.oam.config] [tid: Configuration Store Observer] [userId: ] [ecid: 
0dc53783-fada-4709-b7c1-8958bbbaac95-0000000b,0:1062] [APP: oam_server] [partition-name: DOMAIN] [tenant-name: GLOBAL] [SRC_CLASS: oracle.security.am.admin.config.util.store.DbStore] [SRC_METHOD: getSelectSQL] SELECT SQL:SELECT version from IDM_OBJECT_STORE where id = ? and version = (select max(version) from IDM_OBJECT_STORE where id = ?) -[2020-09-25T09:02:19.494+00:00] [oam_server1] [TRACE] [] [oracle.oam.config] [tid: Configuration Store Observer] [userId: ] [ecid: 0dc53783-fada-4709-b7c1-8958bbbaac95-0000000b,0:1062] [APP: oam_server] [partition-name: DOMAIN] [tenant-name: GLOBAL] [SRC_CLASS: oracle.security.am.admin.config.util.store.DbStore] [SRC_METHOD: load] Time (ms) to load key CONFIG:-1{FIELD_TYPES=INT, SELECT_FIELDS=SELECT version from IDM_OBJECT_STORE }:3 -[2020-09-25T09:02:19.494+00:00] [oam_server1] [TRACE:16] [] [oracle.oam.config] [tid: Configuration Store Observer] [userId: ] [ecid: 0dc53783-fada-4709-b7c1-8958bbbaac95-0000000b,0:1062] [APP: oam_server] [partition-name: DOMAIN] [tenant-name: GLOBAL] [SRC_CLASS: oracle.security.am.admin.config.util.store.DbStore] [SRC_METHOD: load] RETURN -[2020-09-25T09:02:20.050+00:00] [oam_server1] [TRACE:16] [] [oracle.oam.engine.session] [tid: OAM SME Service - 2] [userId: ] [ecid: 0dc53783-fada-4709-b7c1-8958bbbaac95-0000000b,0:1777] [APP: oam_server] [partition-name: DOMAIN] [tenant-name: GLOBAL] [SRC_CLASS: oracle.security.am.engines.sme.mgrdb.SessionManagerImpl$3] [SRC_METHOD: run] ENTRY -[2020-09-25T09:02:20.057+00:00] [oam_server1] [TRACE] [] [oracle.oam.engine.session] [tid: OAM SME Service - 2] [userId: ] [ecid: 0dc53783-fada-4709-b7c1-8958bbbaac95-0000000b,0:1777] [APP: oam_server] [partition-name: DOMAIN] [tenant-name: GLOBAL] [SRC_CLASS: oracle.security.am.engines.sme.mgrdb.SessionManagerImpl$3] [SRC_METHOD: run] Session Store Current status: UP, at time: Fri Sep 25 09:02:20 GMT 2020. Previous known status: UP. 
Polling Interval: 15000 milliseconds -[2020-09-25T09:02:20.057+00:00] [oam_server1] [TRACE:16] [] [oracle.oam.engine.session] [tid: OAM SME Service - 2] [userId: ] [ecid: 0dc53783-fada-4709-b7c1-8958bbbaac95-0000000b,0:1777] [APP: oam_server] [partition-name: DOMAIN] [tenant-name: GLOBAL] [SRC_CLASS: oracle.security.am.engines.sme.mgrdb.SessionManagerImpl$3] [SRC_METHOD: run] RETURN -[2020-09-25T09:02:22.602+00:00] [oam_server1] [NOTIFICATION] [] [oracle.wsm.agent.handler.jaxrs.RESTJeeResourceFilter] [tid: [ACTIVE].ExecuteThread: '9' for queue: 'weblogic.kernel.Default (self-tuning)'] [userId: weblogic] [ecid: 0dc53783-fada-4709-b7c1-8958bbbaac95-000000c8,0] [APP: wls-management-services] [partition-name: DOMAIN] [tenant-name: GLOBAL] Tenant: default, ProcessResponse is set to false -[2020-09-25T09:02:27.608+00:00] [oam_server1] [NOTIFICATION] [] [oracle.wsm.agent.handler.jaxrs.RESTJeeResourceFilter] [tid: [ACTIVE].ExecuteThread: '43' for queue: 'weblogic.kernel.Default (self-tuning)'] [userId: weblogic] [ecid: 0dc53783-fada-4709-b7c1-8958bbbaac95-000000c9,0] [APP: wls-management-services] [partition-name: DOMAIN] [tenant-name: GLOBAL] Tenant: default, ProcessResponse is set to false +[2021-11-02T10:26:14.793+00:00] [oam_server1] [TRACE:32] [] [oracle.oam.config] [tid: Configuration Store Observer] [userId: ] [ecid: 8b3ac37b-c7cf-46dd-aeee-5ed67886be21-0000000b,0:1795] [APP: oam_server] [partition-name: DOMAIN] [tenant-name: GLOBAL] [SRC_CLASS: oracle.security.am.admin.config.util.observable.ObservableConfigStore$StoreWatcher] [SRC_METHOD: run] Start of run before start of detection at 1,635,848,774,793. Detector: oracle.security.am.admin.config.util.observable.DbStoreChangeDetector:Database configuration store:DSN:jdbc/oamds.
Monitor: { StoreMonitor: { disabled: 'false' } } +[2021-11-02T10:26:14.793+00:00] [oam_server1] [TRACE] [] [oracle.oam.config] [tid: Configuration Store Observer] [userId: ] [ecid: 8b3ac37b-c7cf-46dd-aeee-5ed67886be21-0000000b,0:1795] [APP: oam_server] [partition-name: DOMAIN] [tenant-name: GLOBAL] [SRC_CLASS: oracle.security.am.admin.config.util.store.StoreUtil] [SRC_METHOD: getContainerProperty] Configuration property CONFIG_HISTORY not specified +[2021-11-02T10:26:14.793+00:00] [oam_server1] [TRACE] [] [oracle.oam.config] [tid: Configuration Store Observer] [userId: ] [ecid: 8b3ac37b-c7cf-46dd-aeee-5ed67886be21-0000000b,0:1795] [APP: oam_server] [partition-name: DOMAIN] [tenant-name: GLOBAL] [SRC_CLASS: oracle.security.am.admin.config.util.store.StoreUtil] [SRC_METHOD: getContainerProperty] Configuration property CONFIG not specified +[2021-11-02T10:26:14.795+00:00] [oam_server1] [TRACE:32] [] [oracle.oam.config] [tid: Configuration Store Observer] [userId: ] [ecid: 8b3ac37b-c7cf-46dd-aeee-5ed67886be21-0000000b,0:1795] [APP: oam_server] [partition-name: DOMAIN] [tenant-name: GLOBAL] [SRC_CLASS: oracle.security.am.admin.config.util.store.DbStore] [SRC_METHOD: getSelectSQL] SELECT SQL:SELECT version from IDM_OBJECT_STORE where id = ? and version = (select max(version) from IDM_OBJECT_STORE where id = ?) +[2021-11-02T10:26:14.797+00:00] [oam_server1] [TRACE] [] [oracle.oam.config] [tid: Configuration Store Observer] [userId: ] [ecid: 8b3ac37b-c7cf-46dd-aeee-5ed67886be21-0000000b,0:1795] [APP: oam_server] [partition-name: DOMAIN] [tenant-name: GLOBAL] [SRC_CLASS: oracle.security.am.admin.config.util.store.DbStore] [SRC_METHOD: load] Time (ms) to load key CONFIG:-1{FIELD_TYPES=INT, SELECT_FIELDS=SELECT version from IDM_OBJECT_STORE }:4 ``` ### Performing WLST Administration via SSL @@ -250,7 +237,15 @@ $ kubectl exec -it accessdomain-adminserver -n oamns -- /bin/bash 1. 
Create a `myscripts` directory as follows: ```bash - $ cd /weblogic-kubernetes-operator/kubernetes/samples/scripts + $ cd $WORKDIR/kubernetes/ + $ mkdir myscripts + $ cd myscripts + ``` + + For example: + + ```bash + $ cd $WORKDIR/kubernetes/ $ mkdir myscripts $ cd myscripts ``` @@ -259,7 +254,7 @@ $ kubectl exec -it accessdomain-adminserver -n oamns -- /bin/bash **Note**: Update the `domainName`, `domainUID` and `namespace` based on your environment. For example: - ```bash + ``` apiVersion: v1 kind: Service metadata: @@ -287,7 +282,7 @@ $ kubectl exec -it accessdomain-adminserver -n oamns -- /bin/bash and the following sample yaml template file `-oamcluster-ssl.yaml` for the OAM Managed Server: - ```bash + ``` apiVersion: v1 kind: Service metadata: @@ -352,7 +347,7 @@ $ kubectl exec -it accessdomain-adminserver -n oamns -- /bin/bash The output will look similar to the following: - ```bash + ``` accessdomain-adminserverssl ClusterIP None 7002/TCP 102s accessdomain-oamcluster-ssl ClusterIP None 14101/TCP 35s ``` @@ -376,9 +371,9 @@ $ kubectl exec -it accessdomain-adminserver -n oamns -- /bin/bash ```bash wls:/offline> connect('weblogic','','t3s://accessdomain-adminserverssl:7002') Connecting to t3s://accessdomain-adminserverssl:7002 with userid weblogic ... - - - + + + Successfully connected to Admin Server "AdminServer" that belongs to domain "accessdomain". wls:/accessdomain/serverConfig/> @@ -389,6 +384,9 @@ $ kubectl exec -it accessdomain-adminserver -n oamns -- /bin/bash ```bash wls:/offline> connect('weblogic','','t3s://accessdomain-oamcluster-ssl:14101') Connecting to t3s://accessdomain-oamcluster-ssl:14101 with userid weblogic ... + + + Successfully connected to managed Server "oam_server1" that belongs to domain "accessdomain". 
``` diff --git a/docs-source/content/oam/patch-and-upgrade/_index.md b/docs-source/content/oam/patch-and-upgrade/_index.md index 91850adf8..acd22f520 100644 --- a/docs-source/content/oam/patch-and-upgrade/_index.md +++ b/docs-source/content/oam/patch-and-upgrade/_index.md @@ -1,7 +1,7 @@ +++ title = "Patch and Upgrade" -weight = 10 -pre = "10. " +weight = 11 +pre = "11. " description= "This document provides steps to patch or upgrade an OAM image, WebLogic Kubernetes Operator or Kubernetes Cluster." +++ diff --git a/docs-source/content/oam/patch-and-upgrade/patch_an_image.md b/docs-source/content/oam/patch-and-upgrade/patch_an_image.md index 5709c1f93..5b2904f2b 100644 --- a/docs-source/content/oam/patch-and-upgrade/patch_an_image.md +++ b/docs-source/content/oam/patch-and-upgrade/patch_an_image.md @@ -29,7 +29,7 @@ In all of the above cases, the WebLogic Kubernetes Operator will restart the Adm 1. Update the `image` tag to point at the new image, for example: - ```bash + ``` domainHomeInImage: false image: oracle/oam:12.2.1.4.0-new imagePullPolicy: IfNotPresent @@ -55,6 +55,6 @@ In all of the above cases, the WebLogic Kubernetes Operator will restart the Adm The output will look similar to the following: - ```bash + ``` domain.weblogic.oracle/accessdomain patched ``` diff --git a/docs-source/content/oam/patch-and-upgrade/upgrade_an_operator_release.md b/docs-source/content/oam/patch-and-upgrade/upgrade_an_operator_release.md index 79c0c07bf..58109ba1f 100644 --- a/docs-source/content/oam/patch-and-upgrade/upgrade_an_operator_release.md +++ b/docs-source/content/oam/patch-and-upgrade/upgrade_an_operator_release.md @@ -31,43 +31,73 @@ The new WebLogic Kubernetes Operator Docker image must be installed on the maste 1. 
On the master node, download the new WebLogic Kubernetes Operator source code from the operator github project: ```bash - $ mkdir /weblogic-kubernetes-operator-3.X.X - $ cd /weblogic-kubernetes-operator-3.X.X + $ mkdir /weblogic-kubernetes-operator-3.X.X + $ cd /weblogic-kubernetes-operator-3.X.X $ git clone https://github.com/oracle/weblogic-kubernetes-operator.git --branch v3.X.X ``` For example: ```bash - $ cd /scratch/OAMDockerK8S/weblogic-kubernetes-operator-3.X.X + $ mkdir /scratch/OAMK8S/weblogic-kubernetes-operator-3.X.X + $ cd /scratch/OAMK8S/weblogic-kubernetes-operator-3.X.X $ git clone https://github.com/oracle/weblogic-kubernetes-operator.git --branch v3.X.X ``` - This will create the directory `/weblogic-kubernetes-operator-3.X.X/weblogic-kubernetes-operator` + This will create the directory `/weblogic-kubernetes-operator-3.X.X/weblogic-kubernetes-operator` 1. Run the following helm command to upgrade the operator: ```bash - $ cd /weblogic-kubernetes-operator-3.X.X/weblogic-kubernetes-operator + $ cd /weblogic-kubernetes-operator-3.X.X/weblogic-kubernetes-operator $ helm upgrade --reuse-values --set image=weblogic-kubernetes-operator:3.X.X --namespace --wait weblogic-kubernetes-operator kubernetes/charts/weblogic-operator ``` For example: ```bash - $ cd /scratch/OAMDockerK8S/weblogic-kubernetes-operator-3.X.X/weblogic-kubernetes-operator + $ cd /scratch/OAMK8S/weblogic-kubernetes-operator-3.X.X/weblogic-kubernetes-operator $ helm upgrade --reuse-values --set image=weblogic-kubernetes-operator:3.X.X --namespace opns --wait weblogic-kubernetes-operator kubernetes/charts/weblogic-operator ``` The output will look similar to the following: - ```bash + ``` Release "weblogic-kubernetes-operator" has been upgraded. Happy Helming! NAME: weblogic-kubernetes-operator - LAST DEPLOYED: Mon Sep 28 02:50:07 2020 + LAST DEPLOYED: Wed Nov 3 04:36:10 2021 NAMESPACE: opns STATUS: deployed REVISION: 3 TEST SUITE: None ``` + +1. 
Verify that the operator's pod and services are running by executing the following command: + + ```bash + $ kubectl get all -n + ``` + + For example: + + ```bash + $ kubectl get all -n opns + ``` + + The output will look similar to the following: + + ``` + NAME READY STATUS RESTARTS AGE + pod/weblogic-operator-69546866bd-h58sk 2/2 Running 0 112s + + NAME TYPE CLUSTER-IP EXTERNAL-IP PORT(S) AGE + service/internal-weblogic-operator-svc ClusterIP 10.106.72.42 8082/TCP 2d + + NAME READY UP-TO-DATE AVAILABLE AGE + deployment.apps/weblogic-operator 1/1 1 1 2d + + NAME DESIRED CURRENT READY AGE + replicaset.apps/weblogic-operator-676d5cc6f4 0 0 0 2d + replicaset.apps/weblogic-operator-69546866bd 1 1 1 112s + ``` \ No newline at end of file diff --git a/docs-source/content/oam/post-install-config/_index.md b/docs-source/content/oam/post-install-config/_index.md index 32ec19745..3bdb7f4e1 100644 --- a/docs-source/content/oam/post-install-config/_index.md +++ b/docs-source/content/oam/post-install-config/_index.md @@ -18,14 +18,8 @@ Follow these post install configuration steps. 1. Navigate to the following directory: - ``` - $ cd /weblogic-kubernetes-operator/kubernetes/samples/scripts/create-access-domain/domain-home-on-pv/output/weblogic-domains/accessdomain - ``` - - For example: - - ``` - $ cd /scratch/OAMDockerK8S/weblogic-kubernetes-operator/kubernetes/samples/scripts/create-access-domain/domain-home-on-pv/output/weblogic-domains/accessdomain + ```bash + $ cd $WORKDIR/kubernetes/create-access-domain/domain-home-on-pv/output/weblogic-domains/accessdomain ``` 1. Create a `setUserOverrides.sh` with the following contents: @@ -38,7 +32,7 @@ Follow these post install configuration steps. 1. 
Copy the `setUserOverrides.sh` file to the Administration Server pod: - ``` + ```bash $ chmod 755 setUserOverrides.sh $ kubectl cp setUserOverrides.sh oamns/accessdomain-adminserver:/u01/oracle/user_projects/domains/accessdomain/bin/setUserOverrides.sh ``` @@ -47,13 +41,13 @@ Follow these post install configuration steps. 1. Stop the OAM domain using the following command: - ``` + ```bash $ kubectl -n patch domains --type='json' -p='[{"op": "replace", "path": "/spec/serverStartPolicy", "value": "NEVER" }]' ``` For example: - ``` + ```bash $ kubectl -n oamns patch domains accessdomain --type='json' -p='[{"op": "replace", "path": "/spec/serverStartPolicy", "value": "NEVER" }]' ``` @@ -65,79 +59,85 @@ Follow these post install configuration steps. 1. Check that all the pods are stopped: - ``` + ```bash $ kubectl get pods -n ``` For example: - ``` + ```bash $ kubectl get pods -n oamns ``` The output will look similar to the following: ``` - NAME READY STATUS RESTARTS AGE - accessdomain-adminserver 1/1 Terminating 0 18h - accessdomain-create-oam-infra-domain-job-vj69h 0/1 Completed 0 24h - accessdomain-oam-policy-mgr1 1/1 Terminating 0 18h - accessdomain-oam-server1 1/1 Terminating 0 18h - accessdomain-oam-server2 1/1 Terminating 0 18h - helper 1/1 Running 0 41h + NAME READY STATUS RESTARTS AGE + accessdomain-adminserver 1/1 Terminating 0 27m + accessdomain-create-oam-infra-domain-job-7c9r9 0/1 Completed 0 4h29m + accessdomain-oam-policy-mgr1 1/1 Terminating 0 24m + accessdomain-oam-policy-mgr2 1/1 Terminating 0 24m + accessdomain-oam-server1 1/1 Terminating 0 24m + accessdomain-oam-server2 1/1 Terminating 0 24m + helper 1/1 Running 0 4h44m + nginx-ingress-ingress-nginx-controller-76fb7678f-k8rhq 1/1 Running 0 108m ``` The Administration Server pods and Managed Server pods will move to a STATUS of `Terminating`. 
After a few minutes, run the command again and the pods should have disappeared: ``` - NAME READY STATUS RESTARTS AGE - accessdomain-create-oam-infra-domain-job-vj69h 0/1 Completed 0 24h - helper 1/1 Running 0 41h + NAME READY STATUS RESTARTS AGE + accessdomain-create-oam-infra-domain-job-7c9r9 0/1 Completed 0 4h30m + helper 1/1 Running 0 4h45m + nginx-ingress-ingress-nginx-controller-76fb7678f-k8rhq 1/1 Running 0 109m ``` 1. Start the domain using the following command: - ``` + ```bash $ kubectl -n patch domains --type='json' -p='[{"op": "replace", "path": "/spec/serverStartPolicy", "value": "IF_NEEDED" }]' ``` For example: - ``` + ```bash $ kubectl -n oamns patch domains accessdomain --type='json' -p='[{"op": "replace", "path": "/spec/serverStartPolicy", "value": "IF_NEEDED" }]' ``` Run the following kubectl command to view the pods: - ``` + ```bash $ kubectl get pods -n ``` For example: - ``` + ```bash $ kubectl get pods -n oamns ``` The output will look similar to the following: ``` - NAME READY STATUS RESTARTS AGE - accessdomain-create-oam-infra-domain-job-vj69h 0/1 Completed 0 24h - accessdomain-introspect-domain-job-7qx29 1/1 Running 0 8s - helper 1/1 Running 0 41h + NAME READY STATUS RESTARTS AGE + accessdomain-create-oam-infra-domain-job-7c9r9 0/1 Completed 0 4h30m + accessdomain-introspector-mckp2 1/1 Running 0 8s + helper 1/1 Running 0 4h46m + nginx-ingress-ingress-nginx-controller-76fb7678f-k8rhq 1/1 Running 0 110m ``` The Administration Server pod will start followed by the OAM Managed Servers pods. 
This process will take several minutes, so keep executing the command until all the pods are running with `READY` status `1/1`: ``` - NAME READY STATUS RESTARTS AGE - accessdomain-adminserver 1/1 Running 0 6m4s - accessdomain-create-oam-infra-domain-job-vj69h 0/1 Completed 0 24h - accessdomain-oam-policy-mgr1 1/1 Running 0 3m5s - accessdomain-oam-server1 1/1 Running 0 3m5s - accessdomain-oam-server2 1/1 Running 0 3m5s - helper 1/1 Running 0 41h + NAME READY STATUS RESTARTS AGE + accessdomain-adminserver 1/1 Running 0 5m38s + accessdomain-create-oam-infra-domain-job-7c9r9 0/1 Completed 0 4h37m + accessdomain-oam-policy-mgr1 1/1 Running 0 2m51s + accessdomain-oam-policy-mgr2 1/1 Running 0 2m51s + accessdomain-oam-server1 1/1 Running 0 2m50s + accessdomain-oam-server2 1/1 Running 0 2m50s + helper 1/1 Running 0 4h52m + nginx-ingress-ingress-nginx-controller-76fb7678f-k8rhq 1/1 Running 0 116m ``` @@ -145,7 +145,7 @@ Follow these post install configuration steps. Exclude all Oracle Access Management (OAM) clusters (including Policy Manager and OAM runtime server) from the default WebLogic Server 12c coherence cluster by using the WebLogic Server Administration Console. -From 12.2.1.3.0 onwards, OAM server-side session management uses database and does not require coherence cluster to be established. In some environments, warnings and errors are observed due to default coherence cluster initialized by WebLogic. To avoid or fix these errors, exclude all of the OAM clusters from default WebLogic Server coherence cluster using the following steps: +From 12.2.1.3.0 onwards, OAM server-side session management uses the database and does not require coherence cluster to be established. In some environments, warnings and errors are observed due to default coherence cluster initialized by WebLogic. To avoid or fix these errors, exclude all of the OAM clusters from default WebLogic Server coherence cluster using the following steps: 1. 
Login to the WebLogic Server Console at `https://${MASTERNODE-HOSTNAME}:${MASTERNODE-PORT}/console`. 1. Click **Lock & Edit**. @@ -207,18 +207,18 @@ For production environments, the following WebLogic Server tuning parameters mus 1. Navigate to the following directory and change permissions for the `oamconfig_modify.sh`: ```bash - $ cd /weblogic-kubernetes-operator/kubernetes/samples/scripts/create-access-domain/domain-home-on-pv/common + $ cd $WORKDIR/kubernetes/create-access-domain/domain-home-on-pv/common $ chmod 777 oamconfig_modify.sh ``` For example: ```bash - $ cd /scratch/OAMDockerK8S/weblogic-kubernetes-operator/kubernetes/samples/scripts/create-access-domain/domain-home-on-pv/common + $ cd $WORKDIR/kubernetes/create-access-domain/domain-home-on-pv/common $ chmod 777 oamconfig_modify.sh ``` -1. Edit the `oamconfig.properties` and change the `OAM_NAMESPACE`, `INGRESS`, `INGRESS_NAME`, and `LBR_HOST` to match the values for your OAM Kubernetes environment. For example: +1. Edit the `oamconfig.properties` and change the `OAM_NAMESPACE` and `LBR_HOST` to match the values for your OAM Kubernetes environment. For example: ``` #Below are only the sample values, please modify them as per your setup @@ -226,7 +226,7 @@ For production environments, the following WebLogic Server tuning parameters mus # The name space where OAM servers are created OAM_NAMESPACE='oamns' - # Define the INGRESS CONTROLLER used. typical values are voyager/nginx + # Define the INGRESS CONTROLLER used. INGRESS="nginx" # Define the INGRESS CONTROLLER name used during installation. 
@@ -267,30 +267,31 @@ For production environments, the following WebLogic Server tuning parameters mus INGRESS_NAME: nginx-ingress ING_TYPE : NodePort LBR_HOST: masternode.example.com - LBR_PORT: 32190 + LBR_PORT: 31051 Started Executing Command - % Total % Received % Xferd Average Speed Time Time Time Current - Dload Upload Total Spent Left Speed - 100 752k 0 752k 0 0 526k 0 --:--:-- 0:00:01 --:--:-- 526k - new_cluster_id: 5e3ce-masternode - service/oamoap-service created - nginx-ingress-ingress-nginx-controller NodePort 10.104.229.200 80:32412/TCP,443:30555/TCP 2d17h - oamoap-service NodePort 10.106.155.193 5575:30540/TCP 0s + % Total % Received % Xferd Average Speed Time Time Time Current + Dload Upload Total Spent Left Speed + 100 764k 0 764k 0 0 221k 0 --:--:-- 0:00:03 --:--:-- 221k + new_cluster_id: a52fc-masternode + service/accessdomain-oamoap-service created + accessdomain-oamoap-service NodePort 10.100.202.44 5575:30540/TCP 1s + nginx-ingress-ingress-nginx-controller NodePort 10.101.132.251 80:32371/TCP,443:31051/TCP 144m HTTP/1.1 100 Continue HTTP/1.1 201 Created + Date: Mon, 01 Nov 2021 16:59:12 GMT Content-Type: text/plain Content-Length: 76 Connection: keep-alive - X-ORACLE-DMS-ECID: a9222626-7c7a-4506-94fd-b2acfbb4e146-0000047f + X-ORACLE-DMS-ECID: 9234b1a0-83b4-4100-9875-aa00e3f5db27-0000035f X-ORACLE-DMS-RID: 0 - Set-Cookie: JSESSIONID=y21yaD9mWO5lPBv808bEYugB7cyRAJ4yUxryfLZ08puDjXUG_sd2!-1157379773; path=/; HttpOnly - Set-Cookie: _WL_AUTHCOOKIE_JSESSIONID=Twy1lPz58QhVjCheOfWp; path=/; secure; HttpOnly + Set-Cookie: JSESSIONID=pSXccMR6t8B5QoyaAlOuZYSmhtseX4C4jx-0tnkmNyer8L1mOLET!402058795; path=/; HttpOnly + Set-Cookie: _WL_AUTHCOOKIE_JSESSIONID=X1iqH-mtDNGyFx5ZCXMK; path=/; secure; HttpOnly Strict-Transport-Security: max-age=15724800; includeSubDomains - https://masternode.example.com:32190/iam/admin/config/api/v1/config?path=%2F + https://masternode.example.com:31051/iam/admin/config/api/v1/config?path=%2F - 
/scratch/OAMDockerK8S/weblogic-kubernetes-operator/kubernetes/samples/scripts/create-access-domain/domain-home-on-pv/common/output/oamconfig_modify.xml executed successfully + $WORKDIR/kubernetes/create-access-domain/domain-home-on-pv/common/output/oamconfig_modify.xml executed successfully --------------------------------------------------------------------------- Initializing WebLogic Scripting Tool (WLST) ... @@ -319,41 +320,52 @@ For production environments, the following WebLogic Server tuning parameters mus Waiting continuously at an interval of 10 secs for servers to start.. Waiting continuously at an interval of 10 secs for servers to start.. Waiting continuously at an interval of 10 secs for servers to start.. + ... Waiting continuously at an interval of 10 secs for servers to start.. Waiting continuously at an interval of 10 secs for servers to start.. - Waiting continuously at an interval of 10 secs for servers to start.. - Waiting continuously at an interval of 10 secs for servers to start.. - accessdomain-oam-server1 1/1 Running 0 2m23s - accessdomain-oam-server2 1/1 Running 0 2m24s + accessdomain-oam-server1 1/1 Running 0 4m37s + accessdomain-oam-server2 1/1 Running 0 4m36s OAM servers started successfully ``` The script will delete the `accessdomain-oam-server1` and `accessdomain-oam-server2` pods and then create new ones. 
Check the pods are running again by issuing the following command: + ```bash + $ kubectl get pods -n + ``` + + For example: + ```bash $ kubectl get pods -n oamns ``` The output will look similar to the following: - ```bash - NAME READY STATUS RESTARTS AGE - pod/accessdomain-adminserver 1/1 Running 0 1h17m - pod/accessdomain-create-oam-infra-domain-job-vj69h 0/1 Completed 0 1h42m - pod/accessdomain-oam-policy-mgr1 1/1 Running 0 1h9m - pod/accessdomain-oam-server1 0/1 Running 0 31s - pod/accessdomain-oam-server2 0/1 Running 0 31s + ``` + NAME READY STATUS RESTARTS AGE + accessdomain-adminserver 1/1 Running 0 43m + accessdomain-create-oam-infra-domain-job-7c9r9 0/1 Completed 0 5h14m + accessdomain-oam-policy-mgr1 1/1 Running 0 40m + accessdomain-oam-policy-mgr2 1/1 Running 0 40m + accessdomain-oam-server1 0/1 Running 0 8m3s + accessdomain-oam-server2 0/1 Running 0 8m2s + helper 0/1 Running 0 5h29m + nginx-ingress-ingress-nginx-controller-76fb7678f-k8rhq 1/1 Running 0 154m ``` The `accessdomain-oam-server1` and `accessdomain-oam-server2` are started, but currently have a `READY` status of `0/1`. This means `oam_server1` and `oam_server2` are not currently running but are in the process of starting. 
The servers will take several minutes to start so keep executing the command until READY shows 1/1: - ```bash - NAME READY STATUS RESTARTS AGE - pod/accessdomain-adminserver 1/1 Running 0 1h23m - pod/accessdomain-create-oam-infra-domain-job-vj69h 0/1 Completed 0 1h48m - pod/accessdomain-oam-policy-mgr1 1/1 Running 0 1h15m - pod/accessdomain-oam-server1 1/1 Running 0 6m - pod/accessdomain-oam-server2 1/1 Running 0 6m + ``` + NAME READY STATUS RESTARTS AGE + accessdomain-adminserver 1/1 Running 0 49m + accessdomain-create-oam-infra-domain-job-7c9r9 0/1 Completed 0 5h21m + accessdomain-oam-policy-mgr1 1/1 Running 0 46m + accessdomain-oam-policy-mgr2 1/1 Running 0 46m + accessdomain-oam-server1 1/1 Running 0 14m + accessdomain-oam-server2 1/1 Running 0 14m + helper 1/1 Running 0 5h36m + nginx-ingress-ingress-nginx-controller-76fb7678f-k8rhq 1/1 Running 0 160m ``` \ No newline at end of file diff --git a/docs-source/content/oam/prepare-your-environment/_index.md b/docs-source/content/oam/prepare-your-environment/_index.md index 25f1a4aab..22e817530 100644 --- a/docs-source/content/oam/prepare-your-environment/_index.md +++ b/docs-source/content/oam/prepare-your-environment/_index.md @@ -5,7 +5,7 @@ pre : "3. " description: "Sample for creating an OAM domain home on an existing PV or PVC, and the domain resource YAML file for deploying the generated OAM domain." --- - +To prepare for Oracle Access Management deployment in a Kubernetes environment, complete the following steps: 1. [Set up your Kubernetes cluster](#set-up-your-kubernetes-cluster) 1. [Install Helm](#install-helm) @@ -14,15 +14,17 @@ PVC, and the domain resource YAML file for deploying the generated OAM domain." 1. [Install the WebLogic Kubernetes Operator docker image](#install-the-weblogic-kubernetes-operator-docker-image) 1. [Set up the code repository to deploy OAM domains](#set-up-the-code-repository-to-deploy-oam-domains) 1. 
[Install the WebLogic Kubernetes Operator](#install-the-weblogic-kubernetes-operator) +1. [Create a namespace for Oracle Access Management](#create-a-namespace-for-oracle-access-management) 1. [RCU schema creation](#rcu-schema-creation) 1. [Preparing the environment for domain creation](#preparing-the-environment-for-domain-creation) - 1. [Configure the operator for the domain namespace](#configure-the-operator-for-the-domain-namespace) - 1. [Creating Kubernetes secrets for the domain and RCU](#creating-kubernetes-secrets-for-the-domain-and-rcu) - 1. [Create a Kubernetes persistent volume and persistent volume claim](#create-a-kubernetes-persistent-volume-and-persistent-volume-claim) + + a. [Creating Kubernetes secrets for the domain and RCU](#creating-kubernetes-secrets-for-the-domain-and-rcu) + + b. [Create a Kubernetes persistent volume and persistent volume claim](#create-a-kubernetes-persistent-volume-and-persistent-volume-claim) ### Set up your Kubernetes cluster -If you need help setting up a Kubernetes environment, check our [cheat sheet](https://oracle.github.io/weblogic-kubernetes-operator/userguide/overview/k8s-setup/). +If you need help setting up a Kubernetes environment, refer to the official Kubernetes [documentation](https://kubernetes.io/docs/setup/#production-environment) to set up a production grade Kubernetes cluster. It is recommended you have a master node and one or more worker nodes. The examples in this documentation assume one master and two worker nodes. 
@@ -47,11 +49,11 @@ As per the [prerequisites](../prerequisites) an installation of Helm is required The output will look similar to the following: - ```bash - NAME STATUS ROLES AGE VERSION - node/worker-node1 Ready 17h v1.18.4 - node/worker-node2 Ready 17h v1.18.4 - node/master-node Ready master 23h v1.18.4 + ``` + NAME STATUS ROLES AGE VERSION + node/worker-node1 Ready 17h v1.20.10 + node/worker-node2 Ready 17h v1.20.10 + node/master-node Ready control-plane,master 23h v1.20.10 NAME READY STATUS RESTARTS AGE pod/coredns-66bff467f8-fnhbq 1/1 Running 0 23h @@ -72,16 +74,12 @@ As per the [prerequisites](../prerequisites) an installation of Helm is required You can deploy OAM Docker images in the following ways: -1. Download a prebuilt OAM Docker image from [My Oracle Support](https://support.oracle.com) by referring to the document ID 2723908.1. This image is prebuilt by Oracle and includes Oracle Access Management 12.2.1.4.0 and the latest PSU. +1. Download the latest prebuilt OAM Docker image from [My Oracle Support](https://support.oracle.com) by referring to the document ID 2723908.1. This image is prebuilt by Oracle and includes Oracle Access Management 12.2.1.4.0 and the latest PSU. 1. Build your own OAM image using the WebLogic Image Tool or by using the dockerfile, scripts and base images from Oracle Container Registry (OCR). You can also build your own image by using only the dockerfile and scripts. For more information about the various ways in which you can build your own container image, see [Building the OAM Image](https://github.com/oracle/docker-images/tree/master/OracleAccessManagement/#building-the-oam-image). Choose one of these options based on your requirements. -{{% notice note %}} -If building your own image for OAM, you must include the mandatory patch [30571576](http://support.oracle.com). -{{% /notice %}} - {{% notice note %}} The OAM Docker image must be installed on the master node and each of the worker nodes in your Kubernetes cluster. 
Alternatively you can place the image in a Docker registry that your cluster can access. {{% /notice %}} @@ -93,58 +91,58 @@ $ docker images ``` The output will look similar to the following: - ```bash - REPOSITORY TAG IMAGE ID CREATED SIZE - quay.io/coreos/flannel v0.13.0-rc2 79dd6d6368e2 7 days ago 57.2MB - oracle/oam 12.2.1.4.0 720a172374e6 2 weeks ago 3.38GB - k8s.gcr.io/kube-proxy v1.18.4 718fa77019f2 3 weeks ago 117MB - k8s.gcr.io/kube-controller-manager v1.18.4 e8f1690127c4 3 weeks ago 162MB - k8s.gcr.io/kube-apiserver v1.18.4 408913fc18eb 3 weeks ago 173MB - k8s.gcr.io/kube-scheduler v1.18.4 c663567f869e 3 weeks ago 95.3MB - k8s.gcr.io/pause 3.2 80d28bedfe5d 5 months ago 683kB - k8s.gcr.io/coredns 1.6.7 67da37a9a360 5 months ago 43.8MB - k8s.gcr.io/etcd 3.4.3-0 303ce5db0e90 8 months ago 288MB + ``` + REPOSITORY TAG IMAGE ID CREATED SIZE + oracle/oam 12.2.1.4.0-8-ol7-210721.0755 720a172374e6 2 weeks ago 3.38GB + quay.io/coreos/flannel v0.15.0 09b38f011a29 6 days ago 69.5MB + rancher/mirrored-flannelcni-flannel-cni-plugin v1.2 98660e6e4c3a 13 days ago 8.98MB + k8s.gcr.io/kube-proxy v1.20.10 945c9bce487a 2 months ago 99.7MB + k8s.gcr.io/kube-controller-manager v1.20.10 2f450864515d 2 months ago 116MB + k8s.gcr.io/kube-apiserver v1.20.10 644cadd07add 2 months ago 122MB + k8s.gcr.io/kube-scheduler v1.20.10 4c9be8dc650b 2 months ago 47.3MB + k8s.gcr.io/etcd 3.4.13-0 0369cf4303ff 14 months ago 253MB + k8s.gcr.io/coredns 1.7.0 bfe3a36ebd25 16 months ago 45.2MB + k8s.gcr.io/pause 3.2 80d28bedfe5d 20 months ago 683kB ``` ### Install the WebLogic Kubernetes Operator Docker image -In this release only WebLogic Kubernetes Operator 3.0.1 is supported. - {{% notice note %}} The WebLogic Kubernetes Operator Docker image must be installed on the master node and each of the worker nodes in your Kubernetes cluster. Alternatively you can place the image in a Docker registry that your cluster can access. {{% /notice %}} -1. 
Pull the WebLogic Kubernetes Operator 3.0.1 image by running the following command on the master node: +1. Pull the WebLogic Kubernetes Operator image by running the following command on the master node: ```bash - $ docker pull ghcr.io/oracle/weblogic-kubernetes-operator:3.0.1 + $ docker pull ghcr.io/oracle/weblogic-kubernetes-operator:3.3.0 ``` The output will look similar to the following: - ```bash + ``` Trying to pull repository ghcr.io/oracle/weblogic-kubernetes-operator ... - 3.0.1: Pulling from ghcr.io/oracle/weblogic-kubernetes-operator - bce8f778fef0: Already exists - de14ddc50a70: Pull complete - 77401a861078: Pull complete - 9c5ac1423af4: Pull complete - 2b6f244f998f: Pull complete - 625e05083092: Pull complete - Digest: sha256:27047d032ac5a9077b39bec512b99d8ca54bf9bf71227f5fd1b7b26ac80c20d3 - Status: Downloaded newer image for ghcr.io/oracle/weblogic-kubernetes-operator - ghcr.io/oracle/weblogic-kubernetes-operator:3.0.1 + 3.3.0: Pulling from ghcr.io/oracle/weblogic-kubernetes-operator + c828c776e142: Pull complete + 175676c54fa1: Pull complete + b3231f480c32: Pull complete + ea4423fa8daa: Pull complete + f3ca38f7f95f: Pull complete + effd851583ec: Pull complete + 4f4fb700ef54: Pull complete + Digest: sha256:3e93848ad2f5b272c88680e7b37a4ee428dd12e4c4c91af6977fd2fa9ec1f9dc + Status: Downloaded newer image for ghcr.io/oracle/weblogic-kubernetes-operator:3.3.0 + ghcr.io/oracle/weblogic-kubernetes-operator:3.3.0 ``` 1. Run the docker tag command as follows: ```bash - $ docker tag ghcr.io/oracle/weblogic-kubernetes-operator:3.0.1 weblogic-kubernetes-operator:3.0.1 + $ docker tag ghcr.io/oracle/weblogic-kubernetes-operator:3.3.0 weblogic-kubernetes-operator:3.3.0 ``` - After installing the WebLogic Kubernetes Operator 3.0.1 Docker image, repeat the above on the worker nodes. + After installing the WebLogic Kubernetes Operator image, repeat the above on the worker nodes. 
### Set up the code repository to deploy OAM domains @@ -153,50 +151,40 @@ OAM domain deployment on Kubernetes leverages the WebLogic Kubernetes Operator i 1. Create a working directory to setup the source code. ```bash - $ mkdir + $ mkdir ``` For example: ```bash - $ mkdir /scratch/OAMDockerK8S + $ mkdir /scratch/OAMK8S ``` - -1. Download the supported version of the WebLogic Kubernetes operator source code from the operator github project. Currently the supported operator version is [3.0.1](https://github.com/oracle/weblogic-kubernetes-operator/releases/tag/v3.0.1): + +1. Download the latest OAM deployment scripts from the OAM repository. ```bash - $ cd - $ git clone https://github.com/oracle/weblogic-kubernetes-operator.git --branch release/3.0.1 + $ cd + $ git clone https://github.com/oracle/fmw-kubernetes.git ``` - + For example: - + ```bash - $ cd /scratch/OAMDockerK8S - $ git clone https://github.com/oracle/weblogic-kubernetes-operator.git --branch release/3.0.1 + $ cd /scratch/OAMK8S + $ git clone https://github.com/oracle/fmw-kubernetes.git ``` - This will create the directory `/weblogic-kubernetes-operator` - -1. Download the OAM deployment scripts from the OAM [repository](https://github.com/oracle/fmw-kubernetes.git) and copy them in to the WebLogic Kubernetes Operator samples location. +1. 
Set the `$WORKDIR` environment variable as follows: ```bash - $ git clone https://github.com/oracle/fmw-kubernetes.git - $ cp -rf /fmw-kubernetes/OracleAccessManagement/kubernetes/3.0.1/create-access-domain /weblogic-kubernetes-operator/kubernetes/samples/scripts/ - $ mv -f /weblogic-kubernetes-operator/kubernetes/samples/charts/ingress-per-domain /weblogic-kubernetes-operator/kubernetes/samples/charts/ingress-per-domain_backup - $ cp -rf /fmw-kubernetes/OracleAccessManagement/kubernetes/3.0.1/ingress-per-domain /weblogic-kubernetes-operator/kubernetes/samples/charts/ingress-per-domain + $ export WORKDIR=/fmw-kubernetes/OracleAccessManagement ``` - + For example: ```bash - $ git clone https://github.com/oracle/fmw-kubernetes.git - $ cp -rf /scratch/OAMDockerK8S/fmw-kubernetes/OracleAccessManagement/kubernetes/3.0.1/create-access-domain /scratch/OAMDockerK8S/weblogic-kubernetes-operator/kubernetes/samples/scripts/ - $ mv -f /scratch/OAMDockerK8S/weblogic-kubernetes-operator/kubernetes/samples/charts/ingress-per-domain /scratch/OAMDockerK8S/weblogic-kubernetes-operator/kubernetes/samples/charts/ingress-per-domain_backup - $ cp -rf /scratch/OAMDockerK8S/fmw-kubernetes/OracleAccessManagement/kubernetes/3.0.1/ingress-per-domain /scratch/OAMDockerK8S/weblogic-kubernetes-operator/kubernetes/samples/charts/ingress-per-domain + $ export WORKDIR=/scratch/OAMK8S/fmw-kubernetes/OracleAccessManagement ``` - - You can now use the deployment scripts from `/weblogic-kubernetes-operator/kubernetes/samples/scripts/` to set up the OAM domains as further described in this document. 1. Run the following command and see if the WebLogic custom resource definition name already exists: @@ -206,13 +194,13 @@ OAM domain deployment on Kubernetes leverages the WebLogic Kubernetes Operator i In the output you should see: - ```bash - No resources found in default namespace. 
+ ``` + No resources found ``` If you see the following: - ```bash + ``` NAME AGE domains.weblogic.oracle 5d ``` @@ -239,7 +227,7 @@ OAM domain deployment on Kubernetes leverages the WebLogic Kubernetes Operator i The output will look similar to the following: - ```bash + ``` namespace/opns created ``` @@ -257,13 +245,13 @@ OAM domain deployment on Kubernetes leverages the WebLogic Kubernetes Operator i The output will look similar to the following: - ```bash + ``` serviceaccount/op-sa created ``` -1. If you want to to setup logging and visualisation with Elasticsearch and Kibana (post domain creation) edit the `/weblogic-kubernetes-operator/kubernetes/charts/weblogic-operator/values.yaml` and set the parameter `elkIntegrationEnabled` to `true` and make sure the following parameters are set: +1. If you want to set up logging and visualisation with Elasticsearch and Kibana (post domain creation) edit the `$WORKDIR/kubernetes/charts/weblogic-operator/values.yaml` and set the parameter `elkIntegrationEnabled` to `true` and make sure the following parameters are set: - ```bash + ``` # elkIntegrationEnabled specifies whether or not ELK integration is enabled. elkIntegrationEnabled: true @@ -286,297 +274,347 @@ OAM domain deployment on Kubernetes leverages the WebLogic Kubernetes Operator i 1. 
Run the following helm command to install and start the operator: ```bash - $ cd /weblogic-kubernetes-operator - $ helm install kubernetes/charts/weblogic-operator \ + $ cd $WORKDIR + $ helm install weblogic-kubernetes-operator kubernetes/charts/weblogic-operator \ --namespace \ - --set image=weblogic-kubernetes-operator:3.0.1 \ - --set serviceAccount= --set "domainNamespaces={}" --set "javaLoggingLevel=FINE" --wait + --set image=weblogic-kubernetes-operator:3.3.0 \ + --set serviceAccount= \ + --set "enableClusterRoleBinding=true" \ + --set "domainNamespaceSelectionStrategy=LabelSelector" \ + --set "domainNamespaceLabelSelector=weblogic-operator\=enabled" \ + --set "javaLoggingLevel=FINE" --wait ``` For example: ```bash - $ cd /scratch/OAMDockerK8S/weblogic-kubernetes-operator + $ cd $WORKDIR $ helm install weblogic-kubernetes-operator kubernetes/charts/weblogic-operator \ - --namespace opns --set image=weblogic-kubernetes-operator:3.0.1 \ - --set serviceAccount=op-sa --set "domainNamespaces={}" --set "javaLoggingLevel=FINE" --wait + --namespace opns \ + --set image=weblogic-kubernetes-operator:3.3.0 \ + --set serviceAccount=op-sa \ + --set "enableClusterRoleBinding=true" \ + --set "domainNamespaceSelectionStrategy=LabelSelector" \ + --set "domainNamespaceLabelSelector=weblogic-operator\=enabled" \ + --set "javaLoggingLevel=FINE" --wait ``` The output will look similar to the following: - ```bash - NAME: weblogic-kubernetes-operator - LAST DEPLOYED: Wed Sep 23 08:04:20 2020 - NAMESPACE: opns - STATUS: deployed - REVISION: 1 - TEST SUITE: None - ``` + ``` + NAME: weblogic-kubernetes-operator + LAST DEPLOYED: Fri Oct 29 03:10:39 2021 + NAMESPACE: opns + STATUS: deployed + REVISION: 1 + TEST SUITE: None + ``` 1. 
Verify that the operator's pod and services are running by executing the following command: - ```bash - $ kubectl get all -n - ``` + ```bash + $ kubectl get all -n + ``` - For example: + For example: - ```bash - $ kubectl get all -n opns - ``` + ```bash + $ kubectl get all -n opns + ``` - The output will look similar to the following: + The output will look similar to the following: - ```bash - NAME READY STATUS RESTARTS AGE - pod/weblogic-operator-759b7c657-8gd7g 2/2 Running 0 107s - - NAME TYPE CLUSTER-IP EXTERNAL-IP PORT(S) AGE - service/internal-weblogic-operator-svc ClusterIP 10.102.11.143 8082/TCP 107s + ``` + NAME READY STATUS RESTARTS AGE + pod/weblogic-operator-676d5cc6f4-wct7b 2/2 Running 0 40s - NAME READY UP-TO-DATE AVAILABLE AGE - deployment.apps/weblogic-operator 1/1 1 1 107s + NAME TYPE CLUSTER-IP EXTERNAL-IP PORT(S) AGE + service/internal-weblogic-operator-svc ClusterIP 10.101.1.198 8082/TCP 40s - NAME DESIRED CURRENT READY AGE - replicaset.apps/weblogic-operator-759b7c657 1 1 1 107s + NAME READY UP-TO-DATE AVAILABLE AGE + deployment.apps/weblogic-operator 1/1 1 1 40s - ``` + NAME DESIRED CURRENT READY AGE + replicaset.apps/weblogic-operator-676d5cc6f4 1 1 1 40s + ``` 1. Verify the operator pod's log: - ```bash - $ kubectl logs -n -c weblogic-operator deployments/weblogic-operator - ``` + ```bash + $ kubectl logs -n -c weblogic-operator deployments/weblogic-operator + ``` - For example: + For example: - ```bash - $ kubectl logs -n opns -c weblogic-operator deployments/weblogic-operator - ``` + ```bash + $ kubectl logs -n opns -c weblogic-operator deployments/weblogic-operator + ``` - The output will look similar to the following: + The output will look similar to the following: - ```bash - ... 
- {"timestamp":"09-23-2020T15:04:30.485+0000","thread":28,"fiber":"fiber-1","namespace":"opns","domainUID":"","level":"INFO","class":"oracle.kubernetes.operator.rest.RestServer",ethod":"start","timeInMillis":1600873470485,"message":"Started the internal ssl REST server on https://0.0.0.0:8082/operator","exception":"","code":"","headers":{},"body":""} - {"timestamp":"09-23-2020T15:04:30.487+0000","thread":28,"fiber":"fiber-1","namespace":"opns","domainUID":"","level":"INFO","class":"oracle.kubernetes.operator.Main","method":"mkReadyAndStartLivenessThread","timeInMillis":1600873470487,"message":"Starting Operator Liveness Thread","exception":"","code":"","headers":{},"body":""} - {"timestamp":"09-23-2020T15:06:27.528+0000","thread":22,"fiber":"engine-operator-thread-5-fiber-2","namespace":"opns","domainUID":"","level":"FINE","class":"oracle.kubernetes.orator.helpers.ConfigMapHelper$ScriptConfigMapContext","method":"loadScriptsFromClasspath","timeInMillis":1600873587528,"message":"Loading scripts into domain control config mapor namespace: opns","exception":"","code":"","headers":{},"body":""} - {"timestamp":"09-23-2020T15:06:27.529+0000","thread":22,"fiber":"engine-operator-thread-5-fiber-2","namespace":"opns","domainUID":"","level":"FINE","class":"oracle.kubernetes.orator.Main","method":"readExistingDomains","timeInMillis":1600873587529,"message":"Listing WebLogic Domains","exception":"","code":"","headers":{},"body":""} - {"timestamp":"09-23-2020T15:06:27.576+0000","thread":20,"fiber":"fiber-2-child-1","namespace":"opns","domainUID":"","level":"FINE","class":"oracle.kubernetes.operator.helpers.CfigMapHelper$ConfigMapContext$ReadResponseStep","method":"logConfigMapExists","timeInMillis":1600873587576,"message":"Existing config map, ConfigMapHelper$ConfigMapContext$Readsponse, is correct for namespace: opns.","exception":"","code":"","headers":{},"body":""} + ``` + ... 
+ {"timestamp":"2021-11-01T10:26:10.917829423Z","thread":13,"fiber":"","namespace":"","domainUID":"","level":"CONFIG","class":"oracle.kubernetes.operator.TuningParametersImpl","method":"update","timeInMillis":1635762370917,"message":"Reloading tuning parameters from Operator's config map","exception":"","code":"","headers":{},"body":""} + {"timestamp":"2021-11-01T10:26:20.920145876Z","thread":13,"fiber":"","namespace":"","domainUID":"","level":"CONFIG","class":"oracle.kubernetes.operator.TuningParametersImpl","method":"update","timeInMillis":1635762380920,"message":"Reloading tuning parameters from Operator's config map","exception":"","code":"","headers":{},"body":""} + {"timestamp":"2021-11-01T10:26:30.922360564Z","thread":19,"fiber":"","namespace":"","domainUID":"","level":"CONFIG","class":"oracle.kubernetes.operator.TuningParametersImpl","method":"update","timeInMillis":1635762390922,"message":"Reloading tuning parameters from Operator's config map","exception":"","code":"","headers":{},"body":""} + {"timestamp":"2021-11-01T10:26:40.924847211Z","thread":29,"fiber":"","namespace":"","domainUID":"","level":"CONFIG","class":"oracle.kubernetes.operator.TuningParametersImpl","method":"update","timeInMillis":1635762400924,"message":"Reloading tuning parameters from Operator's config map","exception":"","code":"","headers":{},"body":""} + ``` - ``` +### Create a namespace for Oracle Access Management +1. Run the following command to create a namespace for the domain: + ```bash + $ kubectl create namespace + ``` + For example: - -### RCU schema creation - -In this section you create the RCU schemas in the Oracle Database. - -Before following the steps in this section, make sure that the database and listener are up and running and you can connect to the database via SQL*Plus or other client tool. + ```bash + $ kubectl create namespace oamns + ``` -1. 
Run the following command to create a namespace for the domain: + The output will look similar to the following: - ```bash - $ kubectl create namespace - ``` + ``` + namespace/oamns created + ``` + +1. Run the following command to tag the namespace so the WebLogic Kubernetes Operator can manage it: + + ```bash + $ kubectl label namespaces weblogic-operator=enabled + ``` + + For example: + + ```bash + $ kubectl label namespaces oamns weblogic-operator=enabled + ``` + + The output will look similar to the following: - For example: + ``` + namespace/oamns labeled + ``` + +1. Run the following command to check the label was created: + + ```bash + $ kubectl describe namespace + ``` + + For example: + + ```bash + $ kubectl describe namespace oamns + ``` + + + The output will look similar to the following: - ```bash - $ kubectl create namespace oamns - ``` + ``` + Name: oamns + Labels: weblogic-operator=enabled + Annotations: + Status: Active + + No resource quota. + + No LimitRange resource. + ``` + + + +### RCU schema creation - The output will look similar to the following: +In this section you create the RCU schemas in the Oracle Database. - ```bash - namespace/oamns created - ``` +Before following the steps in this section, make sure that the database and listener are up and running and you can connect to the database via SQL*Plus or other client tool. 1. Run the following command to create a helper pod to run RCU: - ```bash - $ kubectl run helper --image -n -- sleep infinity - ``` + ```bash + $ kubectl run helper --image -n -- sleep infinity + ``` - For example: + For example: - ```bash - $ kubectl run helper --image oracle/oam:12.2.1.4.0 -n oamns -- sleep infinity - ``` + ```bash + $ kubectl run helper --image oracle/oam:12.2.1.4.0-8-ol7-210721.0755 -n oamns -- sleep infinity + ``` - The output will look similar to the following: + The output will look similar to the following: - ```bash - pod/helper created - ``` + ``` + pod/helper created + ``` -1. 
Run the following command to check the pod is running: +1. Run the following command to check the pod is running: - ```bash - $ kubectl get pods -n - ``` + ```bash + $ kubectl get pods -n + ``` - For example: + For example: - ```bash - $ kubectl get pods -n oamns - ``` + ```bash + $ kubectl get pods -n oamns + ``` - The output will look similar to the following: + The output will look similar to the following: - ```bash - NAME READY STATUS RESTARTS AGE - helper 1/1 Running 0 8s - ``` + ``` + NAME READY STATUS RESTARTS AGE + helper 1/1 Running 0 8s + ``` 1. Run the following command to start a bash shell in the helper pod: - ```bash - $ kubectl exec -it helper -n -- /bin/bash - ``` + ```bash + $ kubectl exec -it helper -n -- /bin/bash + ``` - For example: + For example: - ```bash - $ kubectl exec -it helper -n oamns -- /bin/bash - ``` + ```bash + $ kubectl exec -it helper -n oamns -- /bin/bash + ``` - This will take you into a bash shell in the running helper pod: + This will take you into a bash shell in the running helper pod: - ```bash - [oracle@helper ~]$ - ``` + ```bash + [oracle@helper ~]$ + ``` 1. 
In the helper bash shell run the following commands to set the environment: - ```bash - [oracle@helper ~]$ export CONNECTION_STRING=:/ - [oracle@helper ~]$ export RCUPREFIX= - [oracle@helper ~]$ echo -e "\n" > /tmp/pwd.txt - [oracle@helper ~]$ cat /tmp/pwd.txt - ``` + ```bash + [oracle@helper ~]$ export CONNECTION_STRING=:/ + [oracle@helper ~]$ export RCUPREFIX= + [oracle@helper ~]$ echo -e "\n" > /tmp/pwd.txt + [oracle@helper ~]$ cat /tmp/pwd.txt + ``` - where: + where: - `:/` is your database connect string + `:/` is your database connect string - `` is the RCU schema prefix you want to set + `` is the RCU schema prefix you want to set - `` is the SYS password for the database + `` is the SYS password for the database - `` is the password you want to set for the `` + `` is the password you want to set for the `` - For example: + For example: - ```bash - [oracle@helper ~]$ export CONNECTION_STRING=mydatabasehost.example.com:1521/orcl.example.com - [oracle@helper ~]$ export RCUPREFIX=OAMK8S - [oracle@helper ~]$ echo -e "\n" > /tmp/pwd.txt - [oracle@helper ~]$ cat /tmp/pwd.txt - - - ``` + ```bash + [oracle@helper ~]$ export CONNECTION_STRING=mydatabasehost.example.com:1521/orcl.example.com + [oracle@helper ~]$ export RCUPREFIX=OAMK8S + [oracle@helper ~]$ echo -e "\n" > /tmp/pwd.txt + [oracle@helper ~]$ cat /tmp/pwd.txt + + + ``` 1. 
In the helper bash shell run the following command to create the RCU schemas in the database: - ```bash - $ [oracle@helper ~]$ /u01/oracle/oracle_common/bin/rcu -silent -createRepository -databaseType ORACLE -connectString \ - $CONNECTION_STRING -dbUser sys -dbRole sysdba -useSamePasswordForAllSchemaUsers true \ - -selectDependentsForComponents true -schemaPrefix $RCUPREFIX -component MDS -component IAU \ - -component IAU_APPEND -component IAU_VIEWER -component OPSS -component WLS -component STB -component OAM -f < /tmp/pwd.txt - ``` + ```bash + $ [oracle@helper ~]$ /u01/oracle/oracle_common/bin/rcu -silent -createRepository -databaseType ORACLE -connectString \ + $CONNECTION_STRING -dbUser sys -dbRole sysdba -useSamePasswordForAllSchemaUsers true \ + -selectDependentsForComponents true -schemaPrefix $RCUPREFIX -component MDS -component IAU \ + -component IAU_APPEND -component IAU_VIEWER -component OPSS -component WLS -component STB -component OAM -f < /tmp/pwd.txt + ``` The output will look similar to the following: - ```bash - RCU Logfile: /tmp/RCU2020-09-23_15-36_1649016162/logs/rcu.log - Processing command line .... - Repository Creation Utility - Checking Prerequisites - Checking Global Prerequisites - Repository Creation Utility - Checking Prerequisites - Checking Component Prerequisites - Repository Creation Utility - Creating Tablespaces - Validating and Creating Tablespaces - Create tablespaces in the repository database - Repository Creation Utility - Create - Repository Create in progress. 
- Executing pre create operations - Percent Complete: 18 - Percent Complete: 18 - Percent Complete: 19 - Percent Complete: 20 - Percent Complete: 21 - Percent Complete: 21 - Percent Complete: 22 - Percent Complete: 22 - Creating Common Infrastructure Services(STB) - Percent Complete: 30 - Percent Complete: 30 - Percent Complete: 39 - Percent Complete: 39 - Percent Complete: 39 - Creating Audit Services Append(IAU_APPEND) - Percent Complete: 46 - Percent Complete: 46 - Percent Complete: 55 - Percent Complete: 55 - Percent Complete: 55 - Creating Audit Services Viewer(IAU_VIEWER) - Percent Complete: 62 - Percent Complete: 62 - Percent Complete: 63 - Percent Complete: 63 - Percent Complete: 64 - Percent Complete: 64 - Creating Metadata Services(MDS) - Percent Complete: 73 - Percent Complete: 73 - Percent Complete: 73 - Percent Complete: 74 - Percent Complete: 74 - Percent Complete: 75 - Percent Complete: 75 - Percent Complete: 75 - Creating Weblogic Services(WLS) - Percent Complete: 80 - Percent Complete: 80 - Percent Complete: 83 - Percent Complete: 83 - Percent Complete: 91 - Percent Complete: 98 - Percent Complete: 98 - Creating Audit Services(IAU) - Percent Complete: 100 - Creating Oracle Platform Security Services(OPSS) - Creating Oracle Access Manager(OAM) - Executing post create operations - Repository Creation Utility: Create - Completion Summary - Database details: - ----------------------------- - Host Name : mydatabasehost.example.com - Port : 1521 - Service Name : ORCL.EXAMPLE.COM - Connected As : sys - Prefix for (prefixable) Schema Owners : OAMK8S - RCU Logfile : /tmp/RCU2020-09-23_15-36_1649016162/logs/rcu.log - Component schemas created: - ----------------------------- - Component Status Logfile - Common Infrastructure Services Success /tmp/RCU2020-09-23_15-36_1649016162/logs/stb.log - Oracle Platform Security Services Success /tmp/RCU2020-09-23_15-36_1649016162/logs/opss.log - Oracle Access Manager Success 
/tmp/RCU2020-09-23_15-36_1649016162/logs/OAM.log - Audit Services Success /tmp/RCU2020-09-23_15-36_1649016162/logs/iau.log - Audit Services Append Success /tmp/RCU2020-09-23_15-36_1649016162/logs/iau_append.log - Audit Services Viewer Success /tmp/RCU2020-09-23_15-36_1649016162/logs/iau_viewer.log - Metadata Services Success /tmp/RCU2020-09-23_15-36_1649016162/logs/mds.log - WebLogic Services Success /tmp/RCU2020-09-23_15-36_1649016162/logs/wls.log - Repository Creation Utility - Create : Operation Completed - [oracle@helper ~]$ - ``` + ``` + RCU Logfile: /tmp/RCU2021-11-01_10-29_561898106/logs/rcu.log + Processing command line .... + Repository Creation Utility - Checking Prerequisites + Checking Global Prerequisites + Repository Creation Utility - Checking Prerequisites + Checking Component Prerequisites + Repository Creation Utility - Creating Tablespaces + Validating and Creating Tablespaces + Create tablespaces in the repository database + Repository Creation Utility - Create + Repository Create in progress. 
+ Executing pre create operations + Percent Complete: 18 + Percent Complete: 18 + Percent Complete: 19 + Percent Complete: 20 + Percent Complete: 21 + Percent Complete: 21 + Percent Complete: 22 + Percent Complete: 22 + Creating Common Infrastructure Services(STB) + Percent Complete: 30 + Percent Complete: 30 + Percent Complete: 39 + Percent Complete: 39 + Percent Complete: 39 + Creating Audit Services Append(IAU_APPEND) + Percent Complete: 46 + Percent Complete: 46 + Percent Complete: 55 + Percent Complete: 55 + Percent Complete: 55 + Creating Audit Services Viewer(IAU_VIEWER) + Percent Complete: 62 + Percent Complete: 62 + Percent Complete: 63 + Percent Complete: 63 + Percent Complete: 64 + Percent Complete: 64 + Creating Metadata Services(MDS) + Percent Complete: 73 + Percent Complete: 73 + Percent Complete: 73 + Percent Complete: 74 + Percent Complete: 74 + Percent Complete: 75 + Percent Complete: 75 + Percent Complete: 75 + Creating Weblogic Services(WLS) + Percent Complete: 80 + Percent Complete: 80 + Percent Complete: 83 + Percent Complete: 83 + Percent Complete: 91 + Percent Complete: 98 + Percent Complete: 98 + Creating Audit Services(IAU) + Percent Complete: 100 + Creating Oracle Platform Security Services(OPSS) + Creating Oracle Access Manager(OAM) + Executing post create operations + Repository Creation Utility: Create - Completion Summary + Database details: + ----------------------------- + Host Name : mydatabasehost.example.com + Port : 1521 + Service Name : ORCL.EXAMPLE.COM + Connected As : sys + Prefix for (prefixable) Schema Owners : OAMK8S + RCU Logfile : /tmp/RCU2021-11-01_10-29_561898106/logs/rcu.log + Component schemas created: + ----------------------------- + Component Status Logfile + Common Infrastructure Services Success /tmp/RCU2021-11-01_10-29_561898106/logs/stb.log + Oracle Platform Security Services Success /tmp/RCU2021-11-01_10-29_561898106/logs/opss.log + Oracle Access Manager Success /tmp/RCU2021-11-01_10-29_561898106/logs/oam.log 
+ Audit Services Success /tmp/RCU2021-11-01_10-29_561898106/logs/iau.log + Audit Services Append Success /tmp/RCU2021-11-01_10-29_561898106/logs/iau_append.log + Audit Services Viewer Success /tmp/RCU2021-11-01_10-29_561898106/logs/iau_viewer.log + Metadata Services Success /tmp/RCU2021-11-01_10-29_561898106/logs/mds.log + WebLogic Services Success /tmp/RCU2021-11-01_10-29_561898106/logs/wls.log + Repository Creation Utility - Create : Operation Completed + [oracle@helper ~]$ + ``` 1. Exit the helper bash shell by issuing the command `exit`. @@ -586,38 +624,9 @@ Before following the steps in this section, make sure that the database and list In this section you prepare the environment for the OAM domain creation. This involves the following steps: - 1. Configure the operator for the domain namespace - 2. Create Kubernetes secrets for the domain and RCU - 3. Create a Kubernetes PV and PVC (Persistent Volume and Persistent Volume Claim) - - -#### Configure the operator for the domain namespace - -1. Configure the WebLogic Kubernetes Operator to manage the domain in the domain namespace by running the following command: - - ```bash - $ cd /weblogic-kubernetes-operator - $ helm upgrade --reuse-values --namespace --set "domainNamespaces={}" --wait weblogic-kubernetes-operator kubernetes/charts/weblogic-operator - ``` - - For example: - - ```bash - $ cd /scratch/OAMDockerK8S/weblogic-kubernetes-operator - $ helm upgrade --reuse-values --namespace opns --set "domainNamespaces={oamns}" --wait weblogic-kubernetes-operator kubernetes/charts/weblogic-operator - ``` - - The output will look similar to the following: - - ```bash - Release "weblogic-kubernetes-operator" has been upgraded. Happy Helming! - NAME: weblogic-kubernetes-operator - LAST DEPLOYED: Wed Sep 23 08:44:48 2020 - NAMESPACE: opns - STATUS: deployed - REVISION: 2 - TEST SUITE: None - ``` + a. [Creating Kubernetes secrets for the domain and RCU](#creating-kubernetes-secrets-for-the-domain-and-rcu) + + b. 
[Create a Kubernetes persistent volume and persistent volume claim](#create-a-kubernetes-persistent-volume-and-persistent-volume-claim) #### Creating Kubernetes secrets for the domain and RCU @@ -625,209 +634,225 @@ In this section you prepare the environment for the OAM domain creation. This in 1. Create a Kubernetes secret for the domain using the create-weblogic-credentials script in the same Kubernetes namespace as the domain: - ```bash - $ cd /weblogic-kubernetes-operator/kubernetes/samples/scripts/create-weblogic-domain-credentials - $ ./create-weblogic-credentials.sh -u weblogic -p -n -d -s - ``` + ```bash + $ cd $WORKDIR/kubernetes/create-weblogic-domain-credentials + $ ./create-weblogic-credentials.sh -u weblogic -p -n -d -s + ``` - where: + where: - `-u weblogic` is the WebLogic username + `-u weblogic` is the WebLogic username - `-p ` is the password for the weblogic user + `-p ` is the password for the weblogic user - `-n ` is the domain namespace + `-n ` is the domain namespace - `-d ` is the domain UID to be created. The default is domain1 if not specified + `-d ` is the domain UID to be created. The default is domain1 if not specified - `-s ` is the name you want to create for the secret for this namespace. The default is to use the domainUID if not specified + `-s ` is the name you want to create for the secret for this namespace. 
The default is to use the domainUID if not specified - For example: + For example: - ```bash - $ cd /scratch/OAMDockerK8S/weblogic-kubernetes-operator/kubernetes/samples/scripts/create-weblogic-domain-credentials - $ ./create-weblogic-credentials.sh -u weblogic -p -n oamns -d accessdomain -s accessdomain-domain-credentials - ``` + ```bash + $ cd $WORKDIR/kubernetes/create-weblogic-domain-credentials + $ ./create-weblogic-credentials.sh -u weblogic -p -n oamns -d accessdomain -s accessdomain-credentials + ``` - The output will look similar to the following: + The output will look similar to the following: - ```bash - secret/accessdomain-domain-credentials created - secret/accessdomain-domain-credentials labeled - The secret accessdomain-domain-credentials has been successfully created in the oamns namespace. - ``` + ``` + secret/accessdomain-credentials created + secret/accessdomain-credentials labeled + The secret accessdomain-credentials has been successfully created in the oamns namespace. + ``` 1. 
Verify the secret is created using the following command: - ```bash - $ kubectl get secret -o yaml -n - ``` + ```bash + $ kubectl get secret -o yaml -n + ``` - For example: + For example: - ```bash - $ kubectl get secret accessdomain-domain-credentials -o yaml -n oamns - ``` + ```bash + $ kubectl get secret accessdomain-credentials -o yaml -n oamns + ``` - The output will look similar to the following: + The output will look similar to the following: - ```bash - apiVersion: v1 - data: - password: V2VsY29tZTE= - username: d2VibG9naWM= - kind: Secret - metadata: - creationTimestamp: "2020-09-23T15:46:25Z" - labels: - weblogic.domainName: accessdomain - weblogic.domainUID: accessdomain - managedFields: + ``` + apiVersion: v1 + data: + password: V2VsY29tZTE= + username: d2VibG9naWM= + kind: Secret + metadata: + creationTimestamp: "2021-11-01T10:32:35Z" + labels: + weblogic.domainName: accessdomain + weblogic.domainUID: accessdomain + managedFields: + - apiVersion: v1 + fieldsType: FieldsV1 + fieldsV1: + f:data: + .: {} + f:password: {} + f:username: {} + f:type: {} + manager: kubectl-create + operation: Update + time: "2021-11-01T10:32:35Z" - apiVersion: v1 - fieldsType: FieldsV1 - fieldsV1: - f:data: - .: {} - f:password: {} - f:username: {} - f:metadata: - f:labels: - .: {} - f:weblogic.domainName: {} - f:weblogic.domainUID: {} - f:type: {} - manager: kubectl - operation: Update - time: "2020-09-23T15:46:25Z" - name: accessdomain-domain-credentials - namespace: oamns - resourceVersion: "50606" - selfLink: /api/v1/namespaces/oamns/secrets/accessdomain-domain-credentials - uid: 29f638f5-11d9-4b62-9cbb-03ff13ae3a90 - type: Opaque - ``` + fieldsType: FieldsV1 + fieldsV1: + f:metadata: + f:labels: + .: {} + f:weblogic.domainName: {} + f:weblogic.domainUID: {} + manager: kubectl-label + operation: Update + time: "2021-11-01T10:32:35Z" + name: accessdomain-credentials + namespace: oamns + resourceVersion: "990770" + uid: b2ffcd87-8c61-4fb1-805e-3768295982e2 + type: Opaque + 
``` 1. Create a Kubernetes secret for RCU using the create-weblogic-credentials script in the same Kubernetes namespace as the domain: - ```bash - $ cd /weblogic-kubernetes-operator/kubernetes/samples/scripts/create-rcu-credentials - $ ./create-rcu-credentials.sh -u -p -a sys -q -d -n -s - ``` + ```bash + $ cd $WORKDIR/kubernetes/create-rcu-credentials + $ ./create-rcu-credentials.sh -u -p -a sys -q -d -n -s + ``` - where: + where: - `-u ` is the name of the RCU schema prefix created previously + `-u ` is the name of the RCU schema prefix created previously - `-p ` is the password for the RCU schema prefix + `-p ` is the password for the RCU schema prefix - `-q ` is the sys database password + `-q ` is the sys database password - `-d ` is the domain_uid that you created earlier + `-d ` is the domain_uid that you created earlier - `-n ` is the domain namespace + `-n ` is the domain namespace - `-s ` is the name of the rcu secret to create + `-s ` is the name of the rcu secret to create - For example: + For example: - ```bash - $ cd /scratch/OAMDockerK8S/weblogic-kubernetes-operator/kubernetes/samples/scripts/create-rcu-credentials - $ ./create-rcu-credentials.sh -u OAMK8S -p -a sys -q -d accessdomain -n oamns -s accessdomain-rcu-credentials - ``` + ```bash + $ cd $WORKDIR/kubernetes/create-rcu-credentials + $ ./create-rcu-credentials.sh -u OAMK8S -p -a sys -q -d accessdomain -n oamns -s accessdomain-rcu-credentials + ``` - The output will look similar to the following: + The output will look similar to the following: - ```bash - secret/accessdomain-rcu-credentials created - secret/accessdomain-rcu-credentials labeled - The secret accessdomain-rcu-credentials has been successfully created in the oamns namespace. - ``` + ``` + secret/accessdomain-rcu-credentials created + secret/accessdomain-rcu-credentials labeled + The secret accessdomain-rcu-credentials has been successfully created in the oamns namespace. + ``` 1. 
Verify the secret is created using the following command: - ```bash - $ kubectl get secret -o yaml -n - ``` + ```bash + $ kubectl get secret -o yaml -n + ``` - For example: + For example: - ```bash - $ kubectl get secret accessdomain-rcu-credentials -o yaml -n oamns - ``` + ```bash + $ kubectl get secret accessdomain-rcu-credentials -o yaml -n oamns + ``` - The output will look similar to the following: + The output will look similar to the following: - ```bash - apiVersion: v1 - data: - password: V2VsY29tZTE= - sys_password: V2VsY29tZTE= - sys_username: c3lz - username: T0FNSzhT - kind: Secret - metadata: - creationTimestamp: "2020-09-23T15:50:04Z" - labels: - weblogic.domainName: accessdomain - weblogic.domainUID: accessdomain - managedFields: - - apiVersion: v1 - fieldsType: FieldsV1 - fieldsV1: - f:data: - .: {} - f:password: {} - f:sys_password: {} - f:sys_username: {} - f:username: {} - f:metadata: - f:labels: - .: {} - f:weblogic.domainName: {} - f:weblogic.domainUID: {} - f:type: {} - manager: kubectl - operation: Update - time: "2020-09-23T15:50:04Z" + ``` + apiVersion: v1 + data: + password: V2VsY29tZTE= + sys_password: V2VsY29tZTE= + sys_username: c3lz + username: T0FNSzhT + kind: Secret + metadata: + creationTimestamp: "2021-11-01T10:33:37Z" + labels: + weblogic.domainName: accessdomain + weblogic.domainUID: accessdomain + managedFields: + - apiVersion: v1 + fieldsType: FieldsV1 + fieldsV1: + f:data: + .: {} + f:password: {} + f:sys_password: {} + f:sys_username: {} + f:username: {} + f:type: {} + manager: kubectl-create + operation: Update + time: "2021-11-01T10:33:37Z" + - apiVersion: v1 + fieldsType: FieldsV1 + fieldsV1: + f:metadata: + f:labels: + .: {} + f:weblogic.domainName: {} + f:weblogic.domainUID: {} + manager: kubectl-label + operation: Update + time: "2021-11-01T10:33:37Z" name: accessdomain-rcu-credentials namespace: oamns - resourceVersion: "51134" - selfLink: /api/v1/namespaces/oamns/secrets/accessdomain-rcu-credentials - uid: 
fce2499c-d8c8-4e9c-93e0-b15722bfc4d7 - type: Opaque - ``` + resourceVersion: "992205" + uid: ee283fbd-6211-4172-9c28-a65c84ecd794 + type: Opaque + ``` ### Create a Kubernetes persistent volume and persistent volume claim - In the Kubernetes namespace created above, create the persistent volume (PV) and persistent volume claim (PVC) by running the `create-pv-pvc.sh` script. + A persistent volume is the same as a disk mount but is inside a container. A Kubernetes persistent volume is an arbitrary name (determined in this case, by Oracle) that is mapped to a physical volume on a disk. + + When a container is started, it needs to mount that volume. The physical volume should be on a shared disk accessible by all the Kubernetes worker nodes because it is not known on which worker node the container will be started. In the case of Identity and Access Management, the persistent volume does not get erased when a container stops. This enables persistent configurations. + + The example below uses an NFS mounted volume (/accessdomainpv). Other volume types can also be used. See the official [Kubernetes documentation for Volumes](https://kubernetes.io/docs/concepts/storage/volumes/). + + To create a Kubernetes persistent volume, perform the following steps: 1. 
Make a backup copy of the `create-pv-pvc-inputs.yaml` file and create required directories: - ```bash - $ cd /weblogic-kubernetes-operator/kubernetes/samples/scripts/create-weblogic-domain-pv-pvc - $ cp create-pv-pvc-inputs.yaml create-pv-pvc-inputs.yaml.orig - $ mkdir output - $ mkdir -p //accessdomainpv - $ chmod -R 777 //accessdomainpv - ``` + ```bash + $ cd $WORKDIR/kubernetes/create-weblogic-domain-pv-pvc + $ cp create-pv-pvc-inputs.yaml create-pv-pvc-inputs.yaml.orig + $ mkdir output + $ mkdir -p //accessdomainpv + $ chmod -R 777 //accessdomainpv + ``` For example: - ```bash - $ cd /scratch/OAMDockerK8S/weblogic-kubernetes-operator/kubernetes/samples/scripts/create-weblogic-domain-pv-pvc - $ cp create-pv-pvc-inputs.yaml create-pv-pvc-inputs.yaml.orig - $ mkdir output - $ mkdir -p /scratch/OAMDockerK8S/accessdomainpv - $ chmod -R 777 /scratch/OAMDockerK8S/accessdomainpv - ``` + ```bash + $ cd $WORKDIR/kubernetes/create-weblogic-domain-pv-pvc + $ cp create-pv-pvc-inputs.yaml create-pv-pvc-inputs.yaml.orig + $ mkdir output + $ mkdir -p /scratch/OAMK8S/accessdomainpv + $ chmod -R 777 /scratch/OAMK8S/accessdomainpv + ``` - **Note**: The persistent volume directory needs to be accessible to both the master and worker node(s) via NFS. Make sure this path has full access permissions, and that the folder is empty. In this example `/scratch/OAMDockerK8S/accessdomainpv` is accessible from all nodes via NFS. + **Note**: The persistent volume directory needs to be accessible to both the master and worker node(s) via NFS. Make sure this path has full access permissions, and that the folder is empty. In this example `/scratch/OAMK8S/accessdomainpv` is accessible from all nodes via NFS. 1. 
On the master node run the following command to ensure it is possible to read and write to the persistent volume: ```bash - cd /accessdomainpv + cd /accessdomainpv touch filemaster.txt ls filemaster.txt ``` @@ -835,7 +860,7 @@ In this section you prepare the environment for the OAM domain creation. This in For example: ```bash - cd /scratch/OAMDockerK8S/accessdomainpv + cd /scratch/OAMK8S/accessdomainpv touch filemaster.txt ls filemaster.txt ``` @@ -843,7 +868,7 @@ In this section you prepare the environment for the OAM domain creation. This in On the first worker node run the following to ensure it is possible to read and write to the persistent volume: ```bash - cd /scratch/OAMDockerK8S/accessdomainpv + cd /scratch/OAMK8S/accessdomainpv ls filemaster.txt touch fileworker1.txt ls fileworker1.txt @@ -853,7 +878,7 @@ In this section you prepare the environment for the OAM domain creation. This in 1. Edit the `create-pv-pvc-inputs.yaml` file and update the following parameters to reflect your settings. Save the file when complete: - ```bash + ``` baseName: domainUID: namespace: @@ -865,7 +890,7 @@ In this section you prepare the environment for the OAM domain creation. This in For example: - ```bash + ``` # The base name of the pv and pvc baseName: domain @@ -874,6 +899,7 @@ In this section you prepare the environment for the OAM domain creation. This in # If left empty, the generated pv can be shared by multiple domains # This ID must not contain an underscope ("_"), and must be lowercase and unique across all domains in a Kubernetes cluster. domainUID: accessdomain + # Name of the namespace for the persistent volume claim namespace: oamns ... @@ -895,7 +921,7 @@ In this section you prepare the environment for the OAM domain creation. This in # Note that the path where the domain is mounted in the WebLogic containers is not affected by this # setting, that is determined when you create your domain. 
# The following line must be uncomment and customized: - weblogicDomainStoragePath: /scratch/OAMDockerK8S/accessdomainpv + weblogicDomainStoragePath: /scratch/OAMK8S/accessdomainpv # Reclaim policy of the persistent storage # The valid values are: 'Retain', 'Delete', and 'Recycle' @@ -907,54 +933,55 @@ In this section you prepare the environment for the OAM domain creation. This in 1. Execute the `create-pv-pvc.sh` script to create the PV and PVC configuration files: - ```bash - $ ./create-pv-pvc.sh -i create-pv-pvc-inputs.yaml -o output - ``` + ```bash + $ ./create-pv-pvc.sh -i create-pv-pvc-inputs.yaml -o output + ``` - The output will be similar to the following: + The output will be similar to the following: - ```bash - Input parameters being used - export version="create-weblogic-sample-domain-pv-pvc-inputs-v1" - export baseName="domain" - export domainUID="accessdomain" - export namespace="oamns" - export weblogicDomainStorageType="NFS" - export weblogicDomainStorageNFSServer="mynfsserver" - export weblogicDomainStoragePath="/scratch/OAMDockerK8S/accessdomainpv" - export weblogicDomainStorageReclaimPolicy="Retain" - export weblogicDomainStorageSize="10Gi" - - Generating output/pv-pvcs/accessdomain-weblogic-sample-pv.yaml - Generating output/pv-pvcs/accessdomain-weblogic-sample-pvc.yaml - The following files were generated: - output/pv-pvcs/accessdomain-weblogic-sample-pv.yaml - output/pv-pvcs/accessdomain-weblogic-sample-pvc.yaml - ``` + ``` + Input parameters being used + export version="create-weblogic-sample-domain-pv-pvc-inputs-v1" + export baseName="domain" + export domainUID="accessdomain" + export namespace="oamns" + export weblogicDomainStorageType="NFS" + export weblogicDomainStorageNFSServer="mynfsserver" + export weblogicDomainStoragePath="/scratch/OAMK8S/accessdomainpv" + export weblogicDomainStorageReclaimPolicy="Retain" + export weblogicDomainStorageSize="10Gi" + + Generating output/pv-pvcs/accessdomain-weblogic-sample-pv.yaml + Generating 
output/pv-pvcs/accessdomain-weblogic-sample-pvc.yaml + The following files were generated: + output/pv-pvcs/accessdomain-weblogic-sample-pv.yaml + output/pv-pvcs/accessdomain-weblogic-sample-pvc.yaml + ``` 1. Run the following to show the files are created: - ```bash - $ ls output/pv-pvcs - create-pv-pvc-inputs.yaml accessdomain-weblogic-sample-pv.yaml accessdomain-weblogic-sample-pvc.yaml - ``` + ```bash + $ ls output/pv-pvcs + accessdomain-domain-pv.yaml accessdomain-domain-pvc.yaml create-pv-pvc-inputs.yaml + ``` + 1. Run the following `kubectl` command to create the PV and PVC in the domain namespace: - ```bash - $ kubectl create -f output/pv-pvcs/accessdomain-domain-pv.yaml -n - $ kubectl create -f output/pv-pvcs/accessdomain-domain-pvc.yaml -n - ``` + ```bash + $ kubectl create -f output/pv-pvcs/accessdomain-domain-pv.yaml -n + $ kubectl create -f output/pv-pvcs/accessdomain-domain-pvc.yaml -n + ``` For example: ```bash $ kubectl create -f output/pv-pvcs/accessdomain-domain-pv.yaml -n oamns $ kubectl create -f output/pv-pvcs/accessdomain-domain-pvc.yaml -n oamns - ``` + ``` The output will look similar to the following: - ```bash + ``` persistentvolume/accessdomain-domain-pv created persistentvolumeclaim/accessdomain-domain-pvc created ``` @@ -975,7 +1002,7 @@ In this section you prepare the environment for the OAM domain creation. This in The output will look similar to the following: - ```bash + ``` $ kubectl describe pv accessdomain-domain-pv Name: accessdomain-domain-pv @@ -994,7 +1021,7 @@ In this section you prepare the environment for the OAM domain creation. 
This in Source: Type: NFS (an NFS mount that lasts the lifetime of a pod) Server: mynfsserver - Path: /scratch/OAMDockerK8S/accessdomainpv + Path: /scratch/OAMK8S/accessdomainpv ReadOnly: false Events: ``` diff --git a/docs-source/content/oam/prerequisites/_index.md b/docs-source/content/oam/prerequisites/_index.md index 714edc34c..d9f4c1f6d 100644 --- a/docs-source/content/oam/prerequisites/_index.md +++ b/docs-source/content/oam/prerequisites/_index.md @@ -7,10 +7,9 @@ description: "System requirements and limitations for deploying and running an O ### Introduction -This document provides information about the system requirements and limitations for deploying and running OAM domains with the WebLogic Kubernetes Operator 3.0.1. +This document provides information about the system requirements and limitations for deploying and running OAM domains with the WebLogic Kubernetes Operator 3.3.0. + -In this release, OAM domains are supported using the “domain on a persistent volume” -[model](https://oracle.github.io/weblogic-kubernetes-operator/userguide/managing-domains/choosing-a-model/) only, where the domain home is located in a persistent volume (PV). ### System requirements for oam domains @@ -22,15 +21,9 @@ In this release, OAM domains are supported using the “domain on a persistent v ### Limitations -Compared to running a WebLogic Server domain in Kubernetes using the operator, the -following limitations currently exist for OAM domains: - -* The "domain in image" model is not supported. -* Only configured clusters are supported. Dynamic clusters are not supported for - OAM domains. Note that you can still use all of the scaling features, - you just need to define the maximum size of your cluster at domain creation time. 
-* Deploying and running OAM domains is supported only with WebLogic Kubernetes Operator version 3.0.1 -* The [WebLogic Monitoring Exporter](https://github.com/oracle/weblogic-monitoring-exporter) - currently supports the WebLogic MBean trees only. Support for JRF MBeans has not - been added yet. +Compared to running a WebLogic Server domain in Kubernetes using the operator, the following limitations currently exist for OAM domains: + +* In this release, OAM domains are supported using the “domain on a persistent volume” [model](https://oracle.github.io/weblogic-kubernetes-operator/userguide/managing-domains/choosing-a-model/) only, where the domain home is located in a persistent volume (PV). The "domain in image" model is not supported. +* Only configured clusters are supported. Dynamic clusters are not supported for OAM domains. Note that you can still use all of the scaling features, you just need to define the maximum size of your cluster at domain creation time. +* The [WebLogic Monitoring Exporter](https://github.com/oracle/weblogic-monitoring-exporter) currently supports the WebLogic MBean trees only. Support for JRF MBeans has not been added yet. diff --git a/docs-source/content/oam/release-notes.md b/docs-source/content/oam/release-notes.md index 0ba469849..05ed6a06b 100644 --- a/docs-source/content/oam/release-notes.md +++ b/docs-source/content/oam/release-notes.md @@ -12,5 +12,7 @@ Review the latest changes and known issues for Oracle Access Management on Kuber | Date | Version | Change | | --- | --- | --- | -| September 3, 2021 | 21.3.3 | **A**) References to supported Kubernetes, Helm and Docker versions removed and replaced with Support note reference. **B**) Namespace and domain names changed to be consistent with [Enterprise Deployment Guide for Oracle Identity and Access Management in a Kubernetes Cluster](https://docs.oracle.com/en/middleware/fusion-middleware/12.2.1.4/ikedg/). **C**) Addtional post configuration tasks added. 
**D**) *Upgrading a Kubernetes Cluster* and *Security Hardening* removed as vendor specific.| +| November, 2021 | 21.4.2 | Supports Oracle Access Management domain deployment using WebLogic Kubernetes Operator 3.3.0. Voyager ingress removed as no longer supported.| +| October 2021 | 21.4.1 | **A**) References to supported Kubernetes, Helm and Docker versions removed and replaced with Support note reference. **B**) Namespace and domain names changed to be consistent with [Enterprise Deployment Guide for Oracle Identity and Access Management in a Kubernetes Cluster](https://docs.oracle.com/en/middleware/fusion-middleware/12.2.1.4/ikedg/). **C**) Additional post configuration tasks added. **D**) *Upgrading a Kubernetes Cluster* and *Security Hardening* removed as vendor specific.| +| November 2020 | 20.4.1 | Initial release of Oracle Access Management on Kubernetes.| diff --git a/docs-source/content/oam/troubleshooting/_index.md b/docs-source/content/oam/troubleshooting/_index.md index e934809fa..ec1f96f56 100644 --- a/docs-source/content/oam/troubleshooting/_index.md +++ b/docs-source/content/oam/troubleshooting/_index.md @@ -1,7 +1,7 @@ +++ title = "Troubleshooting" -weight = 11 -pre = "11. " +weight = 12 +pre = "12. " description = "How to Troubleshoot domain creation failure." +++ @@ -35,29 +35,27 @@ If the OAM domain creation fails when running `create-domain.sh`, run the follow Using the output you should be able to diagnose the problem and resolve the issue. - Clean down the failed domain creation by following steps 1-4 in [Delete the OAM domain home]({{< relref "/oam/manage-oam-domains/delete-domain-home" >}}). Then - [recreate the PV and PVC]({{< relref "/oam/prepare-your-environment/#create-a-kubernetes-persistent-volume-and-persistent-volume-claim" >}}) then execute the [OAM domain creation]({{< relref "/oam/create-oam-domains" >}}) steps again. 
+ Clean down the failed domain creation by following steps 1-3 in [Delete the OAM domain home]({{< relref "/oam/manage-oam-domains/delete-domain-home" >}}). Then follow [RCU schema creation]({{< relref "/oam/prepare-your-environment/#rcu-schema-creation" >}}) onwards to recreate the RCU schema, kubernetes secrets for domain and RCU, the persistent volume and the persistent volume claim. Then execute the [OAM domain creation]({{< relref "/oam/create-oam-domains" >}}) steps again. -2. If any of the above commands return the following error: +1. If any of the above commands return the following error: - ```bash + ``` Failed to start container "create-fmw-infra-sample-domain-job": Error response from daemon: error while creating mount source path - '/scratch/OAMDockerK8S/accessdomainpv ': mkdir /scratch/OAMDockerK8S/accessdomainpv : permission denied + '/scratch/OAMK8S/accessdomainpv ': mkdir /scratch/OAMK8S/accessdomainpv : permission denied ``` then there is a permissions error on the directory for the PV and PVC and the following should be checked: - a) The directory has 777 permissions: `chmod -R 777 /accessdomainpv`. + a) The directory has 777 permissions: `chmod -R 777 /accessdomainpv`. b) If it does have the permissions, check if an `oracle` user exists and the `uid` and `gid` equal `1000`. Create the `oracle` user if it doesn't exist and set the `uid` and `gid` to `1000`. 
- c) Edit the `/weblogic-kubernetes-operator/kubernetes/samples/scripts/create-access-domain-pv-pvc/create-pv-pvc-inputs.yaml` and add a slash to the end of the directory for the `weblogicDomainStoragePath` parameter: + c) Edit the `$WORKDIR/kubernetes/create-weblogic-domain-pv-pvc/create-pv-pvc-inputs.yaml` and add a slash to the end of the directory for the `weblogicDomainStoragePath` parameter: - ```bash - weblogicDomainStoragePath: /scratch/OAMDockerK8S/accessdomainpv/ + ``` + weblogicDomainStoragePath: /scratch/OAMK8S/accessdomainpv/ ``` - Clean down the failed domain creation by following steps 1-4 in [Delete the OAM domain home]({{< relref "/oam/manage-oam-domains/delete-domain-home" >}}). Then - [recreate the PV and PVC]({{< relref "/oam/prepare-your-environment/#create-a-kubernetes-persistent-volume-and-persistent-volume-claim" >}}) and then execute the [OAM domain creation]({{< relref "/oam/create-oam-domains" >}}) steps again. + Clean down the failed domain creation by following steps 1-3 in [Delete the OAM domain home]({{< relref "/oam/manage-oam-domains/delete-domain-home" >}}). Then follow [RCU schema creation]({{< relref "/oam/prepare-your-environment/#rcu-schema-creation" >}}) onwards to recreate the RCU schema, kubernetes secrets for domain and RCU, the persistent volume and the persistent volume claim. Then execute the [OAM domain creation]({{< relref "/oam/create-oam-domains" >}}) steps again. diff --git a/docs-source/content/oam/validate-domain-urls/_index.md b/docs-source/content/oam/validate-domain-urls/_index.md index 816a24088..39d266d6f 100644 --- a/docs-source/content/oam/validate-domain-urls/_index.md +++ b/docs-source/content/oam/validate-domain-urls/_index.md @@ -5,9 +5,9 @@ pre = "6. " description = "Sample for validating domain urls." +++ -In this section you validate the OAM domain URLs are accessible via the NGINX or Voyager ingress. +In this section you validate the OAM domain URLs are accessible via the NGINX ingress. 
-Make sure you know the master hostname and ingress port for NGINX or Voyager before proceeding. +Make sure you know the master hostname and ingress port for NGINX before proceeding. #### Validate the OAM domain urls via the Ingress diff --git a/docs-source/content/oam/validate-sso-using-webgate/_index.md b/docs-source/content/oam/validate-sso-using-webgate/_index.md index 324a0736a..94f322f28 100644 --- a/docs-source/content/oam/validate-sso-using-webgate/_index.md +++ b/docs-source/content/oam/validate-sso-using-webgate/_index.md @@ -11,11 +11,10 @@ In this section you validate single-sign on works to the OAM Kubernetes cluster #### Update the OAM Hostname and Port for the Loadbalancer -If using an NGINX or Voyager ingress with no load balancer, change `{LOADBALANCER-HOSTNAME}:${LOADBALANCER-PORT}` to `{MASTERNODE-HOSTNAME}:${MASTERNODE-PORT}` when referenced below. +If using an NGINX ingress with no load balancer, change `{LOADBALANCER-HOSTNAME}:${LOADBALANCER-PORT}` to `{MASTERNODE-HOSTNAME}:${MASTERNODE-PORT}` when referenced below. 1. Launch a browser and access the OAM console (`https://${LOADBALANCER-HOSTNAME}:${LOADBALANCER-PORT}/oamconsole`). Login with the weblogic username and password (`weblogic/`) - 1. Navigate to **Configuration** → **Settings ( View )** → **Access Manager**. 1. Under Load Balancing modify the **OAM Server Host** and **OAM Server Port**, to point to the Loadbalancer HTTP endpoint (e.g `loadbalancer.example.com` and `` respectively). In the **OAM Server Protocol** drop down list select **https**. 
@@ -38,7 +37,7 @@ In all the examples below, change the directory path as appropriate for your ins The output will look similar to the following: - ```bash + ``` Copying files from WebGate Oracle Home to WebGate Instancedir ``` @@ -52,7 +51,7 @@ In all the examples below, change the directory path as appropriate for your ins The output will look similar to the following: - ```bash + ``` The web server configuration file was successfully updated /scratch/export/home/oracle/admin/domains/oam_domain/config/fmwconfig/components/OHS/ohs_k8s/httpd.conf has been backed up as /scratch/export/home/oracle/admin/domains/oam_domain/config/fmwconfig/components/OHS/ohs_k8s/httpd.conf.ORIG ``` @@ -96,28 +95,32 @@ To change the WebGate agent to use OAP: **Note**: To find the value for `Host Port` run the following: ```bash - $ kubectl describe svc oamoap-service -n oamns + $ kubectl describe svc accessdomain-oamoap-service -n oamns ``` The output will look similar to the following: ``` - Name: oamoap-service - Namespace: oamns - Labels: - Annotations: - Selector: weblogic.clusterName=oam_cluster - Type: NodePort - IP: 10.96.63.13 - Port: 5575/TCP - TargetPort: 5575/TCP - NodePort: 30540/TCP - Endpoints: 10.244.0.30:5575,10.244.0.31:5575 - Session Affinity: None - External Traffic Policy: Cluster - Events: + Name: accessdomain-oamoap-service + Namespace: oamns + Labels: + Annotations: + Selector: weblogic.clusterName=oam_cluster + Type: NodePort + IP Families: + IP: 10.100.202.44 + IPs: 10.100.202.44 + Port: 5575/TCP + TargetPort: 5575/TCP + NodePort: 30540/TCP + Endpoints: 10.244.5.21:5575,10.244.6.76:5575 + Session Affinity: None + External Traffic Policy: Cluster + Events: ``` - + + In the example above the `NodePort` is `30540`. + 1. Delete all servers in **Server Lists** except for the one just created, and click `Apply`. 1. Click Download to download the webgate zip file. Copy the zip file to the desired WebGate. 
diff --git a/OracleSOASuite/kubernetes/create-soa-domain/soa-monitoring-services/.gitkeep b/docs-source/content/oid/.gitkeep similarity index 100% rename from OracleSOASuite/kubernetes/create-soa-domain/soa-monitoring-services/.gitkeep rename to docs-source/content/oid/.gitkeep diff --git a/docs-source/content/oid/_index.md b/docs-source/content/oid/_index.md index 676507c64..0d07c2fc8 100644 --- a/docs-source/content/oid/_index.md +++ b/docs-source/content/oid/_index.md @@ -2,6 +2,7 @@ title: "Oracle Internet Directory" date: 2019-02-23T16:43:45-05:00 description: "Oracle Internet Directory provides a comprehensive Directory Solution for robust Identity Management" +weight: 2 --- Oracle Internet Directory provides a comprehensive Directory Solution for robust Identity Management. @@ -27,4 +28,5 @@ For detailed information about deploying Oracle Internet Directory, start at [Pr ### Current release -The current supported release of Oracle Internet Directory is OID 12c PS4 (12.2.1.4.0) +The current production release for Oracle Internet Directory 12c PS4 (12.2.1.4.0) deployment on Kubernetes is [21.4.1](https://github.com/oracle/fmw-kubernetes/releases). + diff --git a/docs-source/content/oid/create-oid-instances-helm/_index.md b/docs-source/content/oid/create-oid-instances-helm/_index.md index 18a094e5d..b96753e00 100644 --- a/docs-source/content/oid/create-oid-instances-helm/_index.md +++ b/docs-source/content/oid/create-oid-instances-helm/_index.md @@ -1,8 +1,8 @@ +++ title = "Create Oracle Internet Directory Instances Using Helm" date = 2019-04-18T06:46:23-05:00 -weight = 3 -pre = "3. " +weight = 4 +pre = "4. " description= "This document provides steps to create Oracle Internet Directory instances using Helm Charts." 
+++ diff --git a/docs-source/content/oid/prepare-your-environment/_index.md b/docs-source/content/oid/prepare-your-environment/_index.md index dd553c593..fcd1e0640 100644 --- a/docs-source/content/oid/prepare-your-environment/_index.md +++ b/docs-source/content/oid/prepare-your-environment/_index.md @@ -1,8 +1,8 @@ +++ title= "Prepare Your Environment" date = 2019-04-18T06:46:23-05:00 -weight = 2 -pre = "2. " +weight = 3 +pre = "3. " description = "Prepare your environment" +++ diff --git a/docs-source/content/oid/prerequisites/_index.md b/docs-source/content/oid/prerequisites/_index.md index 690e1c655..82c9d7e67 100644 --- a/docs-source/content/oid/prerequisites/_index.md +++ b/docs-source/content/oid/prerequisites/_index.md @@ -1,8 +1,8 @@ --- title: "Prerequisites" date: 2019-04-18T07:32:31-05:00 -weight: 1 -pre : "1. " +weight: 2 +pre : "2. " description: "Prerequisites for deploying and running Oracle Internet Directory in a Kubernetes environment." --- diff --git a/docs-source/content/oid/release-notes.md b/docs-source/content/oid/release-notes.md new file mode 100644 index 000000000..f33840162 --- /dev/null +++ b/docs-source/content/oid/release-notes.md @@ -0,0 +1,16 @@ +--- +title: "Release Notes" +date: 2019-03-15T11:25:28-04:00 +draft: false +weight: 1 +pre: "1. " +--- + +Review the latest changes and known issues for Oracle Internet Directory on Kubernetes. + +### Recent changes + +| Date | Version | Change | +| --- | --- | --- | +| October, 2021 | 21.4.1 | Initial release of Oracle Internet Directory on Kubernetes. | + diff --git a/docs-source/content/oid/troubleshooting/_index.md b/docs-source/content/oid/troubleshooting/_index.md index 5a81c3b71..35c610523 100644 --- a/docs-source/content/oid/troubleshooting/_index.md +++ b/docs-source/content/oid/troubleshooting/_index.md @@ -1,8 +1,8 @@ +++ title = "Troubleshooting" date = 2019-04-18T07:32:31-05:00 -weight = 4 -pre = "4. " +weight = 5 +pre = "5. " description = "How to Troubleshoot issues." +++ 1. 
[Check the Status of a Namespace](#check-the-status-of-a-namespace) diff --git a/docs-source/content/oig/_index.md b/docs-source/content/oig/_index.md index 3c6e96e74..bdf7ce74e 100644 --- a/docs-source/content/oig/_index.md +++ b/docs-source/content/oig/_index.md @@ -1,7 +1,7 @@ --- title: "Oracle Identity Governance" description: "The WebLogic Kubernetes Operator supports deployment of Oracle Identity Governance. Follow the instructions in this guide to set up Oracle Identity Governance domains on Kubernetes." -weight: 2 +weight: 3 --- The WebLogic Kubernetes Operator supports deployment of Oracle Identity Governance (OIG). @@ -19,6 +19,13 @@ environment. You can: * Publish operator and WebLogic Server logs into Elasticsearch and interact with them in Kibana. * Monitor the OIG instance using Prometheus and Grafana. +### Current production release + +The current production release for the Oracle Identity Governance domain deployment on Kubernetes is [21.4.2](https://github.com/oracle/fmw-kubernetes/releases). This release uses the WebLogic Kubernetes Operator version 3.3.0. + +This release of the documentation can also be used for 3.1.X and 3.2.0 WebLogic Kubernetes Operator. +For 3.0.X WebLogic Kubernetes Operator refer to [Version 21.4.1](https://oracle.github.io/fmw-kubernetes/21.4.1/oig/) + ### Limitations See [here]({{< relref "/oig/prerequisites#limitations">}}) for limitations in this release. @@ -29,7 +36,8 @@ For detailed information about deploying Oracle Identity Governance domains, sta If performing an Enterprise Deployment, refer to the [Enterprise Deployment Guide for Oracle Identity and Access Management in a Kubernetes Cluster](https://docs.oracle.com/en/middleware/fusion-middleware/12.2.1.4/ikedg/index.html) instead. 
-### Current release +### Documentation for earlier releases -The current supported release of the WebLogic Kubernetes Operator, for Oracle Identity Governance domains deployment is [3.0.1](https://github.com/oracle/weblogic-kubernetes-operator/releases). +To view documentation for an earlier release, see: +* [Version 21.4.1](https://oracle.github.io/fmw-kubernetes/21.4.1/oig/) diff --git a/docs-source/content/oig/configure-design-console/Using the design console with NGINX (SSL).md b/docs-source/content/oig/configure-design-console/Using the design console with NGINX (SSL).md index 46f596d93..73ae79db1 100644 --- a/docs-source/content/oig/configure-design-console/Using the design console with NGINX (SSL).md +++ b/docs-source/content/oig/configure-design-console/Using the design console with NGINX (SSL).md @@ -6,289 +6,40 @@ description: "Configure Design Console with NGINX(SSL)." Configure an NGINX ingress (SSL) to allow Design Console to connect to your Kubernetes cluster. -#### Generate SSL Certificate - -**Note**: If already using NGINX with SSL for OIG you can skip this section: - -1. Generate a private key and certificate signing request (CSR) using a tool of your choice. Send the CSR to your certificate authority (CA) to generate the certificate. - - If you want to use a certificate for testing purposes you can generate a self signed certificate using openssl: - - ``` - $ mkdir /ssl - $ cd /ssl - $ openssl req -x509 -nodes -days 365 -newkey rsa:2048 -keyout tls.key -out tls.crt -subj "/CN=" - ``` +1. [Prerequisites](#prerequisites) +1. [Setup routing rules for the Design Console ingress](#setup-routing-rules-for-the-design-console-ingress) +1. [Create the ingress](#create-the-ingress) +1. [Update the T3 channel](#update-the-t3-channel) +1. [Restart the OIG domain](#restart-the-oig-domain) +1. [Design Console client](#design-console-client) - For example: + a. 
[Using an on-premises installed Design Console](#using-an-on-premises-installed-design-console) - ``` - $ mkdir /scratch/OIGDockerK8S/ssl - $ cd /scratch/OIGDockerK8S/ssl - $ openssl req -x509 -nodes -days 365 -newkey rsa:2048 -keyout tls.key -out tls.crt -subj "/CN=masternode.example.com" - ``` + b. [Using a container image for Design Console](#using-a-container-image-for-design-console) - **Note**: The `CN` should match the host.domain of the master node in order to prevent hostname problems during certificate verification. - - The output will look similar to the following: - - ``` - $ openssl req -x509 -nodes -days 365 -newkey rsa:2048 -keyout tls.key -out tls.crt -subj "/CN=masternode.example.com" - Generating a 2048 bit RSA private key - ..........................................+++ - .......................................................................................................+++ - writing new private key to 'tls.key' - ----- - ``` +1. [Login to the Design Console](#login-to-the-design-console) -1. Create a secret for SSL containing the SSL certificate by running the following command: - - ``` - $ kubectl -n oigns create secret tls -tls-cert --key /tls.key --cert /tls.crt - ``` - - For example: - - ``` - $ kubectl -n oigns create secret tls governancedomain-tls-cert --key /scratch/OIGDockerK8S/ssl/tls.key --cert /scratch/OIGDockerK8S/ssl/tls.crt - ``` - - The output will look similar to the following: - - ``` - $ kubectl -n oigns create secret tls governancedomain-tls-cert --key /scratch/OIGDockerK8S/ssl/tls.key --cert /scratch/OIGDockerK8S/ssl/tls.crt - secret/governancedomain-tls-cert created - $ - ``` - -1. 
Confirm that the secret is created by running the following command: - - ``` - $ kubectl get secret governancedomain-tls-cert -o yaml -n oigns - ``` - - The output will look similar to the following: - - ``` - apiVersion: v1 - data: - tls.crt: LS0tLS1CRUdJTiBDRVJUSUZJQ0FURS0tLS0tCk1JSURGVENDQWYyZ0F3SUJBZ0lKQUl3ZjVRMWVxZnljTUEwR0NTcUdTSWIzRFFFQkN3VUFNQ0V4SHpBZEJnTlYKQkFNTUZtUmxiakF4WlhadkxuVnpMbTl5WVdOc1pTNWpiMjB3SGhjTk1qQXdPREV3TVRReE9UUXpXaGNOTWpFdwpPREV3TVRReE9UUXpXakFoTVI4d0hRWURWUVFEREJaa1pXNHdNV1YyYnk1MWN5NXZjbUZqYkdVdVkyOXRNSUlCCklqQU5CZ2txaGtpRzl3MEJBUUVGQUFPQ0FROEFNSUlCQ2dLQ0FRRUEyY0lpVUhwcTRVZzBhaGR6aXkycHY2cHQKSVIza2s5REd2eVRNY0syaWZQQ2dtUU5CdHV6VXNFN0l4c294eldITmU5RFpXRXJTSjVON3FYYm1lTzJkMVd2NQp1aFhzbkFTbnkwY1NLUE9xVDNQSlpDVk1MK0llZVFKdnhaVjZaWWU4V2FFL1NQSGJzczRjYy9wcG1mc3pxCnErUi83cXEyMm9ueHNHaE9vQ1h1TlQvMFF2WXVzMnNucGtueWRKRHUxelhGbDREYkFIZGMvamNVK0NPWWROeS8KT3Iza2JIV0FaTkR4OWxaZUREOTRmNXZLcUF2V0FkSVJZa2UrSmpNTHg0VHo2ZlM0VXoxbzdBSTVuSApPQ1ZMblV5U0JkaGVuWTNGNEdFU0wwbnorVlhFWjRWVjRucWNjRmo5cnJ0Q29pT1BBNlgvNGdxMEZJbi9Qd0lECkFRQUJvMUF3VGpBZEJnTlZIUTRFRmdRVWw1VnVpVDBDT0xGTzcxMFBlcHRxSC9DRWZyY3dId1lEVlIwakJCZ3cKRm9BVWw1VnVpVDBDT0xGTzcxMFBlcHRxSC9DRWZyY3dEQVlEVlIwVEJBVXdBd0VCL3pBTkJna3Foa2lHOXcwQgpBUXNGQUFPQ0FRRUFXdEN4b2ZmNGgrWXZEcVVpTFFtUnpqQkVBMHJCOUMwL1FWOG9JQzJ3d1hzYi9KaVNuMHdOCjNMdHppejc0aStEbk1yQytoNFQ3enRaSkc3NVluSGRKcmxQajgzVWdDLzhYTlFCSUNDbTFUa3RlVU1jWG0reG4KTEZEMHpReFhpVzV0N1FHcWtvK2FjeTlhUnUvN3JRMXlNSE9HdVVkTTZETzErNXF4cTdFNXFMamhyNEdKejV5OAoraW8zK25UcUVKMHFQOVRocG96RXhBMW80OEY0ZHJybWdqd3ROUldEQVpBYmYyV1JNMXFKWXhxTTJqdU1FQWNsCnFMek1TdEZUQ2o1UGFTQ0NUV1VEK3ZlSWtsRWRpaFdpRm02dzk3Y1diZ0lGMlhlNGk4L2szMmF1N2xUTDEvd28KU3Q2dHpsa20yV25uUFlVMzBnRURnVTQ4OU02Z1dybklpZz09Ci0tLS0tRU5EIENFUlRJRklDQVRFLS0tLS0K - tls.key: 
LS0tLS1CRUdJTiBQUklWQVRFIEtFWS0tLS0tCk1JSUV1d0lCQURBTkJna3Foa2lHOXcwQkFRRUZBQVNDQktVd2dnU2hBZ0VBQW9JQkFRRFp3aUpRZW1yaFNEUnEKRjNPTExhbS9xbTBoSGVTVDBNYS9KTXh3cmFKODhLQ1pBMEcyN05Td1Rzakd5akhOWWMxNzBObFlTdEluazN1cApkdVo0N1ozVmEvbTZGZXljQktmTFJ4SW84NnIwSmhQYzhsa0pVd3Y0aDU1QW0vRmxYcGxoN3hab1Q5SThkdXl6Cmh4eittbVorek9xcjVIL3VxcmJhaWZHd2FFNmdKZTQxUC9SQzlpNnpheWVtU2ZKMGtPN1hOY1dYZ05zQWQxeisKTnhUNEk1aDAzTDg2dmVSc2RZQmswUEgyVmw0TVAzaC9tOHFWdW5mK1NvQzlZQjBoRmlSNzRtTXd2SGhQUHA5TApoVFBXanNBam1jYzRKVXVkVEpJRjJGNmRqY1hnWVJJdlNmUDVWY1JuaFZYaWVweHdXUDJ1dTBLaUk0OERwZi9pCkNyUVVpZjgvQWdNQkFBRUNnZjl6cnE2TUVueTFNYWFtdGM2c0laWU1QSDI5R2lSVVlwVXk5bG1sZ3BqUHh3V0sKUkRDay9Td0FmZG9yd1Q2ejNVRk1oYWJ4UU01a04vVjZFYkJlamQxT15bjdvWTVEQWJRRTR3RG9SZWlrVApONndWU0FrVC92Z1RXc1RqRlY1bXFKMCt6U2ppOWtySkZQNVNRN1F2cUswQ3BHRlNhVjY2dW8ycktiNmJWSkJYCkxPZmZPMytlS0tVazBaTnE1Q1NVQk9mbnFoNVFJSGdpaDNiMTRlNjB6bndrNWhaMHBHZE9BQm9aTkoKZ21lanUyTEdzVWxXTjBLOVdsUy9lcUllQzVzQm9jaWlocmxMVUpGWnpPRUV6LzErT2cyemhmT29yTE9rMTIrTgpjQnV0cTJWQ2I4ZFJDaFg1ZzJ0WnBrdzgzcXN5RSt3M09zYlQxa0VDZ1lFQTdxUnRLWGFONUx1SENvWlM1VWhNCm9Hak1WcnYxTEg0eGNhaDJITmZnMksrMHJqQkJONGpkZkFDMmF3R3ZzU1EyR0lYRzVGYmYyK0pwL1kxbktKOEgKZU80MzNLWVgwTDE4NlNNLzFVay9HSEdTek1CWS9KdGR6WkRrbTA4UnBwaTl4bExTeDBWUWtFNVJVcnJJcTRJVwplZzBOM2RVTHZhTVl1UTBrR2dncUFETUNnWUVBNlpqWCtjU2VMZ1BVajJENWRpUGJ1TmVFd2RMeFNPZDFZMUFjCkUzQ01YTWozK2JxQ3BGUVIrTldYWWVuVmM1QiszajlSdHVnQ0YyTkNSdVdkZWowalBpL243UExIRHdCZVY0bVIKM3VQVHJmamRJbFovSFgzQ2NjVE94TmlaajU4VitFdkRHNHNHOGxtRTRieStYRExIYTJyMWxmUk9sUVRMSyswVgpyTU93eU1VQ2dZRUF1dm14WGM4NWxZRW9hU0tkU0cvQk9kMWlYSUtmc2VDZHRNT2M1elJ0UXRsSDQwS0RscE54CmxYcXBjbVc3MWpyYzk1RzVKNmE1ZG5xTE9OSFZoWW8wUEpmSXhPU052RXI2MTE5NjRBMm5sZXRHYlk0M0twUkEKaHBPRHlmdkZoSllmK29kaUJpZFUyL3ZBMCtUczNSUHJzRzBSOUVDOEZqVDNaZVhaNTF1R0xPa0NnWUFpTmU0NwplQjRxWXdrNFRsMTZmZG5xQWpaQkpLR05xY2c1V1R3alpMSkp6R3owdCtuMkl4SFd2WUZFSjdqSkNmcHFsaDlqCmlDcjJQZVV3K09QTlNUTG1JcUgydzc5L1pQQnNKWXVsZHZ4RFdGVWFlRXg1aHpkNDdmZlNRRjZNK0NHQmthYnIKVzdzU3R5V000ZFdITHpDaGZMS20yWGJBd0VqNUQrbkN1WTRrZVFLQmdFSkRHb0puM1NCR
Xcra2xXTE85N09aOApnc3lYQm9mUW1lRktIS2NHNzFZUFhJbTRlV1kyUi9KOCt5anc5b1FJQ3o5NlRidkdSZEN5QlJhbWhoTmFGUzVyCk9MZUc0ejVENE4zdThUc0dNem9QcU13KzBGSXJiQ3FzTnpGWTg3ekZweEdVaXZvRWZLNE82YkdERTZjNHFqNGEKNmlmK0RSRSt1TWRMWTQyYTA3ekoKLS0tLS1FTkQgUFJJVkFURSBLRVktLS0tLQo= - kind: Secret - metadata: - creationTimestamp: "2020-09-29T15:51:22Z" - managedFields: - - apiVersion: v1 - fieldsType: FieldsV1 - fieldsV1: - f:data: - .: {} - f:tls.crt: {} - f:tls.key: {} - f:type: {} - manager: kubectl - operation: Update - time: "2020-09-29T15:51:22Z" - name: governancedomain-tls-cert - namespace: oigns - resourceVersion: "1291036" - selfLink: /api/v1/namespaces/oigns/secrets/governancedomain-tls-cert - uid: a127e5fd-705b-43e1-ab56-590834efda5e - type: kubernetes.io/tls - ``` +### Prerequisites -### Add the NGINX ingress using helm +If you haven't already configured an NGINX ingress controller (SSL) for OIG, follow [Using an Ingress with NGINX (SSL)]({{< relref "/oig/configure-ingress/ingress-nginx-setup-for-oig-domain-setup-on-K8S-ssl">}}). -**Note**: If already using NGINX with SSL for OIG you can skip this section: +Make sure you know the master hostname and ingress port for NGINX before proceeding e.g `https://${MASTERNODE-HOSTNAME}:${MASTERNODE-PORT}`. Also make sure you know the Kubernetes secret for SSL that was generated e.g `governancedomain-tls-cert`. -1. Add the Helm chart repository for NGINX using the following command: - ```bash - $ helm repo add stable https://kubernetes.github.io/ingress-nginx - ``` - - The output will look similar to the following: +### Setup routing rules for the Design Console ingress - ```bash - "stable" has been added to your repositories - ``` -1. Update the repository using the following command: +1. 
Setup routing rules by running the following commands: ```bash - $ helm repo update + $ cd $WORKDIR/kubernetes/design-console-ingress ``` - The output will look similar to the following: - - ```bash - Hang tight while we grab the latest from your chart repositories... - ...Successfully got an update from the "stable" chart repository - Update Complete. Happy Helming! - ``` - -1. Create a Kubernetes namespace for NGINX: - - ``` - $ kubectl create namespace nginxssl - ``` - - The output will look similar to the following: - - ``` - namespace/nginxssl created - -### Install NGINX ingress using helm - -Install a NGINX ingress for the Design Console: - -If you can connect directly to the master node IP address from a browser, then install NGINX with the `--set controller.service.type=NodePort` parameter. - -If you are using a Managed Service for your Kubernetes cluster,for example Oracle Kubernetes Engine (OKE) on Oracle Cloud Infrastructure (OCI), and connect from a browser to the Load Balancer IP address, then use the `--set controller.service.type=LoadBalancer` parameter. This instructs the Managed Service to setup a Load Balancer to direct traffic to the NGINX ingress. - -1. To install NGINX use the following helm command depending on if you are using `NodePort` or `LoadBalancer`: - - a) Using NodePort - - ``` - $ helm install nginx-dc-operator-ssl -n nginxssl --set controller.extraArgs.default-ssl-certificate=oigns/governancedomain-tls-cert --set controller.service.type=NodePort --set controller.admissionWebhooks.enabled=false --set controller.service.nodePorts.https=30321 --set controller.ingressClass=nginx-designconsole stable/ingress-nginx --version=3.34.0 - ``` - The output will look similar to the following: - - ``` - LAST DEPLOYED: Wed Oct 21 03:52:25 2020 - NAMESPACE: nginxssl - STATUS: deployed - REVISION: 1 - TEST SUITE: None - NOTES: - The ingress-nginx controller has been installed. 
- Get the application URL by running these commands: - export HTTP_NODE_PORT=$(kubectl --namespace nginxssl get services -o jsonpath="{.spec.ports[0].nodePort}" nginx-dc-operator-ssl-ingress-nginx-controller) - export HTTPS_NODE_PORT=30321 - export NODE_IP=$(kubectl --namespace nginxssl get nodes -o jsonpath="{.items[0].status.addresses[1].address}") - - echo "Visit http://$NODE_IP:$HTTP_NODE_PORT to access your application via HTTP." - echo "Visit https://$NODE_IP:$HTTPS_NODE_PORT to access your application via HTTPS." - - An example Ingress that makes use of the controller: - - apiVersion: networking.k8s.io/v1beta1 - kind: Ingress - metadata: - annotations: - kubernetes.io/ingress.class: nginx-designconsole - name: example - namespace: foo - spec: - rules: - - host: www.example.com - http: - paths: - - backend: - serviceName: exampleService - servicePort: 80 - path: / - # This section is only required if TLS is to be enabled for the Ingress - tls: - - hosts: - - www.example.com - secretName: example-tls - - If TLS is enabled for the Ingress, a Secret containing the certificate and key must also be provided: - - apiVersion: v1 - kind: Secret - metadata: - name: example-tls - namespace: foo - data: - tls.crt: - tls.key: - type: kubernetes.io/tls - ``` + Edit `values.yaml` and ensure that `tls: SSL` is set. 
Change `domainUID:` and `secretName:` to match the values for your `` and your SSL Kubernetes secret, for example: - - b) Using LoadBalancer - - ``` - $ helm install nginx-dc-operator-ssl -n nginxssl --set controller.extraArgs.default-ssl-certificate=oigns/governancedomain-tls-cert --set controller.service.type=LoadBalancer --set controller.admissionWebhooks.enabled=false stable/ingress-nginx --version=3.34.0 - ``` - - The output will look similar to the following: - - ``` - NAME: nginx-dc-operator-ssl-lbr - LAST DEPLOYED: Wed Oct 21 04:02:35 2020 - NAMESPACE: nginxssl - STATUS: deployed - REVISION: 1 - TEST SUITE: None - NOTES: - The ingress-nginx controller has been installed. - It may take a few minutes for the LoadBalancer IP to be available. - You can watch the status by running 'kubectl --namespace nginxssl get services -o wide -w nginx-dc-operator-ssl-lbr-ingress-nginx-controller' - - An example Ingress that makes use of the controller: - - apiVersion: networking.k8s.io/v1beta1 - kind: Ingress - metadata: - annotations: - kubernetes.io/ingress.class: nginx - name: example - namespace: foo - spec: - rules: - - host: www.example.com - http: - paths: - - backend: - serviceName: exampleService - servicePort: 80 - path: / - # This section is only required if TLS is to be enabled for the Ingress - tls: - - hosts: - - www.example.com - secretName: example-tls - - If TLS is enabled for the Ingress, a Secret containing the certificate and key must also be provided: - - apiVersion: v1 - kind: Secret - metadata: - name: example-tls - namespace: foo - data: - tls.crt: - tls.key: - type: kubernetes.io/tls ``` - -### Setup Routing Rules for the Design Console ingress - -1. 
Setup routing rules by running the following commands: - - ``` - $ cd /weblogic-kubernetes-operator/kubernetes/samples/charts/design-console-ingress - $ cp values.yaml values.yaml.orig - $ vi values.yaml - ``` - - Edit `values.yaml` and ensure that `type=NGINX`, `tls=SSL`, `domainUID: governancedomain` and `secretName: governancedomain-tls-cert` are set, for example: - - ``` - $ cat values.yaml - # Copyright 2020 Oracle Corporation and/or its affiliates. - # Licensed under the Universal Permissive License v 1.0 as shown at https://oss.oracle.com/licenses/upl. - - # Default values for design-console-ingress. - # This is a YAML-formatted file. - # Declare variables to be passed into your templates. - - # Load balancer type. Supported values are: VOYAGER, NGINX + # Load balancer type. Supported values are: NGINX type: NGINX # Type of Configuration Supported Values are : NONSSL,SSL # tls: NONSSL @@ -301,37 +52,23 @@ If you are using a Managed Service for your Kubernetes cluster,for example Oracl wlsDomain: domainUID: governancedomain oimClusterName: oim_cluster - oimServerT3Port: 14001 - - # Voyager specific values - voyager: - # web port - webPort: 30320 - # stats port - statsPort: 30321 + oimServerT3Port: 14002 ``` ### Create the ingress 1. 
Run the following command to create the ingress: - ``` - $ cd /weblogic-kubernetes-operator - $ helm install governancedomain-nginx-designconsole kubernetes/samples/charts/design-console-ingress --namespace oigns --values kubernetes/samples/charts/design-console-ingress/values.yaml - ``` - - For example: - - ``` - $ cd /scratch/OIGDockerK8S/weblogic-kubernetes-operator - $ helm install governancedomain-nginx-designconsole kubernetes/samples/charts/design-console-ingress --namespace oigns --values kubernetes/samples/charts/design-console-ingress/values.yaml + ```bash + $ cd $WORKDIR + $ helm install governancedomain-nginx-designconsole kubernetes/design-console-ingress --namespace oigns --values kubernetes/design-console-ingress/values.yaml ``` The output will look similar to the following: ``` NAME: governancedomain-nginx-designconsole - LAST DEPLOYED: Wed Oct 21 04:12:00 2020 + Mon Nov 15 04:19:33 2021 NAMESPACE: oigns STATUS: deployed REVISION: 1 @@ -340,13 +77,13 @@ If you are using a Managed Service for your Kubernetes cluster,for example Oracl 1. 
Run the following command to show the ingress is created successfully: - ``` + ```bash $ kubectl describe ing governancedomain-nginx-designconsole -n ``` For example: - ``` + ```bash $ kubectl describe ing governancedomain-nginx-designconsole -n oigns ``` @@ -355,14 +92,14 @@ If you are using a Managed Service for your Kubernetes cluster,for example Oracl ``` Name: governancedomain-nginx-designconsole Namespace: oigns - Address: 10.106.181.99 + Address: Default backend: default-http-backend:80 () Rules: Host Path Backends ---- ---- -------- * - governancedomain-cluster-oim-cluster:14001 () - Annotations: kubernetes.io/ingress.class: nginx-designconsole + governancedomain-cluster-oim-cluster:14002 (10.244.2.103:14002) + Annotations: kubernetes.io/ingress.class: nginx meta.helm.sh/release-name: governancedomain-nginx-designconsole meta.helm.sh/release-namespace: oigns nginx.ingress.kubernetes.io/affinity: cookie @@ -375,11 +112,48 @@ If you are using a Managed Service for your Kubernetes cluster,for example Oracl Events: Type Reason Age From Message ---- ------ ---- ---- ------- - Normal CREATE 38s nginx-ingress-controller Ingress oigns/governancedomain-nginx-designconsole - Normal UPDATE 10s nginx-ingress-controller Ingress oigns/governancedomain-nginx-designconsole + Normal Sync 6s nginx-ingress-controller Scheduled for sync + ``` + +### Update the T3 channel + +1. Log in to the WebLogic Console using `https://${MASTERNODE-HOSTNAME}:${MASTERNODE-PORT}/console`. + +1. Navigate to **Environment**, click **Servers**, and then select **oim_server1**. + +1. Click **Protocols**, and then **Channels**. + +1. Click the default T3 channel called **T3Channel**. + +1. Click **Lock and Edit**. + +1. Set the **External Listen Address** to a worker node where `oim_server1` is running. + + **Note**: Use `kubectl get pods -n -o wide` to see the worker node it is running on. 
For example, below the `governancedomain-oim-server1` is running on `worker-node2`: + + ```bash + $ kubectl get pods -n oigns -o wide + NAME READY STATUS RESTARTS AGE IP NODE NOMINATED NODE READINESS GATES + governancedomain-adminserver 1/1 Running 0 33m 10.244.2.96 worker-node2 + governancedomain-create-fmw-infra-sample-domain-job-8cww8 0/1 Completed 0 11d 10.244.2.45 worker-node2 + governancedomain-oim-server1 1/1 Running 0 31m 10.244.2.98 worker-node2 + governancedomain-soa-server1 1/1 Running 0 31m 10.244.2.97 worker-node2 + helper 1/1 Running 0 11d 10.244.2.30 worker-node2 + logstash-wls-f448b44c8-92l27 1/1 Running 0 7d23h 10.244.1.27 worker-node1 ``` + + +1. Set the **External Listen Port** to the ingress controller port. + +1. Click **Save**. + +1. Click **Activate Changes.** + + +### Restart the OIG domain + +Restart the domain for the above changes to take effect by following [Stopping and starting the administration server and managed servers]({{< relref "/oig/manage-oig-domains/domain-lifecycle#stopping-and-starting-the-administration-server-and-managed-servers" >}}). - ### Design Console Client It is possible to use Design Console from an on-premises install, or from a container image. @@ -390,19 +164,19 @@ The instructions below should be performed on the client where Design Console is 1. Import the CA certificate into the java keystore - If in [Generate a SSL Certificate](../using-the-design-console-with-nginx-ssl/#generate-ssl-certificate) you requested a certificate from a Certificate Authority (CA), then you must import the CA certificate (e.g cacert.crt) that signed your certificate, into the java truststore used by Design Console. 
+ If in [Generate SSL Certificate]({{< relref "/oig/configure-ingress/ingress-nginx-setup-for-oig-domain-setup-on-K8S-ssl.md#generate-ssl-certificate">}}) you requested a certificate from a Certificate Authority (CA), then you must import the CA certificate (e.g cacert.crt) that signed your certificate, into the java truststore used by Design Console. - If in [Generate a SSL Certificate](../using-the-design-console-with-nginx-ssl/#generate-ssl-certificate) you generated a self-signed certicate (e.g tls.crt), you must import the self-signed certificate into the java truststore used by Design Console. + If in [Generate SSL Certificate]({{< relref "/oig/configure-ingress/ingress-nginx-setup-for-oig-domain-setup-on-K8S-ssl.md#generate-ssl-certificate">}}) you generated a self-signed certicate (e.g tls.crt), you must import the self-signed certificate into the java truststore used by Design Console. Import the certificate using the following command: - ``` + ```bash $ keytool -import -trustcacerts -alias dc -file -keystore $JAVA_HOME/jre/lib/security/cacerts ``` where `` is the CA certificate, or self-signed certicate. -1. Once complete follow [Login to the Design Console](../using-the-design-console-with-nginx-ssl/#login-to-the-design-console). +1. Once complete follow [Login to the Design Console](#login-to-the-design-console). #### Using a container image for Design Console @@ -412,111 +186,111 @@ The Design Console can be run from a container using X windows emulation. 1. Execute the following command to start a container to run Design Console: - ``` + ```bash $ docker run -u root --name oigdcbase -it bash ``` For example: - ``` - $ docker run -u root -it --name oigdcbase oracle/oig:12.2.1.4.0 bash + ```bash + $ docker run -u root -it --name oigdcbase oracle/oig:12.2.1.4.0-8-ol7-211022.0723 bash ``` This will take you into a bash shell inside the container: - ``` + ```bash bash-4.2# ``` 1. 
Inside the container set the proxy, for example: - ``` + ```bash bash-4.2# export https_proxy=http://proxy.example.com:80 ``` 1. Install the relevant X windows packages in the container: - ``` + ```bash bash-4.2# yum install libXext libXrender libXtst ``` 1. Execute the following outside the container to create a new Design Console image from the container: - ``` + ```bash $ docker commit ``` For example: - ``` + ```bash $ docker commit oigdcbase oigdc ``` 1. Exit the container bash session: - ``` + ```bash bash-4.2# exit ``` 1. Start a new container using the Design Console image: - ``` + ```bash $ docker run --name oigdc -it oigdc /bin/bash ``` This will take you into a bash shell for the container: - ``` + ```bash bash-4.2# ``` 1. Copy the Ingress CA certificate into the container - If in [Generate a SSL Certificate](../using-the-design-console-with-nginx-ssl/#generate-ssl-certificate) you requested a certificate from a Certificate Authority (CA), then you must copy the CA certificate (e.g cacert.crt) that signed your certificate, into the container + If in [Generate SSL Certificate]({{< relref "/oig/configure-ingress/ingress-nginx-setup-for-oig-domain-setup-on-K8S-ssl.md#generate-ssl-certificate">}}) you requested a certificate from a Certificate Authority (CA), then you must copy the CA certificate (e.g cacert.crt) that signed your certificate, into the container - If in [Generate a SSL Certificate](../using-the-design-console-with-nginx-ssl/#generate-ssl-certificate) you generated a self-signed certicate (e.g tls.crt), you must copy the self-signed certificate into the container + If in [Generate SSL Certificate]({{< relref "/oig/configure-ingress/ingress-nginx-setup-for-oig-domain-setup-on-K8S-ssl.md#generate-ssl-certificate">}}) you generated a self-signed certicate (e.g tls.crt), you must copy the self-signed certificate into the container Run the following command outside the container: - ``` - $ cd /ssl + ```bash + $ cd /ssl $ docker cp 
:/u01/jdk/jre/lib/security/ ``` For example: - ``` - $ cd /scratch/OIGDockerK8S/ssl + ```bash + $ cd /scratch/OIGK8S/ssl $ docker cp tls.crt oigdc:/u01/jdk/jre/lib/security/tls.crt - + ``` 1. Import the certificate using the following command: - ``` + ```bash bash-4.2# /u01/jdk/bin/keytool -import -trustcacerts -alias dc -file /u01/jdk/jre/lib/security/ -keystore /u01/jdk/jre/lib/security/cacerts ``` For example: - ``` + ```bash bash-4.2# /u01/jdk/bin/keytool -import -trustcacerts -alias dc -file /u01/jdk/jre/lib/security/tls.crt -keystore /u01/jdk/jre/lib/security/cacerts ``` 1. In the container run the following to export the DISPLAY: - ``` + ```bash $ export DISPLAY= ``` 1. Start the Design Console from the container: - ``` + ```bash bash-4.2# cd idm/designconsole bash-4.2# sh xlclient.sh ``` - The Design Console login should be displayed. Now follow [Login to the Design Console](../using-the-design-console-with-nginx-ssl/#login-to-the-design-console). + The Design Console login should be displayed. Now follow [Login to the Design Console](#login-to-the-design-console). @@ -529,15 +303,6 @@ The Design Console can be run from a container using X windows emulation. * `User ID`: `xelsysadm` * `Password`: ``. - where `` is as per the following: - - a) For NodePort: `https://:` - - where `` is the value passed in the command earlier, for example: `--set controller.service.nodePorts.http=30321` - - b) For LoadBalancer: `https://:` - - - -1. If successful the Design Console will be displayed. If the VNC session disappears then the connection failed so double check the connection details and try again. + where `` is where `` is `https://${MASTERNODE-HOSTNAME}:${MASTERNODE-PORT}`. +1. If successful the Design Console will be displayed. 
\ No newline at end of file diff --git a/docs-source/content/oig/configure-design-console/Using the design console with NGINX (non-SSL).md b/docs-source/content/oig/configure-design-console/Using the design console with NGINX (non-SSL).md index 787781f57..9aad61c39 100644 --- a/docs-source/content/oig/configure-design-console/Using the design console with NGINX (non-SSL).md +++ b/docs-source/content/oig/configure-design-console/Using the design console with NGINX (non-SSL).md @@ -6,202 +6,37 @@ description: "Configure Design Console with NGINX(non-SSL)." Configure an NGINX ingress (non-SSL) to allow Design Console to connect to your Kubernetes cluster. -{{% notice note %}} -Design Console is not installed as part of the OIG Kubernetes cluster so must be installed on a seperate client before following the steps below. -{{% /notice %}} - - -### Add the NGINX ingress using helm - -**Note**: If already using NGINX with non-SSL for OIG you can skip this section: - -1. Add the Helm chart repository for NGINX using the following command: - - ```bash - $ helm repo add stable https://kubernetes.github.io/ingress-nginx - ``` - - The output will look similar to the following: - - ```bash - "stable" has been added to your repositories - ``` -1. Update the repository using the following command: - - ```bash - $ helm repo update - ``` +1. [Prerequisites](#prerequisites) +1. [Setup routing rules for the Design Console ingress](#setup-routing-rules-for-the-design-console-ingress) +1. [Create the ingress](#create-the-ingress) +1. [Update the T3 channel](#update-the-t3-channel) +1. [Restart the OIG domain](#restart-the-oig-domain) +1. [Design Console client](#design-console-client) - The output will look similar to the following: - - ```bash - Hang tight while we grab the latest from your chart repositories... - ...Successfully got an update from the "stable" chart repository - Update Complete. Happy Helming! - ``` - -1. 
Create a Kubernetes namespace for NGINX by running the following command: - - ```bash - $ kubectl create namespace nginx - ``` - - The output will look similar to the following: - - ```bash - namespace/nginx created - ``` - -### Install NGINX ingress using helm - -Install a NGINX ingress for the Design Console: - -If you can connect directly to the master node IP address from a browser, then install NGINX with the `--set controller.service.type=NodePort` parameter. - -If you are using a Managed Service for your Kubernetes cluster,for example Oracle Kubernetes Engine (OKE) on Oracle Cloud Infrastructure (OCI), and connect from a browser to the Load Balancer IP address, then use the `--set controller.service.type=LoadBalancer` parameter. This instructs the Managed Service to setup a Load Balancer to direct traffic to the NGINX ingress. - -1. To install NGINX use the following helm command depending on if you are using `NodePort` or `LoadBalancer`: - - a) Using NodePort - - ``` - $ helm install nginx-dc-operator stable/ingress-nginx -n nginx --set controller.service.type=NodePort --set controller.admissionWebhooks.enabled=false --set controller.service.nodePorts.http=30315 --set controller.ingressClass=nginx-designconsole --version=3.34.0 - ``` - - The output will look similar to the following: + a. [Using an on-premises installed Design Console](#using-an-on-premises-installed-design-console) - ``` - NAME: nginx-dc-operator - LAST DEPLOYED: Tue Oct 20 07:31:08 2020 - NAMESPACE: nginx - STATUS: deployed - REVISION: 1 - TEST SUITE: None - NOTES: - The ingress-nginx controller has been installed. 
- Get the application URL by running these commands: - export HTTP_NODE_PORT=30315 - export HTTPS_NODE_PORT=$(kubectl --namespace nginx get services -o jsonpath="{.spec.ports[1].nodePort}" nginx-dc-operator-ingress-nginx-controller) - export NODE_IP=$(kubectl --namespace nginx get nodes -o jsonpath="{.items[0].status.addresses[1].address}") - - echo "Visit http://$NODE_IP:$HTTP_NODE_PORT to access your application via HTTP." - echo "Visit https://$NODE_IP:$HTTPS_NODE_PORT to access your application via HTTPS." - - An example Ingress that makes use of the controller: - - apiVersion: networking.k8s.io/v1beta1 - kind: Ingress - metadata: - annotations: - kubernetes.io/ingress.class: nginx-designconsole - name: example - namespace: foo - spec: - rules: - - host: www.example.com - http: - paths: - - backend: - serviceName: exampleService - servicePort: 80 - path: / - # This section is only required if TLS is to be enabled for the Ingress - tls: - - hosts: - - www.example.com - secretName: example-tls - - If TLS is enabled for the Ingress, a Secret containing the certificate and key must also be provided: - - apiVersion: v1 - kind: Secret - metadata: - name: example-tls - namespace: foo - data: - tls.crt: - tls.key: - type: kubernetes.io/tls - ``` + b. [Using a container image for Design Console](#using-a-container-image-for-design-console) - b) Using LoadBalancer +1. [Login to the Design Console](#login-to-the-design-console) - ``` - $ helm install nginx-dc-operator stable/ingress-nginx -n nginx --set controller.service.type=LoadBalancer --set controller.admissionWebhooks.enabled=false --version=3.34.0 - ``` +### Prerequisites - The output will look similar to the following: +If you haven't already configured an NGINX ingress controller (Non-SSL) for OIG, follow [Using an Ingress with NGINX (non-SSL)]({{< relref "/oig/configure-ingress/ingress-nginx-setup-for-oig-domain-setup-on-K8S">}}). 
- ``` - LAST DEPLOYED: Tue Oct 20 07:39:27 2020 - NAMESPACE: nginx - STATUS: deployed - REVISION: 1 - TEST SUITE: None - NOTES: - The ingress-nginx controller has been installed. - It may take a few minutes for the LoadBalancer IP to be available. - You can watch the status by running 'kubectl --namespace nginx get services -o wide -w nginx-dc-operator-ingress-nginx-controller' - - An example Ingress that makes use of the controller: - - apiVersion: networking.k8s.io/v1beta1 - kind: Ingress - metadata: - annotations: - kubernetes.io/ingress.class: nginx - name: example - namespace: foo - spec: - rules: - - host: www.example.com - http: - paths: - - backend: - serviceName: exampleService - servicePort: 80 - path: / - # This section is only required if TLS is to be enabled for the Ingress - tls: - - hosts: - - www.example.com - secretName: example-tls - - If TLS is enabled for the Ingress, a Secret containing the certificate and key must also be provided: - - apiVersion: v1 - kind: Secret - metadata: - name: example-tls - namespace: foo - data: - tls.crt: - tls.key: - type: kubernetes.io/tls - ``` +Make sure you know the master hostname and ingress port for NGINX before proceeding e.g `http://${MASTERNODE-HOSTNAME}:${MASTERNODE-PORT}`. -### Setup Routing Rules for the Design Console ingress +### Setup routing rules for the Design Console ingress 1. Setup routing rules by running the following commands: + ```bash + $ cd $WORKDIR/kubernetes/design-console-ingress ``` - $ cd /weblogic-kubernetes-operator/kubernetes/samples/charts/design-console-ingress - $ cp values.yaml values.yaml.orig - $ vi values.yaml - ``` - - Edit `values.yaml` and ensure that `type: NGINX`, `tls: NONSSL` and `domainUID: governancedomain` are set, for example: + + Edit `values.yaml` and ensure that `tls: NONSSL` and `domainUID: governancedomain` are set, for example: ``` - $ cat values.yaml - # Copyright 2020 Oracle Corporation and/or its affiliates. 
- # Licensed under the Universal Permissive License v 1.0 as shown at https://oss.oracle.com/licenses/upl. - - # Default values for design-console-ingress. - # This is a YAML-formatted file. - # Declare variables to be passed into your templates. - - # Load balancer type. Supported values are: VOYAGER, NGINX + # Load balancer type. Supported values are: NGINX type: NGINX # Type of Configuration Supported Values are : NONSSL,SSL # tls: NONSSL @@ -214,37 +49,27 @@ If you are using a Managed Service for your Kubernetes cluster,for example Oracl wlsDomain: domainUID: governancedomain oimClusterName: oim_cluster - oimServerT3Port: 14001 - - # Voyager specific values - voyager: - # web port - webPort: 30320 - # stats port - statsPort: 30321 + oimServerT3Port: 14002 ``` ### Create the ingress 1. Run the following command to create the ingress: + ```bash + $ cd $WORKDIR + $ helm install governancedomain-nginx-designconsole kubernetes/design-console-ingress --namespace oigns --values kubernetes/design-console-ingress/values.yaml ``` - $ cd /weblogic-kubernetes-operator - $ helm install governancedomain-nginx-designconsole kubernetes/samples/charts/design-console-ingress --namespace oigns --values kubernetes/samples/charts/design-console-ingress/values.yaml - ``` + + **Note**: If using Kubernetes 1.18 then add `--version=3.34.0` to the end of command. For example: - ``` - $ cd /scratch/OIGDockerK8S/weblogic-kubernetes-operator - $ helm install governancedomain-nginx-designconsole kubernetes/samples/charts/design-console-ingress --namespace oigns --values kubernetes/samples/charts/design-console-ingress/values.yaml - ``` - The output will look similar to the following: ``` NAME: governancedomain-nginx-designconsole - LAST DEPLOYED: Tue Oct 20 08:01:47 2020 + LAST DEPLOYED: Mon Nov 15 06:07:09 2021 NAMESPACE: oigns STATUS: deployed REVISION: 1 @@ -253,29 +78,29 @@ If you are using a Managed Service for your Kubernetes cluster,for example Oracl 1. 
Run the following command to show the ingress is created successfully: - ``` + ```bash $ kubectl describe ing governancedomain-nginx-designconsole -n ``` For example: - ``` + ```bash $ kubectl describe ing governancedomain-nginx-designconsole -n oigns ``` The output will look similar to the following: - ``` + ``` Name: governancedomain-nginx-designconsole Namespace: oigns - Address: 10.99.240.21 + Address: Default backend: default-http-backend:80 () Rules: Host Path Backends ---- ---- -------- * - governancedomain-cluster-oim-cluster:14001 () - Annotations: kubernetes.io/ingress.class: nginx-designconsole + governancedomain-cluster-oim-cluster:14002 (10.244.1.25:14002) + Annotations: kubernetes.io/ingress.class: nginx meta.helm.sh/release-name: governancedomain-nginx-designconsole meta.helm.sh/release-namespace: oigns nginx.ingress.kubernetes.io/affinity: cookie @@ -283,12 +108,50 @@ If you are using a Managed Service for your Kubernetes cluster,for example Oracl Events: Type Reason Age From Message ---- ------ ---- ---- ------- - Normal CREATE 117s nginx-ingress-controller Ingress oigns/governancedomain-nginx-designconsole - Normal UPDATE 64s nginx-ingress-controller Ingress oigns/governancedomain-nginx-designconsole + Normal Sync 13s nginx-ingress-controller Scheduled for sync ``` +### Update the T3 channel + +1. Log in to the WebLogic Console using `http://${MASTERNODE-HOSTNAME}:${MASTERNODE-PORT}/console`. + +1. Navigate to **Environment**, click **Servers**, and then select **oim_server1**. + +1. Click **Protocols**, and then **Channels**. + +1. Click the default T3 channel called **T3Channel**. + +1. Click **Lock and Edit**. + +1. Set the **External Listen Address** to a worker node where `oim_server1` is running. + + **Note**: Use `kubectl get pods -n -o wide` to see the worker node it is running on. 
For example, below the `governancedomain-oim-server1` is running on `worker-node2`: -### Design Console Client + ```bash + $ kubectl get pods -n oigns -o wide + NAME READY STATUS RESTARTS AGE IP NODE NOMINATED NODE READINESS GATES + governancedomain-adminserver 1/1 Running 0 33m 10.244.2.96 worker-node2 + governancedomain-create-fmw-infra-sample-domain-job-8cww8 0/1 Completed 0 11d 10.244.2.45 worker-node2 + governancedomain-oim-server1 1/1 Running 0 31m 10.244.2.98 worker-node2 + governancedomain-soa-server1 1/1 Running 0 31m 10.244.2.97 worker-node2 + helper 1/1 Running 0 11d 10.244.2.30 worker-node2 + logstash-wls-f448b44c8-92l27 1/1 Running 0 7d23h 10.244.1.27 worker-node1 + ``` + + +1. Set the **External Listen Port** to the ingress controller port. + +1. Click **Save**. + +1. Click **Activate Changes.** + + +### Restart the OIG domain + +Restart the domain for the above changes to take effect by following [Stopping and starting the administration server and managed servers]({{< relref "/oig/manage-oig-domains/domain-lifecycle#stopping-and-starting-the-administration-server-and-managed-servers" >}}). + + +### Design Console client It is possible to use Design Console from an on-premises install, or from a container image. @@ -296,7 +159,7 @@ It is possible to use Design Console from an on-premises install, or from a cont 1. Install Design Console on an on-premises machine -1. Follow [Login to the Design Console](../using-the-design-console-with-nginx-ssl/#login-to-the-design-console). +1. Follow [Login to the Design Console](#login-to-the-design-console). #### Using a container image for Design Console @@ -306,78 +169,78 @@ The Design Console can be run from a container using X windows emulation. 1. 
Execute the following command to start a container to run Design Console: - ``` + ```bash $ docker run -u root --name oigdcbase -it bash ``` For example: - ``` - $ docker run -u root -it --name oigdcbase oracle/oig:12.2.1.4.0 bash + ```bash + $ docker run -u root -it --name oigdcbase oracle/oig:12.2.1.4.0-8-ol7-211022.0723 bash ``` This will take you into a bash shell inside the container: - ``` + ```bash bash-4.2# ``` 1. Inside the container set the proxy, for example: - ``` + ```bash bash-4.2# export https_proxy=http://proxy.example.com:80 ``` 1. Install the relevant X windows packages in the container: - ``` + ```bash bash-4.2# yum install libXext libXrender libXtst ``` 1. Execute the following outside the container to create a new Design Console image from the container: - ``` + ```bash $ docker commit ``` For example: - ``` + ```bash $ docker commit oigdcbase oigdc ``` 1. Exit the container bash session: - ``` + ```bash bash-4.2# exit ``` 1. Start a new container using the Design Console image: - ``` + ```bash $ docker run --name oigdc -it oigdc /bin/bash ``` This will take you into a bash shell for the container: - ``` + ```bash bash-4.2# ``` 1. In the container run the following to export the DISPLAY: - ``` + ```bash $ export DISPLAY= ``` 1. Start the Design Console from the container: - ``` + ```bash bash-4.2# cd idm/designconsole bash-4.2# sh xlclient.sh ``` - The Design Console login should be displayed. Now follow [Login to the Design Console](../using-the-design-console-with-nginx-ssl/#login-to-the-design-console). + The Design Console login should be displayed. Now follow [Login to the Design Console](#login-to-the-design-console). ### Login to the Design Console @@ -388,16 +251,9 @@ The Design Console can be run from a container using X windows emulation. * `User ID`: `xelsysadm` * `Password`: ``. 
- where `` is as per the following: - - a) For NodePort: `http://:` - - where `` is the value passed in the command earlier, for example: `--set controller.service.nodePorts.http=30315` - - b) For LoadBalancer: `http://:` - - + where `` is `http://${MASTERNODE-HOSTNAME}:${MASTERNODE-PORT}` + -1. If successful the Design Console will be displayed. If the VNC session disappears then the connection failed so double check the connection details and try again. +1. If successful the Design Console will be displayed. diff --git a/docs-source/content/oig/configure-design-console/Using the design console with Voyager (SSL).md b/docs-source/content/oig/configure-design-console/Using the design console with Voyager (SSL).md deleted file mode 100644 index d431944bb..000000000 --- a/docs-source/content/oig/configure-design-console/Using the design console with Voyager (SSL).md +++ /dev/null @@ -1,473 +0,0 @@ ---- -title: "d. Using Design Console with Voyager(SSL)" -weight: 4 -description: "Configure Design Console with Voyager(SSL)." ---- - -Configure a Voyager ingress (SSL) to allow Design Console to connect to your Kubernetes cluster. - -#### Generate SSL Certificate - -**Note**: If already using Voyager with SSL for OIG you can skip this section: - -1. Generate a private key and certificate signing request (CSR) using a tool of your choice. Send the CSR to your certificate authority (CA) to generate the certificate. 
- - If you want to use a certificate for testing purposes you can generate a self signed certificate using openssl: - - ``` - $ mkdir /ssl - $ cd /ssl - $ openssl req -x509 -nodes -days 365 -newkey rsa:2048 -keyout tls.key -out tls.crt -subj "/CN=" - ``` - - For example: - - ``` - $ mkdir /scratch/OIGDockerK8S/ssl - $ cd /scratch/OIGDockerK8S/ssl - $ openssl req -x509 -nodes -days 365 -newkey rsa:2048 -keyout tls.key -out tls.crt -subj "/CN=masternode.example.com" - ``` - - **Note**: The `CN` should match the host.domain of the master node in order to prevent hostname problems during certificate verification. - - The output will look similar to the following: - - ``` - $ openssl req -x509 -nodes -days 365 -newkey rsa:2048 -keyout tls.key -out tls.crt -subj "/CN=masternode.example.com" - Generating a 2048 bit RSA private key - ..........................................+++ - .......................................................................................................+++ - writing new private key to 'tls.key' - ----- - $ - ``` - -#### Create a Kubernetes Secret for SSL - -**Note**: If already using Voyager with SSL for OIG you can skip this section: - -1. 
Create a secret for SSL containing the SSL certificate by running the following command: - - ``` - $ kubectl -n oigns create secret tls -tls-cert --key /tls.key --cert /tls.crt - ``` - - For example: - - ``` - $ kubectl -n oigns create secret tls governancedomain-tls-cert --key /scratch/OIGDockerK8S/ssl/tls.key --cert /scratch/OIGDockerK8S/ssl/tls.crt - ``` - - The output will look similar to the following: - - ``` - secret/governancedomain-tls-cert created - ``` - - Confirm that the secret is created by running the following command: - - ``` - $ kubectl get secret governancedomain-tls-cert -o yaml -n oigns - ``` - - The output will look similar to the following: - - ``` - apiVersion: v1 - data: - tls.crt: LS0tLS1CRUdJTiBDRVJUSUZJQ0FURS0tLS0tCk1JSURGVENDQWYyZ0F3SUJBZ0lKQUl3ZjVRMWVxZnljTUEwR0NTcUdTSWIzRFFFQkN3VUFNQ0V4SHpBZEJnTlYKQkFNTUZtUmxiakF4WlhadkxuVnpMbTl5WVdOc1pTNWpiMjB3SGhjTk1qQXdPREV3TVRReE9UUXpXaGNOTWpFdwpPREV3TVRReE9UUXpXakFoTVI4d0hRWURWUVFEREJaa1pXNHdNV1YyYnk1MWN5NXZjbUZqYkdVdVkyOXRNSUlCCklqQU5CZ2txaGtpRzl3MEJBUUVGQUFPQ0FROEFNSUlCQ2dLQ0FRRUEyY0lpVUhwcTRVZzBhaGR6aXkycHY2cHQKSVIza2s5REd2eVRNY0syaWZQQ2dtUU5CdHV6VXNFN0l4c294eldITmU5RFpXRXJTSjVON3FYYm1lTzJkMVd2NQp1aFhzbkFTbnkwY1NLUE9xOUNZVDNQSlpDVk1MK0llZVFKdnhaVjZaWWU4V2FFL1NQSGJzczRjYy9wcG1mc3pxCnErUi83cXEyMm9ueHNHaE9vQ1h1TlQvMFF2WXVzMnNucGtueWRKRHUxelhGbDREYkFIZGMvamNVK0NPWWROeS8KT3Iza2JIV0FaTkR4OWxaZUREOTRmNXZLbGJwMy9rcUF2V0FkSVJZa2UrSmpNTHg0VHo2ZlM0VXoxbzdBSTVuSApPQ1ZMblV5U0JkaGVuWTNGNEdFU0wwbnorVlhFWjRWVjRucWNjRmo5cnJ0Q29pT1BBNlgvNGdxMEZJbi9Qd0lECkFRQUJvMUF3VGpBZEJnTlZIUTRFRmdRVWw1VnVpVDBDT0xGTzcxMFBlcHRxSC9DRWZyY3dId1lEVlIwakJCZ3cKRm9BVWw1VnVpVDBDT0xGTzcxMFBlcHRxSC9DRWZyY3dEQVlEVlIwVEJBVXdBd0VCL3pBTkJna3Foa2lHOXcwQgpBUXNGQUFPQ0FRRUFXdEN4b2ZmNGgrWXZEcVVpTFFtUnpqQkVBMHJCOUMwL1FWOG9JQzJ3d1hzYi9KaVNuMHdOCjNMdHppejc0aStEbk1yQytoNFQ3enRaSkc3NVluSGRKcmxQajgzVWdDLzhYTlFCSUNDbTFUa3RlVU1jWG0reG4KTEZEMHpReFhpVzV0N1FHcWtvK2FjeN3JRMXlNSE9HdVVkTTZETzErNXF4cTdFNXFMamhyNEdKejV5OAoraW8zK25UcUVKMHFQOVRocG96RXhBMW80OEY0ZHJ
ybWdqd3ROUldEQVpBYmYyV1JNMXFKWXhxTTJqdU1FQWNsCnFMek1TdEZUQ2o1UGFTQ0NUV1VEK3ZlSWtsRWRpaFdpRm02dzk3Y1diZ0lGMlhlNGk4L2szMmF1N2xUTDEvd28KU3Q2dHpsa20yV25uUFlVMzBnRURnVTQ4OU02Z1dybklpZz09Ci0tLS0tRU5EIENFUlRJRklDQVRFLS0tLS0K - tls.key: LS0tLS1CRUdJQVRFIEtFWS0tLS0tCk1JSUV1d0lCQURBTkJna3Foa2lHOXcwQkFRRUZBQVNDQktVd2dnU2hBZ0VBQW9JQkFRRFp3aUpRZW1yaFNEUnEKRjNPTExhbS9xbTBoSGVTVDBNYS9KTXh3cmFKODhLQ1pBMEcyN05Td1Rzakd5akhOWWMxNzBObFlTdEluazN1cApkdVo0N1ozVmEvbTZGZXljQktmTFJ4SW84NnIwSmhQYzhsa0pVd3Y0aDU1QW0vRmxYcGxoN3hab1Q5SThkdXl6Cmh4eittbVorek9xcjVIL3VxcmJhaWZHd2FFNmdKZTQxUC9SQzlpNnpheWVtU2ZKMGtPN1hOY1dYZ05zQWQxeisKTnhUNEk1aDAzTDg2dmVSc2RZQmswUEgyVmw0TVAzaC9tOHFWdW5mK1NvQzlZQjBoRmlSNzRtTXd2SGhQUHA5TApoVFBXanNBam1jYzRKVXVkVEpJRjJGNmRqY1hnWVJJdlNmUDVWY1JuaFZYaWVweHdXUDJ1dTBLaUk0OERwZi9pCkNyUVVpZjgvQWdNQkFBRUNnZjl6cnE2TUVueTFNYWFtdGM2c0laWU1QSDI5R2lSVVlwVXk5bG1sZ3BqUHh3V0sKUkRDay9Td0FmZG9yd1Q2ejNVRk1oYWJ4UU01a04vVjZFYkJlamQxTGhCRW15bjdvWTVEQWJRRTR3RG9SZWlrVApONndWU0FrVC92Z1RXc1RqRlY1bXFKMCt6U2ppOWtySkZQNVNRN1F2cUswQ3BHRlNhVjY2dW8ycktiNmJWSkJYCkxPZmZPMytlS0tVazBaTnE1Q1NVQk9mbnFoNVFJSGdpaDNiMTRlNjBDdGhYcEh6bndrNWhaMHBHZE9BQm9aTkoKZ21lanUyTEdzVWxXTjBLOVdsUy9lcUllQzVzQm9jaWlocmxMVUpGWnpPRUV6LzErT2cyemhmT29yTE9rMTIrTgpjQnV0cTJWQ2I4ZFJDaFg1ZzJ0WnBrdzgzcXN5RSt3M09zYlQxa0VDZ1lFQTdxUnRLWGFONUx1SENvWlM1VWhNCm9Hak1WcnYxTEg0eGNhaDJITmZnMksrMHJqQkJONGpkZkFDMmF3R3ZzU1EyR0lYRzVGYmYyK0pwL1kxbktKOEgKZU80MzNLWVgwTDE4NlNNLzFVay9HSEdTek1CWS9KdGR6WkRrbTA4UnBwaTl4bExTeDBWUWtFNVJVcnJJcTRJVwplZzBOM2RVTHZhTVl1UTBrR2dncUFETUNnWUVBNlpqWCtjU2VMZ1BVajJENWRpUGJ1TmVFd2RMeFNPZDFZMUFjCkUzQ01YTWozK2JxQ3BGUVIrTldYWWVuVmM1QiszajlSdHVnQ0YyTkNSdVdkZWowalBpL243UExIRHdCZVY0bVIKM3VQVHJmamRJbFovSFgzQ2NjVE94TmlaajU4VitFdkRHNHNHOGxtRTRieStYRExIYTJyMWxmUk9sUVRMSyswVgpyTU93eU1VQ2dZRUF1dm14WGM4NWxZRW9hU0tkU0cvQk9kMWlYSUtmc2VDZHRNT2M1elJ0UXRsSDQwS0RscE54CmxYcXBjbVc3MWpyYzk1RzVKNmE1ZG5xTE9OSFZoWW8wUEpmSXhPU052RXI2MTE5NjRBMm5sZXRHYlk0M0twUkEKaHBPRHlmdkZoSllmK29kaUJpZFUyL3ZBMCtUczNSUHJzRzBSOUVDOEZqVDNaZVhaNTF1R0xPa0NnWUFpTmU0NwplQjRxWXd
rNFRsMTZmZG5xQWpaQkpLR05xY2c1V1R3alpMSkp6R3owdCtuMkl4SFd2WUZFSjdqSkNmcHFsaDlqCmlDcjJQZVV3K09QTlNUTG1JcUgydzc5L1pQQnNKWXVsZHZ4RFdGVWFlRXg1aHpkNDdmZlNRRjZNK0NHQmthYnIKVzdzU3R5V000ZFdITHpDaGZMS20yWGJBd0VqNUQrbkN1WTRrZVFLQmdFSkRHb0puM1NCRXcra2xXTE85N09aOApnc3lYQm9mUW1lRktIS2NHNzFZUFhJbTRlV1kyUi9KOCt5anc5b1FJQ3o5NlRidkdSZEN5QlJhbWhoTmFGUzVyCk9MZUc0ejVENE4zdThUc0dNem9QcU13KzBGSXJiQ3FzTnpGWTg3ekZweEdVaXZvRWZLNE82YkdERTZjNHFqNGEKNmlmK0RSRSt1TWRMWTQyYTA3ekoKLS0tLS1FTkQgUFJJVkFURSBLRVktLS0tLQo= - kind: Secret - metadata: - creationTimestamp: "2020-08-10T14:22:52Z" - managedFields: - - apiVersion: v1 - fieldsType: FieldsV1 - fieldsV1: - f:data: - .: {} - f:tls.crt: {} - f:tls.key: {} - f:type: {} - manager: kubectl - operation: Update - time: "2020-08-10T14:22:52Z" - name: governancedomain-tls-cert - namespace: oigns - resourceVersion: "3722477" - selfLink: /api/v1/namespaces/oigns/secrets/governancedomain-tls-cert - uid: 596fe0fe-effd-4eb9-974d-691da3a3b15a - type: kubernetes.io/tls - ``` - - -### Add the Voyager ingress using helm - -**Note**: If already using Voyager with SSL for OIG you can skip this section: - -1. Add the Helm chart repository for Voyager using the following command: - - ```bash - $ helm repo add appscode https://charts.appscode.com/stable - ``` - - The output will look similar to the following: - - ```bash - "appscode" has been added to your repositories - ``` -1. Update the repository using the following command: - - ```bash - $ helm repo update - ``` - - The output will look similar to the following: - - ```bash - Hang tight while we grab the latest from your chart repositories... - ...Successfully got an update from the "appscode" chart repository - Update Complete. Happy Helming! - ``` - -1. Create a namespace for Voyager: - - ``` - $ kubectl create namespace voyagerssl - ``` - - The output will look similar to the following: - - ``` - namespace/voyagerssl created - ``` - - - -### Install Voyager ingress using helm - -1. 
Run the following command to install the ingress: - - ``` - $ helm install voyager-designconsole-operator appscode/voyager --version v12.0.0-rc.1 --namespace voyagerssl --set cloudProvider=baremetal --set apiserver.enableValidatingWebhook=false - ``` - - **Note**: For bare metal Kubernetes use `--set cloudProvider=baremetal`. If using a managed Kubernetes service then the value should be set for your specific service as per the [Voyager](https://voyagermesh.com/docs/6.0.0/setup/install/) install guide. - - The output will look similar to the following: - - ``` - NAME: voyager-designconsole-operator - LAST DEPLOYED: Wed Oct 21 09:24:55 2020 - NAMESPACE: voyagerssl - STATUS: deployed - REVISION: 1 - TEST SUITE: None - NOTES: - Set cloudProvider for installing Voyager - - To verify that Voyager has started, run: - - kubectl --namespace=voyagerssl get deployments -l "release=voyager-designconsole-operator, app=voyager" - ``` - - -### Setup Routing Rules for the Design Console ingress - -1. Setup routing rules by running the following commands: - - ``` - $ cd /weblogic-kubernetes-operator/kubernetes/samples/charts/design-console-ingress - $ cp values.yaml values.yaml.orig - $ vi values.yaml - ``` - - Edit `values.yaml` and ensure that `type: VOYAGER`, `tls: SSL`, `secretName:governancedomain-tls-cert` and `domainUID: governancedomain` are set, and that `webPort` and `statsPort` are set to free ports, for example: - - ``` - $ cat values.yaml - # Copyright 2020 Oracle Corporation and/or its affiliates. - # Licensed under the Universal Permissive License v 1.0 as shown at https://oss.oracle.com/licenses/upl. - - # Default values for design-console-ingress. - # This is a YAML-formatted file. - # Declare variables to be passed into your templates. - - # Load balancer type. 
Supported values are: VOYAGER, NGINX - type: VOYAGER - # Type of Configuration Supported Values are : NONSSL,SSL - # tls: NONSSL - tls: SSL - # TLS secret name if the mode is SSL - secretName: governancedomain-tls-cert - - - # WLS domain as backend to the load balancer - wlsDomain: - domainUID: governancedomain - oimClusterName: oim_cluster - oimServerT3Port: 14001 - - # Voyager specific values - voyager: - # web port - webPort: 30330 - # stats port - statsPort: 30331 - ``` - -### Create the ingress - -1. Run the following command to create the ingress: - - ``` - $ cd /weblogic-kubernetes-operator - $ helm install governancedomain-voyager-designconsole kubernetes/samples/charts/design-console-ingress --namespace oigns --values kubernetes/samples/charts/design-console-ingress/values.yaml - ``` - - For example: - - ``` - $ cd /scratch/OIGDockerK8S/weblogic-kubernetes-operator - $ helm install governancedomain-voyager-designconsole kubernetes/samples/charts/design-console-ingress --namespace oigns --values kubernetes/samples/charts/design-console-ingress/values.yaml - ``` - - The output will look similar to the following: - - ``` - NAME: governancedomain-voyager-designconsole - LAST DEPLOYED: Wed Oct 21 09:59:43 2020 - NAMESPACE: oigns - STATUS: deployed - REVISION: 1 - TEST SUITE: None - ``` - -1. Run the following command to show the ingress is created successfully: - - ``` - $ kubectl get ingress.voyager.appscode.com -n - ``` - - For example: - - ``` - $ kubectl get ingress.voyager.appscode.com -n oigns - ``` - - The output will look similar to the following: - - ``` - NAME HOSTS LOAD_BALANCER_IP AGE - governancedomain-voyager-designconsole * 10s - ``` - -1. 
Return details of the ingress using the following command: - - ``` - $ kubectl describe ingress.voyager.appscode.com governancedomain-voyager-designconsole -n oigns - ``` - - The output will look similar to the following: - - ``` - Name: governancedomain-voyager-designconsole - Namespace: oigns - Labels: app.kubernetes.io/managed-by=Helm - weblogic.resourceVersion=domain-v2 - Annotations: ingress.appscode.com/affinity: cookie - ingress.appscode.com/stats: true - ingress.appscode.com/type: NodePort - meta.helm.sh/release-name: governancedomain-voyager-designconsole - meta.helm.sh/release-namespace: oigns - API Version: voyager.appscode.com/v1beta1 - Kind: Ingress - Metadata: - Creation Timestamp: 2020-10-21T09:26:48Z - Generation: 1 - Resource Version: 15430914 - Self Link: /apis/voyager.appscode.com/v1beta1/namespaces/oigns/ingresses/governancedomain-voyager-designconsole - UID: 89f42060-c8e6-470f-b661-14b9969fe1aa - Spec: - Frontend Rules: - Port: 443 - Rules: - http-request set-header WL-Proxy-SSL true - Rules: - Host: * - Http: - Node Port: 30330 - Paths: - Backend: - Service Name: governancedomain-cluster-oim-cluster - Service Port: 14001 - Path: / - Tls: - Hosts: - * - Secret Name: dc-tls-cert - Events: - Type Reason Age From Message - ---- ------ ---- ---- ------- - Normal ServiceReconcileSuccessful 54m voyager-operator Successfully patched NodePort Service voyager-governancedomain-voyager-designconsole - Normal DeploymentReconcileSuccessful 54m voyager-operator Successfully patched HAProxy Deployment voyager-governancedomain-voyager-designconsole - Normal DeploymentReconcileSuccessful 44m voyager-operator Successfully patched HAProxy Deployment voyager-governancedomain-voyager-designconsole - ``` - -### Design Console Client - -It is possible to use Design Console from an on-premises install, or from a container image. 
- -#### Using an on-premises installed Design Console - -The instructions below should be performed on the client where Design Console is installed. - -1. Import the CA certificate into the java keystore - - If in [Generate a SSL Certificate](../using-the-design-console-with-voyager-ssl/#generate-ssl-certificate) you requested a certificate from a Certificate Authority (CA), then you must import the CA certificate (e.g cacert.crt) that signed your certificate, into the java truststore used by Design Console. - - If in [Generate a SSL Certificate](../using-the-design-console-with-voyager-ssl/#generate-ssl-certificate) you generated a self-signed certicate (e.g tls.crt), you must import the self-signed certificate into the java truststore used by Design Console. - - Import the certificate using the following command: - - ``` - $ keytool -import -trustcacerts -alias dc -file -keystore $JAVA_HOME/jre/lib/security/cacerts - ``` - - where `` is the CA certificate, or self-signed certicate. - -1. Once complete follow [Login to the Design Console](../using-the-design-console-with-voyager-ssl/#login-to-the-design-console). - -#### Using a container image for Design Console - -The Design Console can be run from a container using X windows emulation. - -1. On the parent machine where the Design Console is to be displayed, run `xhost+`. - -1. Execute the following command to start a container to run Design Console: - - ``` - $ docker run -u root --name oigdcbase -it bash - ``` - - For example: - - ``` - $ docker run -u root -it --name oigdcbase oracle/oig:12.2.1.4.0 bash - ``` - - This will take you into a bash shell inside the container: - - ``` - bash-4.2# - ``` - -1. Inside the container set the proxy, for example: - - ``` - bash-4.2# export https_proxy=http://proxy.example.com:80 - ``` - -1. Install the relevant X windows packages in the container: - - ``` - bash-4.2# yum install libXext libXrender libXtst - ``` - -1. 
Execute the following outside the container to create a new Design Console image from the container: - - ``` - $ docker commit - ``` - - For example: - - ``` - $ docker commit oigdcbase oigdc - ``` - -1. Exit the container bash session: - - ``` - bash-4.2# exit - ``` - -1. Start a new container using the Design Console image: - - ``` - $ docker run --name oigdc -it oigdc /bin/bash - ``` - - This will take you into a bash shell for the container: - - ``` - bash-4.2# - ``` - -1. Copy the Ingress CA certificate into the container - - If in [Generate a SSL Certificate](../using-the-design-console-with-voyager-ssl/#generate-ssl-certificate) you requested a certificate from a Certificate Authority (CA), then you must copy the CA certificate (e.g cacert.crt) that signed your certificate, into the container - - If in [Generate a SSL Certificate](../using-the-design-console-with-voyager-ssl/#generate-ssl-certificate) you generated a self-signed certicate (e.g tls.crt), you must copy the self-signed certificate into the container - - Run the following command outside the container: - - ``` - $ cd /ssl - $ docker cp :/u01/jdk/jre/lib/security/ - ``` - - For example: - - ``` - $ cd /scratch/OIGDockerK8S/ssl - $ docker cp tls.crt oigdc:/u01/jdk/jre/lib/security/tls.crt - - -1. Import the certificate using the following command: - - ``` - bash-4.2# /u01/jdk/bin/keytool -import -trustcacerts -alias dc -file /u01/jdk/jre/lib/security/ -keystore /u01/jdk/jre/lib/security/cacerts - ``` - - For example: - - ``` - bash-4.2# /u01/jdk/bin/keytool -import -trustcacerts -alias dc -file /u01/jdk/jre/lib/security/tls.crt -keystore /u01/jdk/jre/lib/security/cacerts - ``` - - -1. In the container run the following to export the DISPLAY: - - ``` - $ export DISPLAY= - ``` - -1. Start the Design Console from the container: - - ``` - bash-4.2# cd idm/designconsole - bash-4.2# sh xlclient.sh - ``` - - The Design Console login should be displayed. 
Now follow [Login to the Design Console](../using-the-design-console-with-voyager-ssl/#login-to-the-design-console). - - - - -#### Login to the Design Console - -1. Launch the Design Console and in the Oracle Identity Manager Design Console login page enter the following details: - - Enter the following details and click Login: - * `Server URL`: `` - * `User ID`: `xelsysadm` - * `Password`: ``. - - where `` is `http://:` - - `` is the value passed for webPort in the `values.yaml` earlier, for example: `30330` - - -1. If successful the Design Console will be displayed. If the VNC session disappears then the connection failed so double check the connection details and try again. - diff --git a/docs-source/content/oig/configure-design-console/Using the design console with Voyager (non-SSL).md b/docs-source/content/oig/configure-design-console/Using the design console with Voyager (non-SSL).md deleted file mode 100644 index 1d951fb56..000000000 --- a/docs-source/content/oig/configure-design-console/Using the design console with Voyager (non-SSL).md +++ /dev/null @@ -1,349 +0,0 @@ ---- -title: "c. Using Design Console with Voyager(non-SSL)" -weight: 3 -description: "Configure Design Console with Voyager(non-SSL)." ---- - -Configure a Voyager ingress (non-SSL) to allow Design Console to connect to your Kubernetes cluster. - -### Add the Voyager ingress using helm - -**Note**: If already using Voyager with non-SSL for OIG you can skip this section: - -1. Add the Helm chart repository for Voyager using the following command: - - ```bash - $ helm repo add appscode https://charts.appscode.com/stable - ``` - - The output will look similar to the following: - - ```bash - "appscode" has been added to your repositories - ``` -1. Update the repository using the following command: - - ```bash - $ helm repo update - ``` - - The output will look similar to the following: - - ```bash - Hang tight while we grab the latest from your chart repositories... 
- ...Successfully got an update from the "appscode" chart repository - Update Complete. Happy Helming! - ``` - -1. Create a namespace for Voyager: - - ``` - $ kubectl create namespace voyager - ``` - - The output will look similar to the following: - - ``` - namespace/voyager created - ``` - -### Install Voyager ingress using helm - -``` - $ helm install voyager-designconsole-operator appscode/voyager --version v12.0.0-rc.1 --namespace voyager --set cloudProvider=baremetal --set apiserver.enableValidatingWebhook=false - ``` - - **Note**: For bare metal Kubernetes use `--set cloudProvider=baremetal`. If using a managed Kubernetes service then the value should be set for your specific service as per the [Voyager](https://voyagermesh.com/docs/6.0.0/setup/install/) install guide. - - The output will look similar to the following: - - ``` - NAME: voyager-designconsole-operator - LAST DEPLOYED: Wed Oct 21 08:31:32 2020 - NAMESPACE: voyager - STATUS: deployed - REVISION: 1 - TEST SUITE: None - NOTES: - Set cloudProvider for installing Voyager - - To verify that Voyager has started, run: - - kubectl --namespace=voyager get deployments -l "release=voyager-designconsole-operator, app=voyager" - ``` - -### Setup Routing Rules for the Design Console ingress - -1. Setup routing rules by running the following commands: - - ``` - $ cd /weblogic-kubernetes-operator/kubernetes/samples/charts/design-console-ingress - $ cp values.yaml values.yaml.orig - $ vi values.yaml - ``` - - Edit `values.yaml` and ensure that `type=VOYAGER`, `tls=NONSSL` and `domainUID: governancedomain are set, and that `webPort` and `statsPort` are set to free ports, for example: - - ``` - $ cat values.yaml - # Copyright 2020 Oracle Corporation and/or its affiliates. - # Licensed under the Universal Permissive License v 1.0 as shown at https://oss.oracle.com/licenses/upl. - - # Default values for design-console-ingress. - # This is a YAML-formatted file. - # Declare variables to be passed into your templates. 
- - # Load balancer type. Supported values are: VOYAGER, NGINX - type: VOYAGER - # Type of Configuration Supported Values are : NONSSL,SSL - # tls: NONSSL - tls: NONSSL - # TLS secret name if the mode is SSL - secretName: dc-tls-cert - - - # WLS domain as backend to the load balancer - wlsDomain: - domainUID: governancedomain - oimClusterName: oim_cluster - oimServerT3Port: 14001 - - # Voyager specific values - voyager: - # web port - webPort: 30325 - # stats port - statsPort: 30326 - ``` - -### Create the ingress - -1. Run the following command to create the ingress: - - ``` - $ cd /weblogic-kubernetes-operator - $ helm install governancedomain-voyager-designconsole kubernetes/samples/charts/design-console-ingress --namespace oigns --values kubernetes/samples/charts/design-console-ingress/values.yaml - ``` - - For example: - - ``` - $ cd /scratch/OIGDockerK8S/weblogic-kubernetes-operator - $ helm install governancedomain-voyager-designconsole kubernetes/samples/charts/design-console-ingress --namespace oigns --values kubernetes/samples/charts/design-console-ingress/values.yaml - ``` - - The output will look similar to the following: - - ``` - NAME: governancedomain-voyager-designconsole - LAST DEPLOYED: Wed Oct 21 08:36:03 2020 - NAMESPACE: oigns - STATUS: deployed - REVISION: 1 - TEST SUITE: None - - ``` - -1. Run the following command to show the ingress is created successfully: - - ``` - $ kubectl get ingress.voyager.appscode.com -n - ``` - - For example: - - ``` - $ kubectl get ingress.voyager.appscode.com -n oigns - ``` - - The output will look similar to the following: - - ``` - NAME HOSTS LOAD_BALANCER_IP AGE - governancedomain-voyager-designconsole * 10s - ``` - -1. 
Return details of the ingress using the following command: - - ``` - $ kubectl describe ingress.voyager.appscode.com governancedomain-voyager-designconsole -n oigns - ``` - - The output will look similar to the following: - - ``` - Name: governancedomain-voyager-designconsole - Namespace: oigns - Labels: app.kubernetes.io/managed-by=Helm - weblogic.resourceVersion=domain-v2 - Annotations: ingress.appscode.com/affinity: cookie - ingress.appscode.com/stats: true - ingress.appscode.com/type: NodePort - meta.helm.sh/release-name: governancedomain-voyager-designconsole - meta.helm.sh/release-namespace: oigns - API Version: voyager.appscode.com/v1beta1 - Kind: Ingress - Metadata: - Creation Timestamp: 2020-10-21T15:46:29Z - Generation: 1 - Managed Fields: - API Version: voyager.appscode.com/v1beta1 - Fields Type: FieldsV1 - fieldsV1: - f:metadata: - f:annotations: - .: - f:ingress.appscode.com/affinity: - f:ingress.appscode.com/stats: - f:ingress.appscode.com/type: - f:meta.helm.sh/release-name: - f:meta.helm.sh/release-namespace: - f:labels: - .: - f:app.kubernetes.io/managed-by: - f:weblogic.resourceVersion: - f:spec: - .: - f:frontendRules: - f:rules: - f:tls: - Manager: Go-http-client - Operation: Update - Time: 2020-10-21T15:46:29Z - Resource Version: 6082128 - Self Link: /apis/voyager.appscode.com/v1beta1/namespaces/oigns/ingresses/governancedomain-voyager-designconsole - UID: a4968c01-28eb-4e4a-ac31-d60cfcd8705f - Spec: - Frontend Rules: - Port: 443 - Rules: - http-request set-header WL-Proxy-SSL true - Rules: - Host: * - Http: - Node Port: 30325 - Paths: - Backend: - Service Name: governancedomain-cluster-oim-cluster - Service Port: 14001 - Path: / - Tls: - Hosts: - * - Secret Name: governancedomain-tls-cert - Events: - Type Reason Age From Message - ---- ------ ---- ---- ------- - Normal DeploymentReconcileSuccessful 55m voyager-operator Successfully patched HAProxy Deployment voyager-governancedomain-voyager-designconsole - Normal DeploymentReconcileSuccessful 
45m voyager-operator Successfully patched HAProxy Deployment voyager-governancedomain-voyager-designconsole - ``` - - -### Design Console Client - -It is possible to use Design Console from an on-premises install, or from a container image. - -#### Using an on-premises installed Design Console - -1. Install Design Console on an on-premises machine - -1. Follow [Login to the Design Console](../using-the-design-console-with-voyager-ssl/#login-to-the-design-console). - -#### Using a container image for Design Console - -The Design Console can be run from a container using X windows emulation. - -1. On the parent machine where the Design Console is to be displayed, run `xhost +`. - -1. Execute the following command to start a container to run Design Console: - - ``` - $ docker run -u root --name oigdcbase -it <image> bash - ``` - - For example: - - ``` - $ docker run -u root -it --name oigdcbase oracle/oig:12.2.1.4.0 bash - ``` - - This will take you into a bash shell inside the container: - - ``` - bash-4.2# - ``` - -1. Inside the container set the proxy, for example: - - ``` - bash-4.2# export https_proxy=http://proxy.example.com:80 - ``` - -1. Install the relevant X windows packages in the container: - - ``` - bash-4.2# yum install libXext libXrender libXtst - ``` - -1. Execute the following outside the container to create a new Design Console image from the container: - - ``` - $ docker commit <container_name> <image_name> - ``` - - For example: - - ``` - $ docker commit oigdcbase oigdc - ``` - -1. Exit the container bash session: - - ``` - bash-4.2# exit - ``` - -1. Start a new container using the Design Console image: - - ``` - $ docker run --name oigdc -it oigdc /bin/bash - ``` - - This will take you into a bash shell for the container: - - ``` - bash-4.2# - ``` - -1. In the container run the following to export the DISPLAY: - - ``` - $ export DISPLAY=<parent_machine_hostname:screen_number> - ``` - -1. 
Start the Design Console from the container: - - ``` - bash-4.2# cd idm/designconsole - bash-4.2# sh xlclient.sh - ``` - - The Design Console login should be displayed. Now follow **Login to the Design Console**. - - -#### Login to the Design Console - -1. Launch the Design Console and in the Oracle Identity Manager Design Console login page enter the following details: - - Enter the following details and click Login: - * `Server URL`: `<url>` - * `User ID`: `xelsysadm` - * `Password`: `<password>`. - - where `<url>` is `http://<masternode.example.com>:<NodePort>` - - `<NodePort>` is the value passed for webPort in the `values.yaml` earlier, for example: 30325 - - -1. If successful the Design Console will be displayed. If the VNC session disappears then the connection failed so double check the connection details and try again. - diff --git a/docs-source/content/oig/configure-ingress/_index.md b/docs-source/content/oig/configure-ingress/_index.md index 1b023376f..24a3a7955 100644 --- a/docs-source/content/oig/configure-ingress/_index.md +++ b/docs-source/content/oig/configure-ingress/_index.md @@ -1,5 +1,5 @@ +++ -title = "Configure an Ingress for an OIG domain" +title = "Configure an ingress for an OIG domain" weight = 5 pre = "5. " description= "This document provides steps to configure an Ingress to direct traffic to the OIG domain." diff --git a/docs-source/content/oig/configure-ingress/ingress-Voyager-setup-for-oig-domain-setup-on-K8S-ssl.md b/docs-source/content/oig/configure-ingress/ingress-Voyager-setup-for-oig-domain-setup-on-K8S-ssl.md deleted file mode 100644 index 5afa6055c..000000000 --- a/docs-source/content/oig/configure-ingress/ingress-Voyager-setup-for-oig-domain-setup-on-K8S-ssl.md +++ /dev/null @@ -1,507 +0,0 @@ ---- -title: "d. Using an Ingress with Voyager (SSL)" -description: "Steps to set up an Ingress for Voyager to direct traffic to the OIG domain (SSL)." 
---- - -### Setting Up an Ingress for Voyager for the OIG Domain on Kubernetes - -The instructions below explain how to set up Voyager as an Ingress for the OIG domain with SSL termination. - -**Note**: All the steps below should be performed on the **master** node. - -1. [Create a SSL Certificate](#create-a-ssl-certificate) - 1. [Generate SSL Certificate](#generate-ssl-certificate) - 1. [Create a Kubernetes Secret for SSL](#create-a-kubernetes-secret-for-ssl) -1. [Install Voyager](#install-voyager) - 1. [Configure the repository](#configure-the-repository) - 1. [Create Namespace and Install Voyager](#create-namespace-and-install-voyager) - 1. [Setup Routing Rules for the Domain](#setup-routing-rules-for-the-domain) -1. [Create an Ingress for the Domain](#create-an-ingress-for-the-domain) -1. [Verify that You can Access the Domain URL](#verify-that-you-can-access-the-domain-url) -1. [Cleanup](#cleanup) - -### Create a SSL Certificate - -#### Generate SSL Certificate - -1. Generate a private key and certificate signing request (CSR) using a tool of your choice. Send the CSR to your certificate authority (CA) to generate the certificate. - - If you want to use a certificate for testing purposes you can generate a self-signed certificate using openssl: - - ``` - $ mkdir <workdir>/ssl - $ cd <workdir>/ssl - $ openssl req -x509 -nodes -days 365 -newkey rsa:2048 -keyout tls.key -out tls.crt -subj "/CN=<hostname>" - ``` - - For example: - - ``` - $ mkdir /scratch/OIGDockerK8S/ssl - $ cd /scratch/OIGDockerK8S/ssl - $ openssl req -x509 -nodes -days 365 -newkey rsa:2048 -keyout tls.key -out tls.crt -subj "/CN=masternode.example.com" - ``` - - **Note**: The `CN` should match the host.domain of the master node in order to prevent hostname problems during certificate verification. 
- - The output will look similar to the following: - - ``` - $ openssl req -x509 -nodes -days 365 -newkey rsa:2048 -keyout tls.key -out tls.crt -subj "/CN=masternode.example.com" - Generating a 2048 bit RSA private key - ..........................................+++ - .......................................................................................................+++ - writing new private key to 'tls.key' - ----- - $ - ``` - -#### Create a Kubernetes Secret for SSL - -1. Create a secret for SSL containing the SSL certificate by running the following command: - - ``` - $ kubectl -n oigns create secret tls <domain_uid>-tls-cert --key <workdir>/tls.key --cert <workdir>/tls.crt - ``` - - For example: - - ``` - $ kubectl -n oigns create secret tls governancedomain-tls-cert --key /scratch/OIGDockerK8S/ssl/tls.key --cert /scratch/OIGDockerK8S/ssl/tls.crt - ``` - - The output will look similar to the following: - - ``` - secret/governancedomain-tls-cert created - ``` - - Confirm that the secret is created by running the following command: - - ``` - $ kubectl get secret governancedomain-tls-cert -o yaml -n oigns - ``` - - The output will look similar to the following: - - ``` - apiVersion: v1 - data: - tls.crt: 
LS0tLS1CRUdJTiBDRVJUSUZJQ0FURS0tLS0tCk1JSURGVENDQWYyZ0F3SUJBZ0lKQUl3ZjVRMWVxZnljTUEwR0NTcUdTSWIzRFFFQkN3VUFNQ0V4SHpBZEJnTlYKQkFNTUZtUmxiakF4WlhadkxuVnpMbTl5WVdOc1pTNWpiMjB3SGhjTk1qQXdPREV3TVRReE9UUXpXaGNOTWpFdwpPREV3TVRReE9UUXpXakFoTVI4d0hRWURWUVFEREJaa1pXNHdNV1YyYnk1MWN5NXZjbUZqYkdVdVkyOXRNSUlCCklqQU5CZ2txaGtpRzl3MEJBUUVGQUFPQ0FROEFNSUlCQ2dLQ0FRRUEyY0lpVUhwcTRVZzBhaGR6aXkycHY2cHQKSVIza2s5REd2eVRNY0syaWZQQ2dtUU5CdHV6VXNFN0l4c294eldITmU5RFpXRXJTSjVON3FYYm1lTzJkMVd2NQp1aFhzbkFTbnkwY1NLUE9xOUNZVDNQSlpDVk1MK0llZVFKdnhaVjZaWWU4V2FFL1NQSGJzczRjYy9wcG1mc3pxCnErUi83cXEyMm9ueHNHaE9vQ1h1TlQvMFF2WXVzMnNucGtueWRKRHUxelhGbDREYkFIZGMvamNVK0NPWWROeS8KT3Iza2JIV0FaTkR4OWxaZUREOTRmNXZLbGJwMy9rcUF2V0FkSVJZa2UrSmpNTHg0VHo2ZlM0VXoxbzdBSTVuSApPQ1ZMblV5U0JkaGVuWTNGNEdFU0wwbnorVlhFWjRWVjRucWNjRmo5cnJ0Q29pT1BBNlgvNGdxMEZJbi9Qd0lECkFRQUJvMUF3VGpBZEJnTlZIUTRFRmdRVWw1VnVpVDBDT0xGTzcxMFBlcHRxSC9DRWZyY3dId1lEVlIwakJCZ3cKRm9BVWw1VnVpVDBDT0xGTzcxMFBlcHRxSC9DRWZyY3dEQVlEVlIwVEJBVXdBd0VCL3pBTkJna3Foa2lHOXcwQgpBUXNGQUFPQ0FRRUFXdEN4b2ZmNGgrWXZEcVVpTFFtUnpqQkVBMHJCOUMwL1FWOG9JQzJ3d1hzYi9KaVNuMHdOCjNMdHppejc0aStEbk1yQytoNFQ3enRaSkc3NVluSGRKcmxQajgzVWdDLzhYTlFCSUNDbTFUa3RlVU1jWG0reG4KTEZEMHpReFhpVzV0N1FHcWtvK2FjeN3JRMXlNSE9HdVVkTTZETzErNXF4cTdFNXFMamhyNEdKejV5OAoraW8zK25UcUVKMHFQOVRocG96RXhBMW80OEY0ZHJybWdqd3ROUldEQVpBYmYyV1JNMXFKWXhxTTJqdU1FQWNsCnFMek1TdEZUQ2o1UGFTQ0NUV1VEK3ZlSWtsRWRpaFdpRm02dzk3Y1diZ0lGMlhlNGk4L2szMmF1N2xUTDEvd28KU3Q2dHpsa20yV25uUFlVMzBnRURnVTQ4OU02Z1dybklpZz09Ci0tLS0tRU5EIENFUlRJRklDQVRFLS0tLS0K - tls.key: 
LS0tLS1CRUdJQVRFIEtFWS0tLS0tCk1JSUV1d0lCQURBTkJna3Foa2lHOXcwQkFRRUZBQVNDQktVd2dnU2hBZ0VBQW9JQkFRRFp3aUpRZW1yaFNEUnEKRjNPTExhbS9xbTBoSGVTVDBNYS9KTXh3cmFKODhLQ1pBMEcyN05Td1Rzakd5akhOWWMxNzBObFlTdEluazN1cApkdVo0N1ozVmEvbTZGZXljQktmTFJ4SW84NnIwSmhQYzhsa0pVd3Y0aDU1QW0vRmxYcGxoN3hab1Q5SThkdXl6Cmh4eittbVorek9xcjVIL3VxcmJhaWZHd2FFNmdKZTQxUC9SQzlpNnpheWVtU2ZKMGtPN1hOY1dYZ05zQWQxeisKTnhUNEk1aDAzTDg2dmVSc2RZQmswUEgyVmw0TVAzaC9tOHFWdW5mK1NvQzlZQjBoRmlSNzRtTXd2SGhQUHA5TApoVFBXanNBam1jYzRKVXVkVEpJRjJGNmRqY1hnWVJJdlNmUDVWY1JuaFZYaWVweHdXUDJ1dTBLaUk0OERwZi9pCkNyUVVpZjgvQWdNQkFBRUNnZjl6cnE2TUVueTFNYWFtdGM2c0laWU1QSDI5R2lSVVlwVXk5bG1sZ3BqUHh3V0sKUkRDay9Td0FmZG9yd1Q2ejNVRk1oYWJ4UU01a04vVjZFYkJlamQxTGhCRW15bjdvWTVEQWJRRTR3RG9SZWlrVApONndWU0FrVC92Z1RXc1RqRlY1bXFKMCt6U2ppOWtySkZQNVNRN1F2cUswQ3BHRlNhVjY2dW8ycktiNmJWSkJYCkxPZmZPMytlS0tVazBaTnE1Q1NVQk9mbnFoNVFJSGdpaDNiMTRlNjBDdGhYcEh6bndrNWhaMHBHZE9BQm9aTkoKZ21lanUyTEdzVWxXTjBLOVdsUy9lcUllQzVzQm9jaWlocmxMVUpGWnpPRUV6LzErT2cyemhmT29yTE9rMTIrTgpjQnV0cTJWQ2I4ZFJDaFg1ZzJ0WnBrdzgzcXN5RSt3M09zYlQxa0VDZ1lFQTdxUnRLWGFONUx1SENvWlM1VWhNCm9Hak1WcnYxTEg0eGNhaDJITmZnMksrMHJqQkJONGpkZkFDMmF3R3ZzU1EyR0lYRzVGYmYyK0pwL1kxbktKOEgKZU80MzNLWVgwTDE4NlNNLzFVay9HSEdTek1CWS9KdGR6WkRrbTA4UnBwaTl4bExTeDBWUWtFNVJVcnJJcTRJVwplZzBOM2RVTHZhTVl1UTBrR2dncUFETUNnWUVBNlpqWCtjU2VMZ1BVajJENWRpUGJ1TmVFd2RMeFNPZDFZMUFjCkUzQ01YTWozK2JxQ3BGUVIrTldYWWVuVmM1QiszajlSdHVnQ0YyTkNSdVdkZWowalBpL243UExIRHdCZVY0bVIKM3VQVHJmamRJbFovSFgzQ2NjVE94TmlaajU4VitFdkRHNHNHOGxtRTRieStYRExIYTJyMWxmUk9sUVRMSyswVgpyTU93eU1VQ2dZRUF1dm14WGM4NWxZRW9hU0tkU0cvQk9kMWlYSUtmc2VDZHRNT2M1elJ0UXRsSDQwS0RscE54CmxYcXBjbVc3MWpyYzk1RzVKNmE1ZG5xTE9OSFZoWW8wUEpmSXhPU052RXI2MTE5NjRBMm5sZXRHYlk0M0twUkEKaHBPRHlmdkZoSllmK29kaUJpZFUyL3ZBMCtUczNSUHJzRzBSOUVDOEZqVDNaZVhaNTF1R0xPa0NnWUFpTmU0NwplQjRxWXdrNFRsMTZmZG5xQWpaQkpLR05xY2c1V1R3alpMSkp6R3owdCtuMkl4SFd2WUZFSjdqSkNmcHFsaDlqCmlDcjJQZVV3K09QTlNUTG1JcUgydzc5L1pQQnNKWXVsZHZ4RFdGVWFlRXg1aHpkNDdmZlNRRjZNK0NHQmthYnIKVzdzU3R5V000ZFdITHpDaGZMS20yWGJBd0VqNUQrbkN1WTRrZVFLQmdFSkRHb0pu
M1NCRXcra2xXTE85N09aOApnc3lYQm9mUW1lRktIS2NHNzFZUFhJbTRlV1kyUi9KOCt5anc5b1FJQ3o5NlRidkdSZEN5QlJhbWhoTmFGUzVyCk9MZUc0ejVENE4zdThUc0dNem9QcU13KzBGSXJiQ3FzTnpGWTg3ekZweEdVaXZvRWZLNE82YkdERTZjNHFqNGEKNmlmK0RSRSt1TWRMWTQyYTA3ekoKLS0tLS1FTkQgUFJJVkFURSBLRVktLS0tLQo= - kind: Secret - metadata: - creationTimestamp: "2020-08-10T14:22:52Z" - managedFields: - - apiVersion: v1 - fieldsType: FieldsV1 - fieldsV1: - f:data: - .: {} - f:tls.crt: {} - f:tls.key: {} - f:type: {} - manager: kubectl - operation: Update - time: "2020-08-10T14:22:52Z" - name: governancedomain-tls-cert - namespace: oigns - resourceVersion: "3722477" - selfLink: /api/v1/namespaces/oigns/secrets/governancedomain-tls-cert - uid: 596fe0fe-effd-4eb9-974d-691da3a3b15a - type: kubernetes.io/tls - ``` - -### Install Voyager - -Use Helm to install Voyager. For detailed information, see [this document](https://github.com/oracle/weblogic-kubernetes-operator/blob/master/kubernetes/samples/charts/voyager/README.md). - -#### Configure the repository - -1. Add the Helm chart repository for installing Voyager using the following command: - - ``` - $ helm repo add appscode https://charts.appscode.com/stable - ``` - - The output will look similar to the following: - - ``` - $ helm repo add appscode https://charts.appscode.com/stable - "appscode" has been added to your repositories - ``` - -1. Update the repository using the following command: - - ``` - $ helm repo update - ``` - - The output will look similar to the following: - - ``` - Hang tight while we grab the latest from your chart repositories... - ...Successfully got an update from the "appscode" chart repository - Update Complete. Happy Helming! - ``` - -1. Run the following command to show the Voyager chart was added successfully. 
- - ``` - $ helm search repo appscode/voyager - ``` - - The output will look similar to the following: - - ``` - NAME CHART VERSION APP VERSION DESCRIPTION - appscode/voyager v12.0.0 v12.0.0 Voyager by AppsCode - Secure HAProxy Ingress Co... - ``` - -#### Create Namespace and Install Voyager - -1. Create a namespace for Voyager: - - ``` - $ kubectl create namespace voyagerssl - ``` - - The output will look similar to the following: - - ``` - namespace/voyagerssl created - ``` - -1. Install Voyager using the following Helm command: - - ``` - $ helm install voyager-ingress appscode/voyager --version 12.0.0 --namespace voyagerssl --set cloudProvider=baremetal --set apiserver.enableValidatingWebhook=false - ``` - - **Note**: For bare metal Kubernetes use `--set cloudProvider=baremetal`. If using a managed Kubernetes service then the value should be set for your specific service as per the [Voyager](https://voyagermesh.com/docs/6.0.0/setup/install/) install guide. - - The output will look similar to the following: - - ``` - NAME: voyager-ingress - LAST DEPLOYED: Wed Aug 12 09:00:58 2020 - NAMESPACE: voyagerssl - STATUS: deployed - REVISION: 1 - TEST SUITE: None - NOTES: - Set cloudProvider for installing Voyager - - To verify that Voyager has started, run: - - kubectl get deployment --namespace voyagerssl -l "app.kubernetes.io/name=voyager,app.kubernetes.io/instance=voyager-ingress" - ``` - -1. Verify that the ingress has started by running the following command: - - ``` - $ kubectl get deployment --namespace voyagerssl -l "app.kubernetes.io/name=voyager,app.kubernetes.io/instance=voyager-ingress" - ``` - - The output will look similar to the following: - - ``` - NAME READY UP-TO-DATE AVAILABLE AGE - voyager-ingress 1/1 1 1 89s - ``` - -#### Setup Routing Rules for the Domain - -1. 
Setup routing rules using the following commands: - - ``` - $ cd <workdir>/weblogic-kubernetes-operator/kubernetes/samples/charts/ingress-per-domain - $ cp values.yaml values.yaml.orig - $ vi values.yaml - ``` - -1. Edit `values.yaml` and ensure that `type=VOYAGER`, `tls=SSL`, and `secretName: <tls_secret_name>` are set, for example: - - ``` - $ cat values.yaml - # Copyright 2020 Oracle Corporation and/or its affiliates. - # Licensed under the Universal Permissive License v 1.0 as shown at https://oss.oracle.com/licenses/upl. - - - # Default values for ingress-per-domain. - # This is a YAML-formatted file. - # Declare variables to be passed into your templates. - - # Load balancer type. Supported values are: VOYAGER, NGINX - type: VOYAGER - # Type of Configuration Supported Values are : NONSSL,SSL - # tls: NONSSL - tls: SSL - # TLS secret name if the mode is SSL - secretName: governancedomain-tls-cert - - - # WLS domain as backend to the load balancer - wlsDomain: - domainUID: governancedomain - oimClusterName: oim_cluster - soaClusterName: soa_cluster - soaManagedServerPort: 8001 - oimManagedServerPort: 14000 - adminServerName: adminserver - adminServerPort: 7001 - - # Traefik specific values - # traefik: - # hostname used by host-routing - # hostname: idmdemo.m8y.xyz - - # Voyager specific values - voyager: - # web port - webPort: 30305 - # stats port - statsPort: 30315 - $ - ``` - -### Create an Ingress for the Domain - -1. Create an Ingress for the domain (`governancedomain-voyager`), in the domain namespace by using the sample Helm chart. 
- - ``` - $ cd /weblogic-kubernetes-operator - $ helm install governancedomain-voyager kubernetes/samples/charts/ingress-per-domain --namespace --values kubernetes/samples/charts/ingress-per-domain/values.yaml - ``` - - For example: - - ``` - $ cd /scratch/OIGDockerK8S/weblogic-kubernetes-operator - $ helm install governancedomain-voyager kubernetes/samples/charts/ingress-per-domain --namespace oigns --values kubernetes/samples/charts/ingress-per-domain/values.yaml - ``` - - The output will look similar to the following: - - ``` - NAME: governancedomain-voyager - LAST DEPLOYED: Wed Sep 30 01:51:05 2020 - NAMESPACE: oigns - STATUS: deployed - REVISION: 1 - TEST SUITE: None - ``` - -1. Run the following command to show the ingress is created successfully: - - ``` - $ kubectl get ingress.voyager.appscode.com -n oigns - ``` - - The output will look similar to the following: - - ``` - NAME HOSTS LOAD_BALANCER_IP AGE - governancedomain-voyager * 3m44s - ``` - -1. Return details of the ingress using the following command: - - ``` - $ kubectl describe ingress.voyager.appscode.com governancedomain-voyager -n oigns - ``` - - The output will look similar to the following: - - ``` - Name: governancedomain-voyager - Namespace: oigns - Labels: app.kubernetes.io/managed-by=Helm - weblogic.resourceVersion=domain-v2 - Annotations: ingress.appscode.com/affinity: cookie - ingress.appscode.com/stats: true - ingress.appscode.com/type: NodePort - meta.helm.sh/release-name: governancedomain-voyager - meta.helm.sh/release-namespace: oigns - API Version: voyager.appscode.com/v1beta1 - Kind: Ingress - Metadata: - Creation Timestamp: 2020-09-30T08:51:05Z - Generation: 1 - Managed Fields: - API Version: voyager.appscode.com/v1beta1 - Fields Type: FieldsV1 - fieldsV1: - f:metadata: - f:annotations: - .: - f:ingress.appscode.com/affinity: - f:ingress.appscode.com/stats: - f:ingress.appscode.com/type: - f:meta.helm.sh/release-name: - f:meta.helm.sh/release-namespace: - f:labels: - .: - 
f:app.kubernetes.io/managed-by: - f:weblogic.resourceVersion: - f:spec: - .: - f:frontendRules: - f:rules: - f:tls: - Manager: Go-http-client - Operation: Update - Time: 2020-09-30T08:51:05Z - Resource Version: 1440614 - Self Link: /apis/voyager.appscode.com/v1beta1/namespaces/oigns/ingresses/governancedomain-voyager - UID: 875e7d90-b166-40ff-b792-c764d514c0c3 - Spec: - Frontend Rules: - Port: 443 - Rules: - http-request set-header WL-Proxy-SSL true - Rules: - Host: * - Http: - Node Port: 30305 - Paths: - Backend: - Service Name: governancedomain-adminserver - Service Port: 7001 - Path: /console - Backend: - Service Name: governancedomain-adminserver - Service Port: 7001 - Path: /em - Backend: - Service Name: governancedomain-cluster-soa-cluster - Service Port: 8001 - Path: /soa-infra - Backend: - Service Name: governancedomain-cluster-soa-cluster - Service Port: 8001 - Path: /soa - Backend: - Service Name: governancedomain-cluster-soa-cluster - Service Port: 8001 - Path: /integration - Backend: - Service Name: governancedomain-cluster-oim-cluster - Service Port: 14000 - Path: / - Tls: - Hosts: - * - Secret Name: governancedomain-tls-cert - Events: - Type Reason Age From Message - ---- ------ ---- ---- ------- - Normal ServiceReconcileSuccessful 65s voyager-operator Successfully created NodePort Service voyager-governancedomain-voyager - Normal ConfigMapReconcileSuccessful 64s voyager-operator Successfully created ConfigMap voyager-governancedomain-voyager - Normal RBACSuccessful 64s voyager-operator Successfully created ServiceAccount voyager-governancedomain-voyager - Normal RBACSuccessful 64s voyager-operator Successfully created Role voyager-governancedomain-voyager - Normal RBACSuccessful 64s voyager-operator Successfully created RoleBinding voyager-governancedomain-voyager - Normal DeploymentReconcileSuccessful 64s voyager-operator Successfully created HAProxy Deployment voyager-governancedomain-voyager - Normal StatsServiceReconcileSuccessful 64s 
voyager-operator Successfully created stats Service voyager-governancedomain-voyager-stats - Normal DeploymentReconcileSuccessful 64s voyager-operator Successfully patched HAProxy Deployment voyager-governancedomain-voyager - ``` - -1. Find the NodePort of Voyager using the following command: - - ``` - $ kubectl get svc -n oigns - ``` - - The output will look similar to the following: - - ``` - NAME TYPE CLUSTER-IP EXTERNAL-IP PORT(S) AGE - governancedomain-adminserver ClusterIP None 7001/TCP 18h - governancedomain-cluster-oim-cluster ClusterIP 10.97.121.159 14000/TCP 18h - governancedomain-cluster-soa-cluster ClusterIP 10.111.231.242 8001/TCP 18h - governancedomain-oim-server1 ClusterIP None 14000/TCP 18h - governancedomain-oim-server2 ClusterIP 10.108.139.30 14000/TCP 18h - governancedomain-oim-server3 ClusterIP 10.97.170.104 14000/TCP 18h - governancedomain-oim-server4 ClusterIP 10.99.82.214 14000/TCP 18h - governancedomain-oim-server5 ClusterIP 10.98.75.228 14000/TCP 18h - governancedomain-soa-server1 ClusterIP None 8001/TCP 18h - governancedomain-soa-server2 ClusterIP 10.107.232.220 8001/TCP 18h - governancedomain-soa-server3 ClusterIP 10.108.203.6 8001/TCP 18h - governancedomain-soa-server4 ClusterIP 10.96.178.0 8001/TCP 18h - governancedomain-soa-server5 ClusterIP 10.107.83.62 8001/TCP 18h - governancedomain-voyager-stats NodePort 10.96.62.0 56789:30315/TCP 3m19s - voyager-governancedomain-voyager NodePort 10.97.231.109 443:30305/TCP,80:30419/TCP 3m12s - voyager-governancedomain-voyager-stats ClusterIP 10.99.185.46 56789/TCP 3m6s - ``` - - Identify the service `voyager-governancedomain-voyager` in the above output and get the `NodePort` which corresponds to port `443`. In this example it will be `30305`. - -1. 
To confirm that the new Ingress is successfully routing to the domain's server pods, run the following command to send a request to the URL for the "WebLogic ReadyApp framework": - - ``` - $ curl -v -k https://${MASTERNODE-HOSTNAME}:${MASTERNODE-PORT}/weblogic/ready - ``` - - For example: - - ``` - $ curl -v -k https://masternode.example.com:30305/weblogic/ready - ``` - - The output will look similar to the following: - - ``` - * About to connect() to masternode.example.com port 30305 (#0) - * Trying 12.345.678.9... - * Connected to masternode.example.com (12.345.678.9) port 30305 (#0) - * Initializing NSS with certpath: sql:/etc/pki/nssdb - * skipping SSL peer certificate verification - * SSL connection using TLS_ECDHE_RSA_WITH_AES_128_GCM_SHA256 - * Server certificate: - * subject: CN=masternode.example.com - * start date: Sep 29 14:52:35 2020 GMT - * expire date: Sep 29 14:52:35 2021 GMT - * common name: masternode.example.com - * issuer: CN=masternode.example.com - > GET /weblogic/ready HTTP/1.1 - > User-Agent: curl/7.29.0 - > Host: masternode.example.com:30305 - > Accept: */* - > - < HTTP/1.1 200 OK - < Date: Wed, 30 Sep 2020 08:56:08 GMT - < Content-Length: 0 - < Strict-Transport-Security: max-age=15768000 - < Set-Cookie: SERVERID=pod-governancedomain-oim-server1; path=/ - < Cache-control: private - < - * Connection #0 to host masternode.example.com left intact - ``` - -### Verify that You can Access the Domain URL - -After setting up the Voyager ingress, verify that the domain applications are accessible through the Voyager ingress port (for example 30305) as per [Validate Domain URLs ]({{< relref "/oig/validate-domain-urls" >}}) - - -#### Cleanup - -If you need to remove the Voyager Ingress then remove the ingress with the following commands: - -``` -$ helm delete governancedomain-voyager -n oigns -$ helm delete voyager-ingress -n voyagerssl -$ kubectl delete namespace voyagerssl -``` - -The output will look similar to the following: - -``` -$ helm delete 
governancedomain-voyager -n oigns -release "governancedomain-voyager" uninstalled - -$ helm delete voyager-ingress -n voyagerssl -release "voyager-ingress" uninstalled - -$ kubectl delete namespace voyagerssl -namespace "voyagerssl" deleted -``` diff --git a/docs-source/content/oig/configure-ingress/ingress-Voyager-setup-for-oig-domain-setup-on-K8S.md b/docs-source/content/oig/configure-ingress/ingress-Voyager-setup-for-oig-domain-setup-on-K8S.md deleted file mode 100644 index 3d41cebe4..000000000 --- a/docs-source/content/oig/configure-ingress/ingress-Voyager-setup-for-oig-domain-setup-on-K8S.md +++ /dev/null @@ -1,381 +0,0 @@ ---- -title: "c. Using an Ingress with Voyager (non-SSL)" -description: "Steps to set up an Ingress for Voyager to direct traffic to the OIG domain (non-SSL)." ---- - -### Setting Up an Ingress for Voyager for the OIG Domain on Kubernetes - -The instructions below explain how to set up Voyager as an Ingress for the OIG domain with non-SSL termination. - -**Note**: All the steps below should be performed on the **master** node. - -1. [Install Voyager](#install-voyager) - 1. [Configure the repository](#configure-the-repository) - 1. [Create Namespace and Install Voyager](#create-namespace-and-install-voyager) - 1. [Setup Routing Rules for the Domain](#setup-routing-rules-for-the-domain) -1. [Create an Ingress for the Domain](#create-an-ingress-for-the-domain) -1. [Verify that You can Access the Domain URL](#verify-that-you-can-access-the-domain-url) -1. [Cleanup](#cleanup) - -### Install Voyager - -Use Helm to install Voyager. For detailed information, see [this document](https://github.com/oracle/weblogic-kubernetes-operator/blob/master/kubernetes/samples/charts/voyager/README.md). - -#### Configure the repository - -1. 
Add the Helm chart repository for installing Voyager using the following command: - - ``` - $ helm repo add appscode https://charts.appscode.com/stable - ``` - - The output will look similar to the following: - - ``` - "appscode" has been added to your repositories - ``` - -1. Update the repository using the following command: - - ``` - $ helm repo update - ``` - - The output will look similar to the following: - - ``` - Hang tight while we grab the latest from your chart repositories... - ...Successfully got an update from the "appscode" chart repository - Update Complete. Happy Helming! - ``` - -1. Run the following command to show the Voyager chart was added successfully. - - ``` - $ helm search repo appscode/voyager - ``` - - The output will look similar to the following: - - ``` - NAME CHART VERSION APP VERSION DESCRIPTION - appscode/voyager v12.0.0 v12.0.0 Voyager by AppsCode - Secure HAProxy Ingress Co... - ``` - -#### Create Namespace and Install Voyager - -1. Create a namespace for Voyager: - - ``` - $ kubectl create namespace voyager - ``` - - The output will look similar to the following: - - ``` - namespace/voyager created - ``` - -1. Install Voyager using the following Helm command: - - ``` - $ helm install voyager-ingress appscode/voyager --version 12.0.0 --namespace voyager --set cloudProvider=baremetal --set apiserver.enableValidatingWebhook=false - ``` - - **Note**: For bare metal Kubernetes use `--set cloudProvider=baremetal`. If using a managed Kubernetes service then the value should be set for your specific service as per the [Voyager](https://voyagermesh.com/docs/6.0.0/setup/install/) install guide. 
- - The output will look similar to the following: - - ``` - NAME: voyager-ingress - LAST DEPLOYED: Tue Sep 29 09:23:22 2020 - NAMESPACE: voyager - STATUS: deployed - REVISION: 1 - TEST SUITE: None - NOTES: - Set cloudProvider for installing Voyager - - To verify that Voyager has started, run: - - $ kubectl get deployment --namespace voyager -l "app.kubernetes.io/name=voyager,app.kubernetes.io/instance=voyager-ingress" - ``` - -1. Verify the ingress has started by running the following command: - - ``` - $ kubectl get deployment --namespace voyager -l "app.kubernetes.io/name=voyager,app.kubernetes.io/instance=voyager-ingress" - ``` - - The output will look similar to the following: - - ``` - NAME READY UP-TO-DATE AVAILABLE AGE - voyager-ingress 1/1 1 1 89s - ``` - -#### Setup Routing Rules for the Domain - -1. Setup routing rules using the following commands: - - ``` - $ cd /weblogic-kubernetes-operator/kubernetes/samples/charts/ingress-per-domain - $ cp values.yaml values.yaml.orig - $ vi values.yaml - ``` - - Edit `values.yaml` and ensure that the values `type=VOYAGER` and `tls=NONSSL` are set. Also change `domainUID` to the value for your domain e.g (`governancedomain`), for example: - - ``` - $ cat values.yaml - # Copyright 2020 Oracle Corporation and/or its affiliates. - # Licensed under the Universal Permissive License v 1.0 as shown at https://oss.oracle.com/licenses/upl. - - - # Default values for ingress-per-domain. - # This is a YAML-formatted file. - # Declare variables to be passed into your templates. - - # Load balancer type. 
Supported values are: VOYAGER, NGINX - type: VOYAGER - # Type of Configuration Supported Values are : NONSSL,SSL - # tls: NONSSL - tls: NONSSL - # TLS secret name if the mode is SSL - secretName: domain1-tls-cert - - - # WLS domain as backend to the load balancer - wlsDomain: - domainUID: governancedomain - oimClusterName: oim_cluster - soaClusterName: soa_cluster - soaManagedServerPort: 8001 - oimManagedServerPort: 14000 - adminServerName: adminserver - adminServerPort: 7001 - - # Voyager specific values - voyager: - # web port - webPort: 30305 - # stats port - statsPort: 30315 - ``` - -### Create an Ingress for the Domain - -1. Create an Ingress for the domain (`governancedomain-voyager`), in the domain namespace by using the sample Helm chart: - - ``` - $ cd /weblogic-kubernetes-operator - $ helm install governancedomain-voyager kubernetes/samples/charts/ingress-per-domain --namespace --values kubernetes/samples/charts/ingress-per-domain/values.yaml - ``` - - For example: - - ``` - $ cd /scratch/OIGDockerK8S/weblogic-kubernetes-operator - $ helm install governancedomain-voyager kubernetes/samples/charts/ingress-per-domain --namespace oigns --values kubernetes/samples/charts/ingress-per-domain/values.yaml - ``` - - The output will look similar to the following: - - ``` - NAME: governancedomain-voyager - LAST DEPLOYED: Tue Sep 29 09:28:12 2020 - NAMESPACE: oigns - STATUS: deployed - REVISION: 1 - TEST SUITE: None - ``` - -1. Run the following command to show the ingress is created successfully: - - ``` - $ kubectl get ingress.voyager.appscode.com -n oigns - ``` - - The output will look similar to the following: - - ``` - NAME HOSTS LOAD_BALANCER_IP AGE - governancedomain-voyager * 78s - ``` - -1. 
Return details of the ingress using the following command: - - ``` - $ kubectl describe ingress.voyager.appscode.com governancedomain-voyager -n oigns - ``` - - The output will look similar to the following: - - ``` - Name: governancedomain-voyager - Namespace: oigns - Labels: app.kubernetes.io/managed-by=Helm - weblogic.resourceVersion=domain-v2 - Annotations: ingress.appscode.com/affinity: cookie - ingress.appscode.com/stats: true - ingress.appscode.com/type: NodePort - meta.helm.sh/release-name: governancedomain-voyager - meta.helm.sh/release-namespace: oigns - API Version: voyager.appscode.com/v1beta1 - Kind: Ingress - Metadata: - Creation Timestamp: 2020-09-29T09:28:12Z - Generation: 1 - Managed Fields: - API Version: voyager.appscode.com/v1beta1 - Fields Type: FieldsV1 - fieldsV1: - f:metadata: - f:annotations: - .: - f:ingress.appscode.com/affinity: - f:ingress.appscode.com/stats: - f:ingress.appscode.com/type: - f:meta.helm.sh/release-name: - f:meta.helm.sh/release-namespace: - f:labels: - .: - f:app.kubernetes.io/managed-by: - f:weblogic.resourceVersion: - f:spec: - .: - f:rules: - Manager: Go-http-client - Operation: Update - Time: 2020-09-29T09:28:12Z - Resource Version: 4168835 - Self Link: /apis/voyager.appscode.com/v1beta1/namespaces/oigns/ingresses/governancedomain-voyager - UID: 2ea71f79-6836-4df2-8200-8418abf6ad9f - Spec: - Rules: - Host: * - Http: - Node Port: 30305 - Paths: - Backend: - Service Name: governancedomain-adminserver - Service Port: 7001 - Path: /console - Backend: - Service Name: governancedomain-adminserver - Service Port: 7001 - Path: /em - Backend: - Service Name: governancedomain-cluster-soa-cluster - Service Port: 8001 - Path: /soa-infra - Backend: - Service Name: governancedomain-cluster-soa-cluster - Service Port: 8001 - Path: /integration - Backend: - Service Name: governancedomain-cluster-oim-cluster - Service Port: 14000 - Path: / - Events: - Type Reason Age From Message - ---- ------ ---- ---- ------- - Normal 
ServiceReconcileSuccessful 5m22s voyager-operator Successfully created NodePort Service voyager-governancedomain-voyager - Normal ConfigMapReconcileSuccessful 5m22s voyager-operator Successfully created ConfigMap voyager-governancedomain-voyager - Normal RBACSuccessful 5m22s voyager-operator Successfully created ServiceAccount voyager-governancedomain-voyager - Normal RBACSuccessful 5m22s voyager-operator Successfully created Role voyager-governancedomain-voyager - Normal RBACSuccessful 5m22s voyager-operator Successfully created RoleBinding voyager-governancedomain-voyager - Normal DeploymentReconcileSuccessful 5m22s voyager-operator Successfully created HAProxy Deployment voyager-governancedomain-voyager - Normal StatsServiceReconcileSuccessful 5m22s voyager-operator Successfully created stats Service voyager-governancedomain-voyager-stats - ``` - -1. Find the NodePort of Voyager using the following command: - - ``` - $ kubectl get svc -n oigns - ``` - - The output will look similar to the following: - - ``` - NAME TYPE CLUSTER-IP EXTERNAL-IP PORT(S) AGE - governancedomain-adminserver ClusterIP None 7001/TCP 19h - governancedomain-cluster-oim-cluster ClusterIP 10.97.121.159 14000/TCP 19h - governancedomain-cluster-soa-cluster ClusterIP 10.111.231.242 8001/TCP 19h - governancedomain-oim-server1 ClusterIP None 14000/TCP 19h - governancedomain-oim-server2 ClusterIP 10.108.139.30 14000/TCP 19h - governancedomain-oim-server3 ClusterIP 10.97.170.104 14000/TCP 19h - governancedomain-oim-server4 ClusterIP 10.99.82.214 14000/TCP 19h - governancedomain-oim-server5 ClusterIP 10.98.75.228 14000/TCP 19h - governancedomain-soa-server1 ClusterIP None 8001/TCP 19h - governancedomain-soa-server2 ClusterIP 10.107.232.220 8001/TCP 19h - governancedomain-soa-server3 ClusterIP 10.108.203.6 8001/TCP 19h - governancedomain-soa-server4 ClusterIP 10.96.178.0 8001/TCP 19h - governancedomain-soa-server5 ClusterIP 10.107.83.62 8001/TCP 19h - governancedomain-voyager-stats NodePort 
10.99.34.145 56789:30315/TCP 3m36s - voyager-governancedomain-voyager NodePort 10.106.40.20 80:30305/TCP 3m36s - voyager-governancedomain-voyager-stats ClusterIP 10.100.89.234 56789/TCP 3m30s - ``` - - Identify the service `voyager-governancedomain-voyager` in the above output and get the `NodePort` which corresponds to port `80`. In this example it will be `30305`. - -1. To confirm that the new Ingress is successfully routing to the domain's server pods, run the following command to send a request to the URL for the "WebLogic ReadyApp framework": - - ``` - $ curl -v http://${MASTERNODE-HOSTNAME}:${MASTERNODE-PORT}/weblogic/ready - ``` - - For example: - - ``` - $ curl -v http://masternode.example.com:30305/weblogic/ready - ``` - - The output will look similar to the following: - - ``` - $ curl -v -k http://masternode.example.com:30305/weblogic/ready - * About to connect() to masternode.example.com port 30305 (#0) - * Trying 12.345.67.890... - * Connected to masternode.example.com (12.345.67.890) port 30305 (#0) - > GET /weblogic/ready HTTP/1.1 - > User-Agent: curl/7.29.0 - > Host: masternode.example.com:30305 - > Accept: */* - > - < HTTP/1.1 200 OK - < Date: Wed, 29 Sep 2020 09:30:56 GMT - < Content-Length: 0 - < Set-Cookie: SERVERID=pod-governancedomain-oim-server1; path=/ - < Cache-control: private - < - * Connection #0 to host masternode.example.com left intact - ``` - -### Verify that You can Access the Domain URL - -After setting up the Voyager ingress, verify that the domain applications are accessible through the Voyager ingress port (for example 30305) as per [Validate Domain URLs ]({{< relref "/oig/validate-domain-urls" >}}) - - -#### Cleanup - -If you need to remove the Voyager Ingress (for example to setup Voyager with SSL) then remove the ingress with the following commands: - -``` -$ helm delete governancedomain-voyager -n oigns -$ helm delete voyager-ingress -n voyager -$ kubectl delete namespace voyager -``` - -The output will look similar to the 
following: - -``` -$ helm delete governancedomain-voyager -n oigns -release "governancedomain-voyager" uninstalled - -$ helm delete voyager-ingress -n voyager -release "voyager-ingress" uninstalled - -$ kubectl delete namespace voyager -namespace "voyager" deleted -``` \ No newline at end of file diff --git a/docs-source/content/oig/configure-ingress/ingress-nginx-setup-for-oig-domain-setup-on-K8S-ssl.md b/docs-source/content/oig/configure-ingress/ingress-nginx-setup-for-oig-domain-setup-on-K8S-ssl.md index f39a7f46c..65b5906f7 100644 --- a/docs-source/content/oig/configure-ingress/ingress-nginx-setup-for-oig-domain-setup-on-K8S-ssl.md +++ b/docs-source/content/oig/configure-ingress/ingress-nginx-setup-for-oig-domain-setup-on-K8S-ssl.md @@ -3,42 +3,48 @@ title: "b. Using an Ingress with NGINX (SSL)" description: "Steps to set up an Ingress for NGINX to direct traffic to the OIG domain (SSL)." --- -### Setting Up an Ingress for NGINX for the OIG Domain on Kubernetes +### Setting up an ingress for NGINX for the OIG domain on Kubernetes The instructions below explain how to set up NGINX as an ingress for the OIG domain with SSL termination. **Note**: All the steps below should be performed on the **master** node. -1. [Create a SSL Certificate](#create-a-ssl-certificate) - 1. [Generate SSL Certificate](#generate-ssl-certificate) - 1. [Create a Kubernetes Secret for SSL](#create-a-kubernetes-secret-for-ssl) +1. [Create a SSL certificate](#create-a-ssl-certificate) + + a. [Generate SSL certificate](#generate-ssl-certificate) + + b. [Create a Kubernetes secret for SSL](#create-a-kubernetes-secret-for-ssl) + 1. [Install NGINX](#install-nginx) - 1. [Configure the repository](#configure-the-repository) - 1. [Create a Namespace](#create-a-namespace) - 1. [Install NGINX using helm](#install-nginx-using-helm) -1. [Create an Ingress for the Domain](#create-an-ingress-for-the-domain) -1. [Verify that You can Access the Domain URL](#verify-that-you-can-access-the-domain-url) -1. 
[Cleanup](#cleanup) -### Create a SSL Certificate + a. [Configure the repository](#configure-the-repository) + + b. [Create a namespace](#create-a-namespace) + + c. [Install NGINX using helm](#install-nginx-using-helm) + +1. [Create an ingress for the domain](#create-an-ingress-for-the-domain) +1. [Verify that you can access the domain URL](#verify-that-you-can-access-the-domain-url) + +### Create a SSL certificate -#### Generate SSL Certificate +#### Generate SSL certificate 1. Generate a private key and certificate signing request (CSR) using a tool of your choice. Send the CSR to your certificate authority (CA) to generate the certificate. If you want to use a certificate for testing purposes you can generate a self signed certificate using openssl: - ``` - $ mkdir /ssl - $ cd /ssl + ```bash + $ mkdir /ssl + $ cd /ssl $ openssl req -x509 -nodes -days 365 -newkey rsa:2048 -keyout tls.key -out tls.crt -subj "/CN=" ``` For example: - ``` - $ mkdir /scratch/OIGDockerK8S/ssl - $ cd /scratch/OIGDockerK8S/ssl + ```bash + $ mkdir /scratch/OIGK8S/ssl + $ cd /scratch/OIGK8S/ssl $ openssl req -x509 -nodes -days 365 -newkey rsa:2048 -keyout tls.key -out tls.crt -subj "/CN=masternode.example.com" ``` @@ -47,7 +53,6 @@ The instructions below explain how to set up NGINX as an ingress for the OIG dom The output will look similar to the following: ``` - $ openssl req -x509 -nodes -days 365 -newkey rsa:2048 -keyout tls.key -out tls.crt -subj "/CN=masternode.example.com" Generating a 2048 bit RSA private key ..........................................+++ .......................................................................................................+++ @@ -55,31 +60,35 @@ The instructions below explain how to set up NGINX as an ingress for the OIG dom ----- ``` -#### Create a Kubernetes Secret for SSL +#### Create a Kubernetes secret for SSL 1. 
Create a secret for SSL containing the SSL certificate by running the following command: - ``` - $ kubectl -n oigns create secret tls -tls-cert --key /tls.key --cert /tls.crt + ```bash + $ kubectl -n oigns create secret tls -tls-cert --key /tls.key --cert /tls.crt ``` For example: - ``` - $ kubectl -n oigns create secret tls governancedomain-tls-cert --key /scratch/OIGDockerK8S/ssl/tls.key --cert /scratch/OIGDockerK8S/ssl/tls.crt + ```bash + $ kubectl -n oigns create secret tls governancedomain-tls-cert --key /scratch/OIGK8S/ssl/tls.key --cert /scratch/OIGK8S/ssl/tls.crt ``` The output will look similar to the following: ``` - $ kubectl -n oigns create secret tls governancedomain-tls-cert --key /scratch/OIGDockerK8S/ssl/tls.key --cert /scratch/OIGDockerK8S/ssl/tls.crt secret/governancedomain-tls-cert created - $ ``` 1. Confirm that the secret is created by running the following command: + ```bash + $ kubectl get secret -tls-cert -o yaml -n oigns ``` + + For example: + + ```bash $ kubectl get secret governancedomain-tls-cert -o yaml -n oigns ``` @@ -88,11 +97,11 @@ The instructions below explain how to set up NGINX as an ingress for the OIG dom ``` apiVersion: v1 data: - tls.crt: 
LS0tLS1CRUdJTiBDRVJUSUZJQ0FURS0tLS0tCk1JSURGVENDQWYyZ0F3SUJBZ0lKQUl3ZjVRMWVxZnljTUEwR0NTcUdTSWIzRFFFQkN3VUFNQ0V4SHpBZEJnTlYKQkFNTUZtUmxiakF4WlhadkxuVnpMbTl5WVdOc1pTNWpiMjB3SGhjTk1qQXdPREV3TVRReE9UUXpXaGNOTWpFdwpPREV3TVRReE9UUXpXakFoTVI4d0hRWURWUVFEREJaa1pXNHdNV1YyYnk1MWN5NXZjbUZqYkdVdVkyOXRNSUlCCklqQU5CZ2txaGtpRzl3MEJBUUVGQUFPQ0FROEFNSUlCQ2dLQ0FRRUEyY0lpVUhwcTRVZzBhaGR6aXkycHY2cHQKSVIza2s5REd2eVRNY0syaWZQQ2dtUU5CdHV6VXNFN0l4c294eldITmU5RFpXRXJTSjVON3FYYm1lTzJkMVd2NQp1aFhzbkFTbnkwY1NLUE9xVDNQSlpDVk1MK0llZVFKdnhaVjZaWWU4V2FFL1NQSGJzczRjYy9wcG1mc3pxCnErUi83cXEyMm9ueHNHaE9vQ1h1TlQvMFF2WXVzMnNucGtueWRKRHUxelhGbDREYkFIZGMvamNVK0NPWWROeS8KT3Iza2JIV0FaTkR4OWxaZUREOTRmNXZLcUF2V0FkSVJZa2UrSmpNTHg0VHo2ZlM0VXoxbzdBSTVuSApPQ1ZMblV5U0JkaGVuWTNGNEdFU0wwbnorVlhFWjRWVjRucWNjRmo5cnJ0Q29pT1BBNlgvNGdxMEZJbi9Qd0lECkFRQUJvMUF3VGpBZEJnTlZIUTRFRmdRVWw1VnVpVDBDT0xGTzcxMFBlcHRxSC9DRWZyY3dId1lEVlIwakJCZ3cKRm9BVWw1VnVpVDBDT0xGTzcxMFBlcHRxSC9DRWZyY3dEQVlEVlIwVEJBVXdBd0VCL3pBTkJna3Foa2lHOXcwQgpBUXNGQUFPQ0FRRUFXdEN4b2ZmNGgrWXZEcVVpTFFtUnpqQkVBMHJCOUMwL1FWOG9JQzJ3d1hzYi9KaVNuMHdOCjNMdHppejc0aStEbk1yQytoNFQ3enRaSkc3NVluSGRKcmxQajgzVWdDLzhYTlFCSUNDbTFUa3RlVU1jWG0reG4KTEZEMHpReFhpVzV0N1FHcWtvK2FjeTlhUnUvN3JRMXlNSE9HdVVkTTZETzErNXF4cTdFNXFMamhyNEdKejV5OAoraW8zK25UcUVKMHFQOVRocG96RXhBMW80OEY0ZHJybWdqd3ROUldEQVpBYmYyV1JNMXFKWXhxTTJqdU1FQWNsCnFMek1TdEZUQ2o1UGFTQ0NUV1VEK3ZlSWtsRWRpaFdpRm02dzk3Y1diZ0lGMlhlNGk4L2szMmF1N2xUTDEvd28KU3Q2dHpsa20yV25uUFlVMzBnRURnVTQ4OU02Z1dybklpZz09Ci0tLS0tRU5EIENFUlRJRklDQVRFLS0tLS0K - tls.key: 
LS0tLS1CRUdJTiBQUklWQVRFIEtFWS0tLS0tCk1JSUV1d0lCQURBTkJna3Foa2lHOXcwQkFRRUZBQVNDQktVd2dnU2hBZ0VBQW9JQkFRRFp3aUpRZW1yaFNEUnEKRjNPTExhbS9xbTBoSGVTVDBNYS9KTXh3cmFKODhLQ1pBMEcyN05Td1Rzakd5akhOWWMxNzBObFlTdEluazN1cApkdVo0N1ozVmEvbTZGZXljQktmTFJ4SW84NnIwSmhQYzhsa0pVd3Y0aDU1QW0vRmxYcGxoN3hab1Q5SThkdXl6Cmh4eittbVorek9xcjVIL3VxcmJhaWZHd2FFNmdKZTQxUC9SQzlpNnpheWVtU2ZKMGtPN1hOY1dYZ05zQWQxeisKTnhUNEk1aDAzTDg2dmVSc2RZQmswUEgyVmw0TVAzaC9tOHFWdW5mK1NvQzlZQjBoRmlSNzRtTXd2SGhQUHA5TApoVFBXanNBam1jYzRKVXVkVEpJRjJGNmRqY1hnWVJJdlNmUDVWY1JuaFZYaWVweHdXUDJ1dTBLaUk0OERwZi9pCkNyUVVpZjgvQWdNQkFBRUNnZjl6cnE2TUVueTFNYWFtdGM2c0laWU1QSDI5R2lSVVlwVXk5bG1sZ3BqUHh3V0sKUkRDay9Td0FmZG9yd1Q2ejNVRk1oYWJ4UU01a04vVjZFYkJlamQxT15bjdvWTVEQWJRRTR3RG9SZWlrVApONndWU0FrVC92Z1RXc1RqRlY1bXFKMCt6U2ppOWtySkZQNVNRN1F2cUswQ3BHRlNhVjY2dW8ycktiNmJWSkJYCkxPZmZPMytlS0tVazBaTnE1Q1NVQk9mbnFoNVFJSGdpaDNiMTRlNjB6bndrNWhaMHBHZE9BQm9aTkoKZ21lanUyTEdzVWxXTjBLOVdsUy9lcUllQzVzQm9jaWlocmxMVUpGWnpPRUV6LzErT2cyemhmT29yTE9rMTIrTgpjQnV0cTJWQ2I4ZFJDaFg1ZzJ0WnBrdzgzcXN5RSt3M09zYlQxa0VDZ1lFQTdxUnRLWGFONUx1SENvWlM1VWhNCm9Hak1WcnYxTEg0eGNhaDJITmZnMksrMHJqQkJONGpkZkFDMmF3R3ZzU1EyR0lYRzVGYmYyK0pwL1kxbktKOEgKZU80MzNLWVgwTDE4NlNNLzFVay9HSEdTek1CWS9KdGR6WkRrbTA4UnBwaTl4bExTeDBWUWtFNVJVcnJJcTRJVwplZzBOM2RVTHZhTVl1UTBrR2dncUFETUNnWUVBNlpqWCtjU2VMZ1BVajJENWRpUGJ1TmVFd2RMeFNPZDFZMUFjCkUzQ01YTWozK2JxQ3BGUVIrTldYWWVuVmM1QiszajlSdHVnQ0YyTkNSdVdkZWowalBpL243UExIRHdCZVY0bVIKM3VQVHJmamRJbFovSFgzQ2NjVE94TmlaajU4VitFdkRHNHNHOGxtRTRieStYRExIYTJyMWxmUk9sUVRMSyswVgpyTU93eU1VQ2dZRUF1dm14WGM4NWxZRW9hU0tkU0cvQk9kMWlYSUtmc2VDZHRNT2M1elJ0UXRsSDQwS0RscE54CmxYcXBjbVc3MWpyYzk1RzVKNmE1ZG5xTE9OSFZoWW8wUEpmSXhPU052RXI2MTE5NjRBMm5sZXRHYlk0M0twUkEKaHBPRHlmdkZoSllmK29kaUJpZFUyL3ZBMCtUczNSUHJzRzBSOUVDOEZqVDNaZVhaNTF1R0xPa0NnWUFpTmU0NwplQjRxWXdrNFRsMTZmZG5xQWpaQkpLR05xY2c1V1R3alpMSkp6R3owdCtuMkl4SFd2WUZFSjdqSkNmcHFsaDlqCmlDcjJQZVV3K09QTlNUTG1JcUgydzc5L1pQQnNKWXVsZHZ4RFdGVWFlRXg1aHpkNDdmZlNRRjZNK0NHQmthYnIKVzdzU3R5V000ZFdITHpDaGZMS20yWGJBd0VqNUQrbkN1WTRrZVFLQmdFSkRHb0puM1NCR
Xcra2xXTE85N09aOApnc3lYQm9mUW1lRktIS2NHNzFZUFhJbTRlV1kyUi9KOCt5anc5b1FJQ3o5NlRidkdSZEN5QlJhbWhoTmFGUzVyCk9MZUc0ejVENE4zdThUc0dNem9QcU13KzBGSXJiQ3FzTnpGWTg3ekZweEdVaXZvRWZLNE82YkdERTZjNHFqNGEKNmlmK0RSRSt1TWRMWTQyYTA3ekoKLS0tLS1FTkQgUFJJVkFURSBLRVktLS0tLQo= + tls.crt: LS0tLS1CRUdJTiBDRVJUSUZJQ0FURS0tLS0tCk1JSURGVENDQWYyZ0F3SUJBZ0lKQUl3ZjVRMWVxZnljTUEwR0NTcUdTSWIzRFFFQkN3VUFNQ0V4SHpBZEJnTlYKQkFNTUZtUmxiakF4WlhadkxuVnpMbTl5WVdOc1pTNWpiMjB3SGhjTk1qQXdPREV3TVRReE9UUXpXaGNOTWpFdwpPREV3TVRReE9UUXpXakFoTVI4d0hRWURWUVFEREJaa1pXNHdNV1YyYnk1MWN5NXZjbUZqYkdVdVkyOXRNSUlCCklqQU5CZ2txaGtpRzl3MEJBUUVGQUFPQ0FROEFNSUlCQ2dLQ0FRRUEyY0lpVUhwcTRVZzBhaGR6aXkycHY2cHQKSVIza2s5REd2eVRNY0syaWZQQ2dtUU5CdHV6VXNFN0l4c294eldITmU5RFpXRXJTSjVON3Ym1lTzJkMVd2NQp1aFhzbkFTbnkwY1NLUE9xVDNQSlpDVk1MK0llZVFKdnhaVjZaWWU4V2FFL1NQSGJzczRjYy9wcG1mc3pxCnErUi83cXEyMm9ueHNHaE9vQ1h1TlQvMFF2WXVzMnNucGtueWRKRHUxelhGbDREYkFIZGMvamNVK0NPWWROeS8KT3Iza2JIV0FaTkR4OWxaZUREOTRmNXZLcUF2V0FkSVJZa2UrSmpNTHg0VHo2ZlM0VXoxbzdBSTVuSApPQ1ZMblV5U0JkaGVuWTNGNEdFU0wwbnorVlhFWjRWVjRucWNjRmo5cnJ0Q29pT1BBNlgvNGdxMEZJbi9Qd0lECkFRQUJvMUF3VGpBZEJnTlZIUTRFRmdRVWw1VnVpVDBDT0xGTzcxMFBlcHRxSC9DRWZyY3dId1lEVlIwakJCZ3cKRm9BVWw1VnVpVDBDT0xGTzcxMFBlcHRxSC9DRWZyY3dEQVlEVlIwVEJBVXdBd0VCL3pBTkJna3Foa2lHOXcwQgpBUXNGQUFPQ0FRRUFXdEN4b2ZmNGgrWXZEcVVpTFFtUnpqQkVBMHJCOUMwL1FWOG9JQzJ3d1hzYi9KaVNuMHdOCjNMdHppejc0aStEbk1yQytoNFQ3enRaSkc3NVluSGRKcmxQajgzVWdDLzhYTlFCSUNDbTFUa3RlVU1jWG0reG4KTEZEMHpReFhpVzV0N1FHcWtvK2FjeTlhUnUvN3JRMXlNSE9HdVVkTTZETzErNXF4cTdFNXFMamhyNEdKejV5OAoraW8zK25UcUVKMHFQOVRocG96RXhBMW80OEY0ZHJybWdqd3ROUldEQVpBYmYyV1JNMXFKWXhxTTJqdU1FQWNsCnFMek1TdEZUQ2o1UGFTQ0NUV1VEK3ZlSWtsRWRpaFdpRm02dzk3Y1diZ0lGMlhlNGk4L2szMmF1N2xUTDEvd28KU3Q2dHpsa20yV25uUFlVMzBnRURnVTQ4OU02Z1dybklpZz09Ci0tLS0tRU5EIENFUlRJRklDQVRFLS0tLS0K + tls.key: 
LS0tLS1CRUdJTiBQUklWQVRFIEtFWS0tLS0tCk1JSUV1d0lCQURBTkJna3Foa2lHOXcwQkFRRUZBQVNDQktVd2dnU2hBZ0VBQW9JQkFRRFp3aUpRZW1yaFNEUnEKRjNPTExhbS9xbTBoSGVTVDBNYS9KTXh3cmFKODhLQ1pBMEcyN05Td1Rzakd5akhOWWMxNzBObFlTdEluazN1cApkdVo0N1ozVmEvbTZGZXljQktmTFJ4SW84NnIwSmhQYzhsa0pVd3Y0aDU1QW0vRmxYcGxoN3hab1Q5SThkdXl6Cmh4eittbVorek9xcjVIL3VxcmJhaWZHd2FFNmdKZTQxUC9SQzlpNnpheWVtU2ZKMGtPN1hOY1dYZ05zQWQxeisKTnhUNEk1aDAzTDg2dmVSc2RZQmswUEgyVmw0TVAzaC9tOHFWdW5mK1NvQzlZQjBoRmlSNzRtTXd2SGhQUHA5TApoVFBXanNBam1jYzRKVXVkVEpJRjJGNmRqY1hnWVJJdlNmUDVWY1JuaFZYaWVweHdXUDJ1dTBLaUk0OERwZi9pCkNyUVVpZjgvQWdNQkFBRUNnZjl6cnE2TUVueTFNYWFtdGM2c0laWU1QSDI5R2lSVVlwVXk5bG1sZ3BqUHh3V0sKUkRDay9Td0FmZG9yd1Q2ejNVRk1oYWJ4UU01a04vVjZFYkJlamQxT15bjdvWTVEQWJRRTR3RG9SZWlrVApONndWU0FrVC92Z1RXc1RqRlY1bXFKMCt6U2ppOWtySkZQNVNRN1F2cUswQ3BHRlNhVjY2dW8ycktiNmJWSkJYCkxPZmZPMytlS0tVazBaTnE1Q1NVQk9mbnFoNVFJSGdpaDNiMTRlNjB6bndrNWhaMHBHZE9BQm9aTkoKZ21lanUyTEdzVWxXTjBLOVdsUy9lcUllQzVzQm9jaWlocmxMVUpGWnpPRUV6LzErT2cyemhmT29yTE9rMTIrTgpjQnV0cTJWQ2I4ZFJDaFg1ZzJ0WnBrdzgzcXN5RSt3M09zYlQxa0VDZ1lFQTdxUnRLWGFONUx1SENvWlM1VWhNCm9Hak1WcnYxTEg0eGNhaDJIZnMksrMHJqQkJONGpkZkFDMmF3R3ZzU1EyR0lYRzVGYmYyK0pwL1kxbktKOEgKZU80MzNLWVgwTDE4NlNNLzFVay9HSEdTek1CWS9KdGR6WkRrbTA4UnBwaTl4bExTeDBWUWtFNVJVcnJJcTRJVwplZzBOM2RVTHZhTVl1UTBrR2dncUFETUNnWUVBNlpqWCtjU2VMZ1BVajJENWRpUGJ1TmVFd2RMeFNPZDFZMUFjCkUzQ01YTWozK2JxQ3BGUVIrTldYWWVuVmM1QiszajlSdHVnQ0YyTkNSdVdkZWowalBpL243UExIRHdCZVY0bVIKM3VQVHJmamRJbFovSFgzQ2NjVE94TmlaajU4VitFdkRHNHNHOGxtRTRieStYRExIYTJyMWxmUk9sUVRMSyswVgpyTU93eU1VQ2dZRUF1dm14WGM4NWxZRW9hU0tkU0cvQk9kMWlYSUtmc2VDZHRNT2M1elJ0UXRsSDQwS0RscE54CmxYcXBjbVc3MWpyYzk1RzVKNmE1ZG5xTE9OSFZoWW8wUEpmSXhPU052RXI2MTE5NjRBMm5sZXRHYlk0M0twUkEKaHBPRHlmdkZoSllmK29kaUJpZFUyL3ZBMCtUczNSUHJzRzBSOUVDOEZqVDNaZVhaNTF1R0xPa0NnWUFpTmU0NwplQjRxWXdrNFRsMTZmZG5xQWpaQkpLR05xY2c1V1R3alpMSkp6R3owdCtuMkl4SFd2WUZFSjdqSkNmcHFsaDlqCmlDcjJQZVV3K09QTlNUTG1JcUgydzc5L1pQQnNKWXVsZHZ4RFdGVWFlRXg1aHpkNDdmZlNRRjZNK0NHQmthYnIKVzdzU3R5V000ZFdITHpDaGZMS20yWGJBd0VqNUQrbkN1WTRrZVFLQmdFSkRHb0puM1NCRXc
ra2xXTE85N09aOApnc3lYQm9mUW1lRktIS2NHNzFZUFhJbTRlV1kyUi9KOCt5anc5b1FJQ3o5NlRidkdSZEN5QlJhbWhoTmFGUzVyCk9MZUc0ejVENE4zdThUc0dNem9QcU13KzBGSXJiQ3FzTnpGWTg3ekZweEdVaXZvRWZLNE82YkdERTZjNHFqNGEKNmlmK0RSRSt1TWRMWTQyYTA3ekoKLS0tLS1FTkQgUFJJVkFURSBLRVktLS0tLQo= kind: Secret metadata: - creationTimestamp: "2020-09-29T15:51:22Z" + creationTimestamp: "2021-11-12T17:13:37Z" managedFields: - apiVersion: v1 fieldsType: FieldsV1 @@ -104,7 +113,7 @@ The instructions below explain how to set up NGINX as an ingress for the OIG dom f:type: {} manager: kubectl operation: Update - time: "2020-09-29T15:51:22Z" + time: "2021-11-12T17:13:37Z" name: governancedomain-tls-cert namespace: oigns resourceVersion: "1291036" @@ -121,7 +130,7 @@ Use helm to install NGINX. 1. Add the Helm chart repository for installing NGINX using the following command: - ``` + ```bash $ helm repo add stable https://kubernetes.github.io/ingress-nginx ``` @@ -133,7 +142,7 @@ Use helm to install NGINX. 1. Update the repository using the following command: - ``` + ```bash $ helm repo update ``` @@ -145,11 +154,11 @@ Use helm to install NGINX. Update Complete. Happy Helming! ``` -#### Create a Namespace +#### Create a namespace 1. 
Create a Kubernetes namespace for NGINX: - ``` + ```bash $ kubectl create namespace nginxssl ``` @@ -169,16 +178,18 @@ If you are using a Managed Service for your Kubernetes cluster, for example Orac a) Using NodePort - ``` - $ helm install nginx-ingress -n nginxssl --set controller.extraArgs.default-ssl-certificate=oigns/governancedomain-tls-cert --set controller.service.type=NodePort --set controller.admissionWebhooks.enabled=false stable/ingress-nginx --version=3.34.0 + ```bash + $ helm install nginx-ingress -n nginxssl --set controller.extraArgs.default-ssl-certificate=oigns/governancedomain-tls-cert --set controller.service.type=NodePort --set controller.admissionWebhooks.enabled=false stable/ingress-nginx ``` - + + **Note**: If using Kubernetes 1.18 then add `--version=3.34.0` to the end of command. + The output will look similar to the following: ``` - $ helm install nginx-ingress -n nginxssl --set controller.extraArgs.default-ssl-certificate=oigns/governancedomain-tls-cert --set controller.service.type=NodePort --set controller.admissionWebhooks.enabled=false stable/ingress-nginx --version=3.34.0 + $ helm install nginx-ingress -n nginxssl --set controller.extraArgs.default-ssl-certificate=oigns/governancedomain-tls-cert --set controller.service.type=NodePort --set controller.admissionWebhooks.enabled=false stable/ingress-nginx NAME: nginx-ingress - LAST DEPLOYED: Tue Sep 29 08:53:30 2020 + LAST DEPLOYED: Mon Nov 15 02:23:30 2021 NAMESPACE: nginxssl STATUS: deployed REVISION: 1 @@ -195,7 +206,7 @@ If you are using a Managed Service for your Kubernetes cluster, for example Orac An example Ingress that makes use of the controller: - apiVersion: extensions/v1beta1 + apiVersion: networking.k8s.io/v1 kind: Ingress metadata: annotations: @@ -203,11 +214,14 @@ If you are using a Managed Service for your Kubernetes cluster, for example Orac name: example namespace: foo spec: + ingressClassName: example-class rules: - host: www.example.com http: paths: - - backend: + 
- path: / + pathType: Prefix + backend: serviceName: exampleService servicePort: 80 path: / @@ -232,16 +246,17 @@ If you are using a Managed Service for your Kubernetes cluster, for example Orac b) Using LoadBalancer - ``` - $ helm install nginx-ingress -n nginxssl --set controller.extraArgs.default-ssl-certificate=oigns/governancedomain-tls-cert --set controller.service.type=LoadBalancer --set controller.admissionWebhooks.enabled=false stable/ingress-nginx --version=3.34.0 + ```bash + $ helm install nginx-ingress -n nginxssl --set controller.extraArgs.default-ssl-certificate=oigns/governancedomain-tls-cert --set controller.service.type=LoadBalancer --set controller.admissionWebhooks.enabled=false stable/ingress-nginx ``` - + + **Note**: If using Kubernetes 1.18 then add `--version=3.34.0` to the end of command. + The output will look similar to the following: ``` - $ helm install nginx-ingress -n nginxssl --set controller.extraArgs.default-ssl-certificate=oigns/governancedomain-tls-cert --set controller.service.type=LoadBalancer --set controller.admissionWebhooks.enabled=false stable/ingress-nginx --version=3.34.0 NAME: nginx-ingress - LAST DEPLOYED: Tue Sep 29 08:53:30 2020 + LAST DEPLOYED: Mon Nov 15 02:26:09 2021 NAMESPACE: nginxssl STATUS: deployed REVISION: 1 @@ -253,7 +268,7 @@ If you are using a Managed Service for your Kubernetes cluster, for example Orac An example Ingress that makes use of the controller: - apiVersion: extensions/v1beta1 + apiVersion: networking.k8s.io/v1 kind: Ingress metadata: annotations: @@ -265,10 +280,13 @@ If you are using a Managed Service for your Kubernetes cluster, for example Orac - host: www.example.com http: paths: - - backend: - serviceName: exampleService - servicePort: 80 - path: / + - path: / + pathType: Prefix + backend: + service: + name: exampleService + port: 80 + # This section is only required if TLS is to be enabled for the Ingress tls: - hosts: @@ -288,29 +306,19 @@ If you are using a Managed Service for your 
Kubernetes cluster, for example Orac type: kubernetes.io/tls ``` -#### Setup Routing Rules for the Domain +#### Setup routing rules for the domain 1. Setup routing rules by running the following commands: + ```bash + $ cd $WORKDIR/kubernetes/charts/ingress-per-domain ``` - $ cd /weblogic-kubernetes-operator/kubernetes/samples/charts/ingress-per-domain - $ cp values.yaml values.yaml.orig - $ vi values.yaml - ``` - -1. Edit `values.yaml` and ensure that the values `type=NGINX`, `tls=SSL` and `secretName=governancedomain-tls-cert` are set. Change the `domainUID` to the value of the domain e.g `governancedomain`, for example: - ``` - $ cat values.yaml - # Copyright 2020 Oracle Corporation and/or its affiliates. - # Licensed under the Universal Permissive License v 1.0 as shown at https://oss.oracle.com/licenses/upl. + Edit `values.yaml` and change the `domainUID` parameter to match your `domainUID`, for example `domainUID: governancedomain`. Also change `sslType` to `SSL`. The file should look as follows: - - # Default values for ingress-per-domain. - # This is a YAML-formatted file. - # Declare variables to be passed into your templates. - - # Load balancer type. Supported values are: VOYAGER, NGINX + + ``` + # Load balancer type. Supported values are: TRAEFIK, NGINX type: NGINX # Type of Configuration Supported Values are : NONSSL,SSL # tls: NONSSL @@ -322,44 +330,37 @@ If you are using a Managed Service for your Kubernetes cluster, for example Orac # WLS domain as backend to the load balancer wlsDomain: domainUID: governancedomain - oimClusterName: oim_cluster + adminServerName: AdminServer + adminServerPort: 7001 soaClusterName: soa_cluster soaManagedServerPort: 8001 + oimClusterName: oim_cluster oimManagedServerPort: 14000 - adminServerName: adminserver - adminServerPort: 7001 - - # Voyager specific values - voyager: - # web port - webPort: 30305 - # stats port - statsPort: 30315 ``` -#### Create an Ingress for the Domain +#### Create an ingress for the domain 1. 
Create an Ingress for the domain (`governancedomain-nginx`), in the domain namespace by using the sample Helm chart: - ``` - $ cd /weblogic-kubernetes-operator - $ helm install governancedomain-nginx kubernetes/samples/charts/ingress-per-domain --namespace oigns --values kubernetes/samples/charts/ingress-per-domain/values.yaml + ```bash + $ cd $WORKDIR + $ helm install governancedomain-nginx kubernetes/charts/ingress-per-domain --namespace oigns --values kubernetes/charts/ingress-per-domain/values.yaml ``` - **Note**: The `/weblogic-kubernetes-operator/kubernetes/samples/charts/ingress-per-domain/templates/nginx-ingress.yaml` has `nginx.ingress.kubernetes.io/enable-access-log` set to `false`. If you want to enable access logs then set this value to `true` before executing the command. Enabling access-logs can cause issues with disk space if not regularly maintained. + **Note**: The `$WORKDIR/kubernetes/charts/ingress-per-domain/templates/nginx-ingress-k8s1.19.yaml and nginx-ingress.yaml` has `nginx.ingress.kubernetes.io/enable-access-log` set to `false`. If you want to enable access logs then set this value to `true` before executing the command. Enabling access-logs can cause issues with disk space if not regularly maintained. For example: - ``` - $ cd /scratch/OIGDockerK8S/weblogic-kubernetes-operator - $ helm install governancedomain-nginx kubernetes/samples/charts/ingress-per-domain --namespace oigns --values kubernetes/samples/charts/ingress-per-domain/values.yaml + ```bash + $ cd $WORKDIR + $ helm install governancedomain-nginx kubernetes/charts/ingress-per-domain --namespace oigns --values kubernetes/charts/ingress-per-domain/values.yaml ``` The output will look similar to the following: ``` NAME: governancedomain-nginx - LAST DEPLOYED: Tue Sep 29 08:56:38 2020 + LAST DEPLOYED: Mon Nov 15 02:35:05 2021 NAMESPACE: oigns STATUS: deployed REVISION: 1 @@ -368,145 +369,139 @@ If you are using a Managed Service for your Kubernetes cluster, for example Orac 1. 
Run the following command to show the ingress is created successfully: - ``` + ```bash $ kubectl get ing -n ``` For example: - ``` + ```bash $ kubectl get ing -n oigns ``` The output will look similar to the following: ``` - NAME CLASS HOSTS ADDRESS PORTS AGE - governancedomain-nginx * 80 49s + NAME CLASS HOSTS ADDRESS PORTS AGE + governancedomain-nginx * x.x.x.x 80 49s ``` 1. Find the node port of NGINX using the following command: - ``` + ```bash $ kubectl get services -n nginxssl -o jsonpath="{.spec.ports[1].nodePort}" nginx-ingress-ingress-nginx-controller ``` The output will look similar to the following: ``` - 32033$ + 32033 ``` 1. Run the following command to check the ingress: - ``` + ```bash $ kubectl describe ing governancedomain-nginx -n ``` For example: - ``` + ```bash $ kubectl describe ing governancedomain-nginx -n oigns ``` The output will look similar to the following: ``` - Name: governancedomain-nginx Namespace: oigns - Address: 10.103.131.225 + Address: 10.96.160.58 Default backend: default-http-backend:80 () Rules: Host Path Backends ---- ---- -------- * - /console governancedomain-adminserver:7001 (10.244.1.42:7001) - /em governancedomain-adminserver:7001 (10.244.1.42:7001) - /soa governancedomain-cluster-soa-cluster:8001 (10.244.1.43:8001) - /integration governancedomain-cluster-soa-cluster:8001 (10.244.1.43:8001) - /soa-infra governancedomain-cluster-soa-cluster:8001 (10.244.1.43:8001) - governancedomain-cluster-oim-cluster:14000 (10.244.1.44:14000) + /console governancedomain-adminserver:7001 (10.244.2.96:7001) + /em governancedomain-adminserver:7001 (10.244.2.96:7001) + /soa governancedomain-cluster-soa-cluster:8001 (10.244.2.97:8001) + /integration governancedomain-cluster-soa-cluster:8001 (10.244.2.97:8001) + /soa-infra governancedomain-cluster-soa-cluster:8001 (10.244.2.97:8001) + /identity governancedomain-cluster-oim-cluster:14000 (10.244.2.98:14000) + /admin governancedomain-cluster-oim-cluster:14000 (10.244.2.98:14000) + /oim 
governancedomain-cluster-oim-cluster:14000 (10.244.2.98:14000) + /sysadmin governancedomain-cluster-oim-cluster:14000 (10.244.2.98:14000) + /workflowservice governancedomain-cluster-oim-cluster:14000 (10.244.2.98:14000) + /xlWebApp governancedomain-cluster-oim-cluster:14000 (10.244.2.98:14000) + /Nexaweb governancedomain-cluster-oim-cluster:14000 (10.244.2.98:14000) + /callbackResponseService governancedomain-cluster-oim-cluster:14000 (10.244.2.98:14000) + /spml-xsd governancedomain-cluster-oim-cluster:14000 (10.244.2.98:14000) + /HTTPClnt governancedomain-cluster-oim-cluster:14000 (10.244.2.98:14000) + /reqsvc governancedomain-cluster-oim-cluster:14000 (10.244.2.98:14000) + /iam governancedomain-cluster-oim-cluster:14000 (10.244.2.98:14000) + /provisioning-callback governancedomain-cluster-oim-cluster:14000 (10.244.2.98:14000) + /CertificationCallbackService governancedomain-cluster-oim-cluster:14000 (10.244.2.98:14000) + /ucs governancedomain-cluster-oim-cluster:14000 (10.244.2.98:14000) + /FacadeWebApp governancedomain-cluster-oim-cluster:14000 (10.244.2.98:14000) + /OIGUI governancedomain-cluster-oim-cluster:14000 (10.244.2.98:14000) + /weblogic governancedomain-cluster-oim-cluster:14000 (10.244.2.98:14000) Annotations: kubernetes.io/ingress.class: nginx meta.helm.sh/release-name: governancedomain-nginx meta.helm.sh/release-namespace: oigns + nginx.ingress.kubernetes.io/affinity: cookie nginx.ingress.kubernetes.io/configuration-snippet: more_set_input_headers "X-Forwarded-Proto: https"; more_set_input_headers "WL-Proxy-SSL: true"; + nginx.ingress.kubernetes.io/enable-access-log: false nginx.ingress.kubernetes.io/ingress.allow-http: false nginx.ingress.kubernetes.io/proxy-buffer-size: 2000k Events: - Type Reason Age From Message - ---- ------ ---- ---- ------- - Normal CREATE 5m4s nginx-ingress-controller Ingress oigns/governancedomain-nginx - Normal UPDATE 4m9s nginx-ingress-controller Ingress oigns/governancedomain-nginx + Type Reason Age From Message + ---- 
------ ---- ---- ------- + Normal Sync 17s (x2 over 28s) nginx-ingress-controller Scheduled for sync ``` -1. To confirm that the new Ingress is successfully routing to the domain's server pods, run the following command to send a request to the URL for the "WebLogic ReadyApp framework": +1. To confirm that the new Ingress is successfully routing to the domain's server pods, run the following command to send a request to the URL for the `WebLogic ReadyApp framework`: - ``` + ```bash $ curl -v -k https://${MASTERNODE-HOSTNAME}:${MASTERNODE-PORT}/weblogic/ready ``` For example: - ``` + ```bash $ curl -v -k https://masternode.example.com:32033/weblogic/ready ``` The output will look similar to the following: ``` - $ curl -v https://masternode.example.com:32033/weblogic/ready - * About to connect() to 12.345.678.9 port 32033 (#0) - * Trying 12.345.678.9... - * Connected to 12.345.678.9 (12.345.678.9) port 32033 (#0) + $ curl -v -k https://masternode.example.com:32033/weblogic/ready + * About to connect() to X.X.X.X port 32033 (#0) + * Trying X.X.X.X... 
+ * Connected to masternode.example.com (X.X.X.X) port 32033 (#0) * Initializing NSS with certpath: sql:/etc/pki/nssdb * skipping SSL peer certificate verification * SSL connection using TLS_ECDHE_RSA_WITH_AES_128_GCM_SHA256 * Server certificate: * subject: CN=masternode.example.com - * start date: Sep 29 14:52:35 2020 GMT - * expire date: Sep 29 14:52:35 2021 GMT + * start date: Nov 10 13:05:21 2021 GMT + * expire date: Nov 10 13:05:21 2022 GMT * common name: masternode.example.com * issuer: CN=masternode.example.com > GET /weblogic/ready HTTP/1.1 > User-Agent: curl/7.29.0 - > Host: 12.345.678.9:32033 + > Host: X.X.X.X:32033 > Accept: */* > < HTTP/1.1 200 OK < Server: nginx/1.19.1 - < Date: Tue, 29 Sep 2020 16:10:10 GMT + < Date: Mon, 15 Nov 2021 10:49:21 GMT < Content-Length: 0 < Connection: keep-alive < Strict-Transport-Security: max-age=15724800; includeSubDomains < - * Connection #0 to host 12.345.678.9 left intact + * Connection #0 to host X.X.X.X left intact ``` -#### Verify that You can Access the Domain URL - -After setting up the NGINX ingress, verify that the domain applications are accessible through the NGINX ingress port (for example 32033) as per [Validate Domain URLs ]({{< relref "/oig/validate-domain-urls" >}}) - -#### Cleanup - -If you need to remove the NGINX Ingress then remove the ingress with the following commands: - -``` -$ helm delete governancedomain-nginx -n oigns -$ helm delete nginx-ingress -n nginxssl -$ kubectl delete namespace nginxssl -``` - -The output will look similar to the following: - -``` -$ helm delete governancedomain-nginx -n oigns -release "governancedomain-nginx" uninstalled - -$ helm delete nginx-ingress -n nginxssl -release "nginx-ingress" uninstalled +#### Verify that you can access the domain URL -$ kubectl delete namespace nginxssl -namespace "nginxssl" deleted -``` \ No newline at end of file +After setting up the NGINX ingress, verify that the domain applications are accessible through the NGINX ingress port (for 
example 32033) as per [Validate Domain URLs]({{< relref "/oig/validate-domain-urls" >}}).
[Verify that you can access the domain URL](#verify-that-you-can-access-the-domain-url) ### Install NGINX @@ -33,7 +37,6 @@ Use helm to install NGINX. The output will look similar to the following: ``` - $ helm repo add stable https://kubernetes.github.io/ingress-nginx "stable" has been added to your repositories ``` @@ -45,17 +48,17 @@ Use helm to install NGINX. The output will look similar to the following: - ```bash + ``` Hang tight while we grab the latest from your chart repositories... ...Successfully got an update from the "stable" chart repository Update Complete. Happy Helming! ``` -#### Create a Namespace +#### Create a namespace 1. Create a Kubernetes namespace for NGINX by running the following command: - ``` + ```bash $ kubectl create namespace nginx ``` @@ -75,24 +78,26 @@ If you are using a Managed Service for your Kubernetes cluster,for example Oracl a) Using NodePort + ```bash + $ helm install nginx-ingress -n nginx --set controller.service.type=NodePort --set controller.admissionWebhooks.enabled=false stable/ingress-nginx ``` - $ helm install nginx-ingress -n nginx --set controller.service.type=NodePort --set controller.admissionWebhooks.enabled=false stable/ingress-nginx --version=3.34.0 - ``` + + **Note**: If using Kubernetes 1.18 then add `--version=3.34.0` to the end of command. The output will look similar to the following: ``` NAME: nginx-ingress - LAST DEPLOYED: Tue Sep 29 08:07:03 2020 + LAST DEPLOYED: Fri Nov 12 07:55:04 2021 NAMESPACE: nginx STATUS: deployed REVISION: 1 TEST SUITE: None NOTES: - The nginx-ingress controller has been installed. + The ingress-nginx controller has been installed. 
Get the application URL by running these commands: - export HTTP_NODE_PORT=$(kubectl --namespace nginx get services -o jsonpath="{.spec.ports[0].nodePort}" nginx-ingress-controller) - export HTTPS_NODE_PORT=$(kubectl --namespace nginx get services -o jsonpath="{.spec.ports[1].nodePort}" nginx-ingress-controller) + export HTTP_NODE_PORT=$(kubectl --namespace nginx get services -o jsonpath="{.spec.ports[0].nodePort}" nginx-ingress-ingress-nginx-controller) + export HTTPS_NODE_PORT=$(kubectl --namespace nginx get services -o jsonpath="{.spec.ports[1].nodePort}" nginx-ingress-ingress-nginx-controller) export NODE_IP=$(kubectl --namespace nginx get nodes -o jsonpath="{.items[0].status.addresses[1].address}") echo "Visit http://$NODE_IP:$HTTP_NODE_PORT to access your application via HTTP." @@ -100,7 +105,7 @@ If you are using a Managed Service for your Kubernetes cluster,for example Oracl An example Ingress that makes use of the controller: - apiVersion: extensions/v1beta1 + apiVersion: networking.k8s.io/v1 kind: Ingress metadata: annotations: @@ -108,19 +113,22 @@ If you are using a Managed Service for your Kubernetes cluster,for example Oracl name: example namespace: foo spec: + ingressClassName: example-class rules: - host: www.example.com http: paths: - - backend: - serviceName: exampleService - servicePort: 80 - path: / + - path: / + pathType: Prefix + backend: + service: + name: exampleService + port: 80 # This section is only required if TLS is to be enabled for the Ingress tls: - - hosts: - - www.example.com - secretName: example-tls + - hosts: + - www.example.com + secretName: example-tls If TLS is enabled for the Ingress, a Secret containing the certificate and key must also be provided: @@ -137,15 +145,17 @@ If you are using a Managed Service for your Kubernetes cluster,for example Oracl b) Using LoadBalancer + ```bash + $ helm install nginx-ingress -n nginx --set controller.service.type=LoadBalancer --set controller.admissionWebhooks.enabled=false 
stable/ingress-nginx ``` - $ helm install nginx-ingress -n nginx --set controller.service.type=LoadBalancer --set controller.admissionWebhooks.enabled=false stable/ingress-nginx --version=3.34.0 - ``` + + **Note**: If using Kubernetes 1.18 then add `--version=3.34.0` to the end of command. The output will look similar to the following: ``` NAME: nginx-ingress - LAST DEPLOYED: Tue Sep 29 08:07:03 2020 + LAST DEPLOYED: Fri Nov 12 07:59:17 2021 NAMESPACE: nginx STATUS: deployed REVISION: 1 @@ -157,7 +167,7 @@ If you are using a Managed Service for your Kubernetes cluster,for example Oracl An example Ingress that makes use of the controller: - apiVersion: extensions/v1beta1 + apiVersion: networking.k8s.io/v1 kind: Ingress metadata: annotations: @@ -165,19 +175,22 @@ If you are using a Managed Service for your Kubernetes cluster,for example Oracl name: example namespace: foo spec: + ingressClassName: example-class rules: - host: www.example.com http: paths: - - backend: - serviceName: exampleService - servicePort: 80 - path: / + - path: / + pathType: Prefix + backend: + service: + name: exampleService + port: 80 # This section is only required if TLS is to be enabled for the Ingress tls: - - hosts: - - www.example.com - secretName: example-tls + - hosts: + - www.example.com + secretName: example-tls If TLS is enabled for the Ingress, a Secret containing the certificate and key must also be provided: @@ -192,80 +205,62 @@ If you are using a Managed Service for your Kubernetes cluster,for example Oracl type: kubernetes.io/tls ``` -#### Setup Routing Rules for the Domain +#### Setup routing rules for the domain 1. Setup routing rules by running the following commands: + ```bash + $ cd $WORKDIR/kubernetes/charts/ingress-per-domain ``` - $ cd /weblogic-kubernetes-operator/kubernetes/samples/charts/ingress-per-domain - $ cp values.yaml values.yaml.orig - $ vi values.yaml - ``` - - Edit `values.yaml` and ensure that `type=NGINX` and `tls=NONSSL` are set. 
Change the `domainUID` to the value of the domain e.g `governancedomain`, for example: + + Edit `values.yaml` and change the `domainUID` parameter to match your `domainUID`, for example `domainUID: governancedomain`. Also change `sslType` to `NONSSL`. The file should look as follows: ``` - $ cat values.yaml - # Copyright 2020 Oracle Corporation and/or its affiliates. - # Licensed under the Universal Permissive License v 1.0 as shown at https://oss.oracle.com/licenses/upl. - + # Load balancer type. Supported values are: TRAEFIK, NGINX + type: NGINX - # Default values for ingress-per-domain. - # This is a YAML-formatted file. - # Declare variables to be passed into your templates. + # Type of Configuration Supported Values are : NONSSL, SSL + sslType: NONSSL - # Load balancer type. Supported values are: VOYAGER, NGINX - type: NGINX - # Type of Configuration Supported Values are : NONSSL,SSL - # tls: NONSSL - tls: NONSSL # TLS secret name if the mode is SSL secretName: domain1-tls-cert - - # WLS domain as backend to the load balancer + #WLS domain as backend to the load balancer wlsDomain: domainUID: governancedomain - oimClusterName: oim_cluster + adminServerName: AdminServer + adminServerPort: 7001 soaClusterName: soa_cluster soaManagedServerPort: 8001 + oimClusterName: oim_cluster oimManagedServerPort: 14000 - adminServerName: adminserver - adminServerPort: 7001 - - # Voyager specific values - voyager: - # web port - webPort: 30305 - # stats port - statsPort: 30315 ``` -### Create an Ingress for the Domain +### Create an ingress for the domain 1. 
Create an Ingress for the domain (`governancedomain-nginx`), in the domain namespace by using the sample Helm chart: - ``` - $ cd /weblogic-kubernetes-operator - $ helm install governancedomain-nginx kubernetes/samples/charts/ingress-per-domain --namespace --values kubernetes/samples/charts/ingress-per-domain/values.yaml + ```bash + $ cd $WORKDIR + $ helm install governancedomain-nginx kubernetes/charts/ingress-per-domain --namespace --values kubernetes/charts/ingress-per-domain/values.yaml ``` - **Note**: The `/weblogic-kubernetes-operator/kubernetes/samples/charts/ingress-per-domain/templates/nginx-ingress.yaml` has `nginx.ingress.kubernetes.io/enable-access-log` set to `false`. If you want to enable access logs then set this value to `true` before executing the command. Enabling access-logs can cause issues with disk space if not regularly maintained. + **Note**: The `/samples/kubernetes/charts/ingress-per-domain/templates/nginx-ingress-k8s1.19.yaml and nginx-ingress.yaml` has `nginx.ingress.kubernetes.io/enable-access-log` set to `false`. If you want to enable access logs then set this value to `true` before executing the command. Enabling access-logs can cause issues with disk space if not regularly maintained. 
For example: - ``` - $ cd /scratch/OIGDockerK8S/weblogic-kubernetes-operator - $ helm install governancedomain-nginx kubernetes/samples/charts/ingress-per-domain --namespace oigns --values kubernetes/samples/charts/ingress-per-domain/values.yaml + ```bash + $ cd $WORKDIR + $ helm install governancedomain-nginx kubernetes/charts/ingress-per-domain --namespace oigns --values kubernetes/charts/ingress-per-domain/values.yaml ``` The output will look similar to the following: ``` - $ helm install governancedomain-nginx kubernetes/samples/charts/ingress-per-domain --namespace oigns --values kubernetes/samples/charts/ingress-per-domain/values.yaml + $ helm install governancedomain-nginx kubernetes/charts/ingress-per-domain --namespace oigns --values kubernetes/charts/ingress-per-domain/values.yaml NAME: governancedomain-nginx - LAST DEPLOYED: Tue Sep 29 08:10:06 2020 + LAST DEPLOYED: Fri Nov 12 08:14:53 2021 NAMESPACE: oigns STATUS: deployed REVISION: 1 @@ -274,13 +269,13 @@ If you are using a Managed Service for your Kubernetes cluster,for example Oracl 1. Run the following command to show the ingress is created successfully: - ``` + ```bash $ kubectl get ing -n ``` For example: - ``` + ```bash $ kubectl get ing -n oigns ``` @@ -293,25 +288,25 @@ If you are using a Managed Service for your Kubernetes cluster,for example Oracl 1. Find the NodePort of NGINX using the following command (only if you installed NGINX using NodePort): - ``` + ```bash $ kubectl get services -n nginx -o jsonpath=”{.spec.ports[0].nodePort}” nginx-ingress-ingress-nginx-controller ``` The output will look similar to the following: ``` - 31578$ + 31530 ``` 1. 
Run the following command to check the ingress: - ``` - $ kubectl describe ing access-ingress -n + ```bash + $ kubectl describe ing governancedomain-ingress -n ``` For example: - ``` + ```bash $ kubectl describe ing governancedomain-nginx -n oigns ``` @@ -320,30 +315,49 @@ If you are using a Managed Service for your Kubernetes cluster,for example Oracl ``` Name: governancedomain-nginx Namespace: oigns - Address: 10.97.68.171 + Address: Default backend: default-http-backend:80 () Rules: Host Path Backends ---- ---- -------- * - /console governancedomain-adminserver:7001 (10.244.1.42:7001) - /em governancedomain-adminserver:7001 (10.244.1.42:7001) - /soa governancedomain-cluster-soa-cluster:8001 (10.244.1.43:8001) - /integration governancedomain-cluster-soa-cluster:8001 (10.244.1.43:8001) - /soa-infra governancedomain-cluster-soa-cluster:8001 (10.244.1.43:8001) - governancedomain-cluster-oim-cluster:14000 (10.244.1.44:14000) - Annotations: meta.helm.sh/release-name: governancedomain-nginx + /console governancedomain-adminserver:7001 (10.244.2.59:7001) + /em governancedomain-adminserver:7001 (10.244.2.59:7001) + /soa governancedomain-cluster-soa-cluster:8001 (10.244.2.60:8001) + /integration governancedomain-cluster-soa-cluster:8001 (10.244.2.60:8001) + /soa-infra governancedomain-cluster-soa-cluster:8001 (10.244.2.60:8001) + /identity governancedomain-cluster-oim-cluster:14000 (10.244.1.25:14000) + /admin governancedomain-cluster-oim-cluster:14000 (10.244.1.25:14000) + /oim governancedomain-cluster-oim-cluster:14000 (10.244.1.25:14000) + /sysadmin governancedomain-cluster-oim-cluster:14000 (10.244.1.25:14000) + /workflowservice governancedomain-cluster-oim-cluster:14000 (10.244.1.25:14000) + /xlWebApp governancedomain-cluster-oim-cluster:14000 (10.244.1.25:14000) + /Nexaweb governancedomain-cluster-oim-cluster:14000 (10.244.1.25:14000) + /callbackResponseService governancedomain-cluster-oim-cluster:14000 (10.244.1.25:14000) + /spml-xsd 
governancedomain-cluster-oim-cluster:14000 (10.244.1.25:14000) + /HTTPClnt governancedomain-cluster-oim-cluster:14000 (10.244.1.25:14000) + /reqsvc governancedomain-cluster-oim-cluster:14000 (10.244.1.25:14000) + /iam governancedomain-cluster-oim-cluster:14000 (10.244.1.25:14000) + /provisioning-callback governancedomain-cluster-oim-cluster:14000 (10.244.1.25:14000) + /CertificationCallbackService governancedomain-cluster-oim-cluster:14000 (10.244.1.25:14000) + /ucs governancedomain-cluster-oim-cluster:14000 (10.244.1.25:14000) + /FacadeWebApp governancedomain-cluster-oim-cluster:14000 (10.244.1.25:14000) + /OIGUI governancedomain-cluster-oim-cluster:14000 (10.244.1.25:14000) + /weblogic governancedomain-cluster-oim-cluster:14000 (10.244.1.25:14000) + Annotations: kubernetes.io/ingress.class: nginx + meta.helm.sh/release-name: governancedomain-nginx meta.helm.sh/release-namespace: oigns + nginx.ingress.kubernetes.io/affinity: cookie + nginx.ingress.kubernetes.io/enable-access-log: false Events: Type Reason Age From Message ---- ------ ---- ---- ------- - Normal CREATE 53s nginx-ingress-controller Ingress oigns/governancedomain-nginx - Normal UPDATE 42s nginx-ingress-controller Ingress oigns/governancedomain-nginx + Normal Sync 35s nginx-ingress-controller Scheduled for sync ``` -1. To confirm that the new Ingress is successfully routing to the domain's server pods, run the following command to send a request to the URL for the "WebLogic ReadyApp framework": +1. 
To confirm that the new ingress is successfully routing to the domain's server pods, run the following command to send a request to the URL for the `WebLogic ReadyApp framework`: - ``` + ```bash $ curl -v http://${MASTERNODE-HOSTNAME}:${MASTERNODE-PORT}/weblogic/ready ``` @@ -351,63 +365,38 @@ If you are using a Managed Service for your Kubernetes cluster,for example Oracl a) For NodePort - ``` - $ curl -v http://masternode.example.com:31578/weblogic/ready + ```bash + $ curl -v http://masternode.example.com:31530/weblogic/ready ``` b) For LoadBalancer - ``` + ```bash $ curl -v http://masternode.example.com:80/weblogic/ready ``` The output will look similar to the following: ``` - $ curl -v -k http://masternode.example.com:31578/weblogic/ready - * About to connect() to masternode.example.com port 31578 (#0) - * Trying 12.345.67.890... - * Connected to masternode.example.com (12.345.67.890) port 31578 (#0) + $ curl -v http://masternode.example.com:31530/weblogic/ready + * About to connect() to masternode.example.com port 31530 (#0) + * Trying X.X.X.X... 
+ * Connected to masternode.example.com (X.X.X.X) port 31530 (#0) > GET /weblogic/ready HTTP/1.1 > User-Agent: curl/7.29.0 - > Host: masternode.example.com:31578 + > Host: masternode.example.com:31530 > Accept: */* > < HTTP/1.1 200 OK < Server: nginx/1.19.2 - < Date: Tue, 29 Sep 2020 15:16:20 GMT + < Date: Fri Nov 12 08:10:17 2021 < Content-Length: 0 < Connection: keep-alive < * Connection #0 to host masternode.example.com left intact ``` -### Verify that You can Access the Domain URL - -After setting up the NGINX ingress, verify that the domain applications are accessible through the NGINX ingress port (for example 31578) as per [Validate Domain URLs ]({{< relref "/oig/validate-domain-urls" >}}) - -### Cleanup - -If you need to remove the NGINX Ingress (for example to setup NGINX with SSL) then remove the ingress with the following commands: - -``` -$ helm delete governancedomain-nginx -n oigns - -$ helm delete nginx-ingress -n nginx - -$ kubectl delete namespace nginx - -``` - -The output will look similar to the following: - -``` -$ helm delete governancedomain-nginx -n oigns -release "governancedomain-nginx" uninstalled +### Verify that you can access the domain URL -$ helm delete nginx-ingress -n nginx -release "nginx-ingress" uninstalled +After setting up the NGINX ingress, verify that the domain applications are accessible through the NGINX ingress port (for example 31530) as per [Validate Domain URLs ]({{< relref "/oig/validate-domain-urls" >}}) -$ kubectl delete namespace nginx -namespace "nginx" deleted -``` \ No newline at end of file diff --git a/docs-source/content/oig/create-oig-domains/_index.md b/docs-source/content/oig/create-oig-domains/_index.md index 1d2f4a2c8..7ec89aeb3 100644 --- a/docs-source/content/oig/create-oig-domains/_index.md +++ b/docs-source/content/oig/create-oig-domains/_index.md @@ -8,17 +8,27 @@ description = "Sample for creating an OIG domain home on an existing PV or PVC, 1. [Introduction](#introduction) 1. 
[Prerequisites](#prerequisites) -1. [Prepare the Create Domain Script](#prepare-the-create-domain-script) - 1. [Edit Configuration Parameters](#edit-configuration-parameters) -1. [Run the Create Domain Script](#run-the-create-domain-script) - 1. [Generate the Create Domain Script](#generate-the-create-domain-script) - 1. [Create Docker Registry Secret](#create-docker-registry-secret) - 1. [Setting the OIM Server Memory Parameters](#setting-the-oim-server-memory-parameters) - 1. [Run the Create Domain Scripts](#run-the-create-domain-scripts) -1. [Verify the Results](#verify-the-results) - 1. [Verify the Domain, Pods and Services](#verify-the-domain-pods-and-services) - 1. [Verify the Domain](#verify-the-domain) - 1. [Verify the Pods](#verify-the-pods) +1. [Prepare the create domain script](#prepare-the-create-domain-script) + + a. [Create Docker registry secret](#create-docker-registry-secret) + + b. [Edit configuration parameters](#edit-configuration-parameters) + +1. [Run the create domain script](#run-the-create-domain-script) + + a. [Generate the create domain script](#generate-the-create-domain-script) + + b. [Setting the OIM server memory parameters](#setting-the-oim-server-memory-parameters) + + c. [Run the create domain scripts](#run-the-create-domain-scripts) + +1. [Verify the results](#verify-the-results) + + a. [Verify the domain, pods and services](#verify-the-domain-pods-and-services) + + b. [Verify the domain](#verify-the-domain) + + c. [Verify the pods](#verify-the-pods) ### Introduction @@ -32,12 +42,11 @@ Before you begin, perform the following steps: 1. Ensure that you have executed all the preliminary steps documented in [Prepare your environment]({{< relref "/oig/prepare-your-environment" >}}). 1. Ensure that the database is up and running. 
-#### Create Docker Registry Secret +#### Create Docker registry secret This section should only be followed if you are using a registry to store your container images and have not downloaded the container image to the master and worker nodes. -1. Create a Docker Registry Secret with name `oig-docker`. The operator validates the presence of this secret. The OIG image has been manually loaded in [Install the OIG Docker Image]({{< relref "/oig/prepare-your-environment#install-the-oig-docker-image" >}}) so you can run this command as is. The presence of the secret is sufficient for creating the Kubernetes resource in the next step. - +1. Create a Docker Registry Secret with name `oig-docker`: ```bash $ kubectl create secret docker-registry oig-docker -n --docker-username='' --docker-password='' --docker-server='' --docker-email='' @@ -46,52 +55,42 @@ This section should only be followed if you are using a registry to store your c For example: ```bash - $ kubectl create secret docker-registry oig-docker -n oigns --docker-username='' --docker-password='' --docker-server='' --docker-email='' + $ kubectl create secret docker-registry oig-docker -n oigns --docker-username='user1' --docker-password='' --docker-server='https://registry.example.com' --docker-email='user1@example.com' ``` - **Note**: The above command should be run as described. Do not change anything other than the ``. - - - The output will look similar to the following: - ```bash + ``` secret/oig-docker created ``` -### Prepare the Create Domain Script +### Prepare the create domain script -The sample scripts for Oracle Identity Governance domain deployment are available at `/kubernetes/samples/scripts/create-oim-domain`. +The sample scripts for Oracle Identity Governance domain deployment are available at `$WORKDIR/kubernetes/create-oim-domain`. 1. 
Make a copy of the `create-domain-inputs.yaml` file: ```bash - $ cd /weblogic-kubernetes-operator/kubernetes/samples/scripts/create-oim-domain/domain-home-on-pv + $ cd $WORKDIR/kubernetes/create-oim-domain/domain-home-on-pv $ cp create-domain-inputs.yaml create-domain-inputs.yaml.orig ``` - - For example: - - ```bash - $ cd /scratch/OIGDockerK8S/weblogic-kubernetes-operator/kubernetes/samples/scripts/create-oim-domain/domain-home-on-pv - $ cp create-domain-inputs.yaml create-domain-inputs.yaml.orig - ``` You must edit `create-domain-inputs.yaml` (or a copy of it) to provide the details for your domain. Please refer to the configuration parameters below to understand the information that you must provide in this file. -#### Edit Configuration Parameters +#### Edit configuration parameters 1. Edit the `create-domain-inputs.yaml` and modify the following parameters. Save the file when complete: - ```bash + ``` domainUID: domainHome: /u01/oracle/user_projects/domains/ image: - namespace: + imagePullSecretName: weblogicCredentialsSecretName: - persistentVolumeClaimName: logHome: /u01/oracle/user_projects/domains/logs/ + namespace: + persistentVolumeClaimName: rcuSchemaPrefix: rcuDatabaseURL: :/ rcuCredentialsSecret: @@ -101,14 +100,15 @@ The sample scripts for Oracle Identity Governance domain deployment are availabl For example: - ```bash + ``` domainUID: governancedomain domainHome: /u01/oracle/user_projects/domains/governancedomain - image: oracle/oig:12.2.1.4.0 - namespace: oigns + image: oracle/oig:12.2.1.4.0-8-ol7-211022.0723 + imagePullSecretName: oig-docker weblogicCredentialsSecretName: oig-domain-credentials - persistentVolumeClaimName: governancedomain-domain-pvc logHome: /u01/oracle/user_projects/domains/logs/governancedomain + namespace: oigns + persistentVolumeClaimName: governancedomain-domain-pvc rcuSchemaPrefix: OIGK8S rcuDatabaseURL: mydatabasehost.example.com:1521/orcl.example.com rcuCredentialsSecret: oig-rcu-credentials @@ -116,9 +116,9 @@ The 
sample scripts for Oracle Identity Governance domain deployment are availabl frontEndPort: 14100 ``` - **Note**: `frontEndHost` and should be set to the entry point host and port for OIM. This can be changed later in [Set OIMFrontendURL using MBeans](../post-install-config). + **Note**: `frontEndHost` and `frontEndPort` should be set to the entry point host and port for OIM. This can be changed later in [Set OIMFrontendURL using MBeans](../post-install-config). - **Note**: If using a container registry for your container images then you need to set `image` to the repository image name and `imagePullSecretName` to the name of the secret created earlier e.g: `oig-docker`. + **Note**: If using a container registry for your container images then you need to set `image` to the repository image name and `imagePullSecretName` to the name of the secret created earlier e.g: `oig-docker`. If not using a docker registry to pull docker images, comment out `imagePullSecretName: `. A full list of parameters in the `create-domain-inputs.yaml` file are shown below: @@ -172,15 +172,15 @@ The sample demonstrates how to create an OIG domain home and associated Kubernet that has one cluster only. In addition, the sample provides the capability for users to supply their own scripts to create the domain home for other use cases. The generated domain YAML file could also be modified to cover more use cases. -### Run the Create Domain Script +### Run the create domain script -#### Generate the Create Domain Script +#### Generate the create domain script 1. 
Run the create domain script, specifying your inputs file and an output directory to store the generated artifacts: ```bash - $ cd /weblogic-kubernetes-operator/kubernetes/samples/scripts/create-oim-domain/domain-home-on-pv + $ cd $WORKDIR/kubernetes/create-oim-domain/domain-home-on-pv $ mkdir output $ ./create-domain.sh -i create-domain-inputs.yaml -o / ``` @@ -188,15 +188,14 @@ generated artifacts: For example: ```bash - $ cd /scratch/OIGDockerK8S/weblogic-kubernetes-operator/kubernetes/samples/scripts/create-oim-domain/domain-home-on-pv + $ cd $WORKDIR/kubernetes/create-oim-domain/domain-home-on-pv $ mkdir output $ ./create-domain.sh -i create-domain-inputs.yaml -o output ``` The output will look similar to the following: - ```bash - $ ./create-domain.sh -i create-domain-inputs.yaml -o output + ``` Input parameters being used export version="create-weblogic-sample-domain-inputs-v1" export adminPort="7001" @@ -209,7 +208,7 @@ generated artifacts: export initialManagedServerReplicas="1" export managedServerNameBase="oim_server" export managedServerPort="14000" - export image="oracle/oig:12.2.1.4.0" + export image="oracle/oig:12.2.1.4.0-8-ol7-211022.0723" export imagePullPolicy="IfNotPresent" export imagePullSecretName="oig-docker" export productionModeEnabled="true" @@ -220,7 +219,7 @@ generated artifacts: export exposeAdminT3Channel="false" export adminNodePort="30701" export exposeAdminNodePort="false" - export namespace="governancedomain" + export namespace="oigns" javaOptions=-Dweblogic.StdoutDebugEnabled=false export persistentVolumeClaimName="governancedomain-domain-pvc" export domainPVMountPath="/u01/oracle/user_projects/domains" @@ -228,10 +227,11 @@ generated artifacts: export createDomainScriptName="create-domain-job.sh" export createDomainFilesDir="wlst" export rcuSchemaPrefix="OIGK8S" - export rcuDatabaseURL="mydatabasehost.example.com:1521/orcl.example.com" + export rcuDatabaseURL="slc12cpn.us.oracle.com:1521/orcl.us.oracle.com" export 
rcuCredentialsSecret="oig-rcu-credentials" - export frontEndHost="100.102.48.49" - export frontEndPort="80" + export frontEndHost="masternode.example.com" + export frontEndPort="14100" + Generating output/weblogic-domains/governancedomain/create-domain-job.yaml Generating output/weblogic-domains/governancedomain/delete-domain-job.yaml @@ -246,27 +246,27 @@ generated artifacts: job.batch/governancedomain-create-fmw-infra-sample-domain-job created Waiting for the job to complete... status on iteration 1 of 40 - pod governancedomain-create-fmw-infra-sample-domain-job-dktkk status is Running + pod governancedomain-create-fmw-infra-sample-domain-job-8cww8 status is Running status on iteration 2 of 40 - pod governancedomain-create-fmw-infra-sample-domain-job-dktkk status is Running + pod governancedomain-create-fmw-infra-sample-domain-job-8cww8 status is Running status on iteration 3 of 40 - pod governancedomain-create-fmw-infra-sample-domain-job-dktkk status is Running + pod governancedomain-create-fmw-infra-sample-domain-job-8cww8 status is Running status on iteration 4 of 40 - pod governancedomain-create-fmw-infra-sample-domain-job-dktkk status is Running + pod governancedomain-create-fmw-infra-sample-domain-job-8cww8 status is Running status on iteration 5 of 40 - pod governancedomain-create-fmw-infra-sample-domain-job-dktkk status is Running + pod governancedomain-create-fmw-infra-sample-domain-job-8cww8 status is Running status on iteration 6 of 40 - pod governancedomain-create-fmw-infra-sample-domain-job-dktkk status is Running + pod governancedomain-create-fmw-infra-sample-domain-job-8cww8 status is Running status on iteration 7 of 40 - pod governancedomain-create-fmw-infra-sample-domain-job-dktkk status is Running + pod governancedomain-create-fmw-infra-sample-domain-job-8cww8 status is Running status on iteration 8 of 40 - pod governancedomain-create-fmw-infra-sample-domain-job-dktkk status is Running + pod 
governancedomain-create-fmw-infra-sample-domain-job-8cww8 status is Running status on iteration 9 of 40 - pod governancedomain-create-fmw-infra-sample-domain-job-dktkk status is Running + pod governancedomain-create-fmw-infra-sample-domain-job-8cww8 status is Running status on iteration 10 of 40 - pod governancedomain-create-fmw-infra-sample-domain-job-dktkk status is Running + pod governancedomain-create-fmw-infra-sample-domain-job-8cww8 status is Running status on iteration 11 of 40 - pod governancedomain-create-fmw-infra-sample-domain-job-dktkk status is Completed + pod governancedomain-create-fmw-infra-sample-domain-job-8cww8 status is Completed Domain governancedomain was created and will be started by the WebLogic Kubernetes Operator @@ -282,26 +282,20 @@ generated artifacts: **Note**: If the create domain script creation fails, refer to the [Troubleshooting](../troubleshooting) section. -#### Setting the OIM Server Memory Parameters +#### Setting the OIM server memory parameters 1. Navigate to the `output` directory: ```bash - $ cd /weblogic-kubernetes-operator/kubernetes/samples/scripts/create-oim-domain/domain-home-on-pv/output/weblogic-domains/governancedomain + $ cd $WORKDIR/kubernetes/create-oim-domain/domain-home-on-pv/output/weblogic-domains/governancedomain ``` - For example: - - ```bash - $ cd /scratch/OIGDockerK8S/weblogic-kubernetes-operator/kubernetes/samples/scripts/create-oim-domain/domain-home-on-pv/output/weblogic-domains/governancedomain - ``` - 1. Edit the `domain_oim_soa.yaml` and locate the section of the file starting with: `- clusterName: oim_cluster`. 
Immediately after the line: `topologyKey: "kubernetes.io/hostname"`, add the following lines: ``` env: - - name: USER_MEM_ARGS - value: "-Djava.security.egd=file:/dev/./urandom -Xms2408m -Xmx8192m" + - name: USER_MEM_ARGS + value: "-Djava.security.egd=file:/dev/./urandom -Xms2408m -Xmx8192m" ``` The file should looks as follows: @@ -334,25 +328,25 @@ generated artifacts: ``` -#### Run the Create Domain Scripts +#### Run the create domain scripts 1. Create the Kubernetes resource using the following command: ```bash - $ cd /weblogic-kubernetes-operator/kubernetes/samples/scripts/create-oim-domain/domain-home-on-pv/output/weblogic-domains/governancedomain + $ cd $WORKDIR/kubernetes/create-oim-domain/domain-home-on-pv/output/weblogic-domains/governancedomain $ kubectl apply -f domain.yaml ``` For example: ```bash - $ cd /scratch/OIGDockerK8S/weblogic-kubernetes-operator/kubernetes/samples/scripts/create-oim-domain/domain-home-on-pv/output/weblogic-domains/governancedomain + $ cd $WORKDIR/kubernetes/create-oim-domain/domain-home-on-pv/output/weblogic-domains/governancedomain $ kubectl apply -f domain.yaml ``` The output will look similar to the following: - ```bash + ``` domain.weblogic.oracle/governancedomain created ``` @@ -364,20 +358,20 @@ generated artifacts: The output will initially look similar to the following: - ```bash + ``` NAME READY STATUS RESTARTS AGE helper 1/1 Running 0 3h30m - governancedomain-create-fmw-infra-sample-domain-job-dktkk 0/1 Completed 0 27m + governancedomain-create-fmw-infra-sample-domain-job-8cww8 0/1 Completed 0 27m governancedomain-introspect-domain-job-p4brt 1/1 Running 0 6s ``` - The `introspect-domain-job` pod will be displayed first. Run the command again after several minutes and check to see that the AdminServer and SOA Server are both started. When started they should have `STATUS` = `Running` and `READY` = `1/1`. + The `introspect-domain-job` pod will be displayed first. 
Run the command again after several minutes and check to see that the Administration Server and SOA Server are both started. When started they should have `STATUS` = `Running` and `READY` = `1/1`. - ```bash - NAME READY STATUS RESTARTS AGE - helper 1/1 Running 0 3h38m + ``` + NAME READY STATUS RESTARTS AGE + helper 1/1 Running 0 3h38m governancedomain-adminserver 1/1 Running 0 7m30s - governancedomain-create-fmw-infra-sample-domain-job-dktkk 0/1 Completed 0 35m + governancedomain-create-fmw-infra-sample-domain-job-8cww8 0/1 Completed 0 35m governancedomain-soa-server1 1/1 Running 0 4m ``` @@ -392,26 +386,26 @@ generated artifacts: 1. Once both pods are running, start the OIM Server using the following command: ```bash - $ cd /weblogic-kubernetes-operator/kubernetes/samples/scripts/create-oim-domain/domain-home-on-pv/output/weblogic-domains/governancedomain + $ cd $WORKDIR/kubernetes/create-oim-domain/domain-home-on-pv/output/weblogic-domains/governancedomain/ $ kubectl apply -f domain_oim_soa.yaml ``` For example: ```bash - $ cd /scratch/OIGDockerK8S/weblogic-kubernetes-operator/kubernetes/samples/scripts/create-oim-domain/domain-home-on-pv/output/weblogic-domains/governancedomain + $ cd $WORKDIR/kubernetes/create-oim-domain/domain-home-on-pv/output/weblogic-domains/governancedomain/ $ kubectl apply -f domain_oim_soa.yaml ``` The output will look similar to the following: - ```bash + ``` domain.weblogic.oracle/governancedomain configured ``` -### Verify the Results +### Verify the results -#### Verify the Domain, Pods and Services +#### Verify the domain, pods and services 1. 
Verify the domain, servers pods and services are created and in the `READY` state with a `STATUS` of `1/1`, by running the following command: @@ -421,13 +415,13 @@ generated artifacts: The output will look similar to the following: - ```bash - NAME READY STATUS RESTARTS AGE - pod/helper 1/1 Running 0 3h40m + ``` + NAME READY STATUS RESTARTS AGE pod/governancedomain-adminserver 1/1 Running 0 16m - pod/governancedomain-create-fmw-infra-sample-domain-job-dktkk 0/1 Completed 0 36m + pod/governancedomain-create-fmw-infra-sample-domain-job-8cww8 0/1 Completed 0 36m pod/governancedomain-oim-server1 1/1 Running 0 5m57s pod/governancedomain-soa-server1 1/1 Running 0 13m + pod/helper 1/1 Running 0 3h40m NAME TYPE CLUSTER-IP EXTERNAL-IP PORT(S) AGE service/governancedomain-adminserver ClusterIP None 7001/TCP 16m @@ -465,217 +459,225 @@ The default domain created by the script has the following characteristics: * One started OIG managed Server, named `oim_server1`, listening on port `14000`. * One started SOA managed Server, named `soa_server1`, listening on port `8001`. * Log files that are located in `/logs/` - - -#### Verify the Domain +#### Verify the domain -To confirm that the domain was created, use this command: +1. 
Run the following command to describe the domain: -``` -$ kubectl describe domain -n -``` - -For example: -``` -$ kubectl describe domain governancedomain -n oigns -``` + ```bash + $ kubectl describe domain -n + ``` -Here is an example of the output of this command: + For example: + + ```bash + $ kubectl describe domain governancedomain -n oigns + ``` -``` -Name: governancedomain -Namespace: oigns -Labels: weblogic.domainUID=governancedomain -Annotations: API Version: weblogic.oracle/v8 -Kind: Domain -Metadata: - Creation Timestamp: 2020-09-29T14:08:09Z - Generation: 2 - Managed Fields: - API Version: weblogic.oracle/v8 - Fields Type: FieldsV1 - fieldsV1: - f:metadata: - f:annotations: - .: - f:kubectl.kubernetes.io/last-applied-configuration: - f:labels: - .: - f:weblogic.domainUID: - Manager: kubectl - Operation: Update - Time: 2020-09-29T14:19:58Z - API Version: weblogic.oracle/v8 - Fields Type: FieldsV1 - fieldsV1: - f:status: - .: - f:clusters: - f:conditions: - f:servers: - f:startTime: - Manager: OpenAPI-Generator - Operation: Update - Time: 2020-09-29T14:27:30Z - Resource Version: 1278400 - Self Link: /apis/weblogic.oracle/v8/namespaces/oigns/domains/governancedomain - UID: 94604c47-6995-43c5-8848-5c5975ba5ace -Spec: - Admin Server: - Server Pod: - Env: - Name: USER_MEM_ARGS - Value: -Djava.security.egd=file:/dev/./urandom -Xms512m -Xmx1024m - Server Start State: RUNNING - Clusters: - Cluster Name: soa_cluster - Replicas: 1 - Server Pod: - Affinity: - Pod Anti Affinity: - Preferred During Scheduling Ignored During Execution: - Pod Affinity Term: - Label Selector: - Match Expressions: - Key: weblogic.clusterName - Operator: In - Values: - $(CLUSTER_NAME) - Topology Key: kubernetes.io/hostname - Weight: 100 - Server Service: - Precreate Service: true - Server Start State: RUNNING - Cluster Name: oim_cluster - Replicas: 1 - Server Pod: - Affinity: - Pod Anti Affinity: - Preferred During Scheduling Ignored During Execution: - Pod Affinity Term: - Label 
Selector: - Match Expressions: - Key: weblogic.clusterName - Operator: In - Values: - $(CLUSTER_NAME) - Topology Key: kubernetes.io/hostname - Weight: 100 - Server Service: - Precreate Service: true - Server Start State: RUNNING - Data Home: - Domain Home: /u01/oracle/user_projects/domains/governancedomain - Domain Home Source Type: PersistentVolume - Http Access Log In Log Home: true - Image: oracle/oig:12.2.1.4.0 - Image Pull Policy: IfNotPresent - Image Pull Secrets: - Name: oig-docker - Include Server Out In Pod Log: true - Log Home: /u01/oracle/user_projects/domains/logs/governancedomain - Log Home Enabled: true - Server Pod: - Env: - Name: JAVA_OPTIONS - Value: -Dweblogic.StdoutDebugEnabled=false - Name: USER_MEM_ARGS - Value: -Djava.security.egd=file:/dev/./urandom -Xms256m -Xmx1024m - Volume Mounts: - Mount Path: /u01/oracle/user_projects/domains - Name: weblogic-domain-storage-volume - Volumes: - Name: weblogic-domain-storage-volume - Persistent Volume Claim: - Claim Name: governancedomain-domain-pvc - Server Start Policy: IF_NEEDED - Web Logic Credentials Secret: - Name: governancedomain-domain-credentials -Status: - Clusters: - Cluster Name: oim_cluster - Maximum Replicas: 5 - Minimum Replicas: 0 - Ready Replicas: 1 - Replicas: 1 - Replicas Goal: 1 - Cluster Name: soa_cluster - Maximum Replicas: 5 - Minimum Replicas: 0 - Ready Replicas: 1 - Replicas: 1 - Replicas Goal: 1 - Conditions: - Last Transition Time: 2020-09-29T14:25:51.338Z - Reason: ServersReady - Status: True - Type: Available - Servers: - Desired State: RUNNING - Health: - Activation Time: 2020-09-29T14:12:23.439Z - Overall Health: ok - Subsystems: - Subsystem Name: ServerRuntime - Symptoms: - Node Name: 10.250.111.112 - Server Name: AdminServer - State: RUNNING - Cluster Name: oim_cluster - Desired State: RUNNING - Health: - Activation Time: 2020-09-29T14:25:46.339Z - Overall Health: ok - Subsystems: - Subsystem Name: ServerRuntime - Symptoms: - Node Name: 10.250.111.112 - Server Name: 
oim_server1 - State: RUNNING - Cluster Name: oim_cluster - Desired State: SHUTDOWN - Server Name: oim_server2 - Cluster Name: oim_cluster - Desired State: SHUTDOWN - Server Name: oim_server3 - Cluster Name: oim_cluster - Desired State: SHUTDOWN - Server Name: oim_server4 - Cluster Name: oim_cluster - Desired State: SHUTDOWN - Server Name: oim_server5 - Cluster Name: soa_cluster - Desired State: RUNNING - Health: - Activation Time: 2020-09-29T14:15:11.288Z - Overall Health: ok - Subsystems: - Subsystem Name: ServerRuntime - Symptoms: - Node Name: 10.250.111.112 - Server Name: soa_server1 - State: RUNNING - Cluster Name: soa_cluster - Desired State: SHUTDOWN - Server Name: soa_server2 - Cluster Name: soa_cluster - Desired State: SHUTDOWN - Server Name: soa_server3 - Cluster Name: soa_cluster - Desired State: SHUTDOWN - Server Name: soa_server4 - Cluster Name: soa_cluster - Desired State: SHUTDOWN - Server Name: soa_server5 - Start Time: 2020-09-29T14:08:10.085Z -Events: -``` + The output will look similar to the following: -In the `Status` section of the output, the available servers and clusters are listed. 
+ ``` + Name: governancedomain + Namespace: oigns + Labels: weblogic.domainUID=governancedomain + Annotations: + API Version: weblogic.oracle/v8 + Kind: Domain + Metadata: + Creation Timestamp: 2021-11-12T14:50:18Z + Generation: 2 + Managed Fields: + API Version: weblogic.oracle/v8 + Fields Type: FieldsV1 + fieldsV1: + f:metadata: + f:annotations: + .: + f:kubectl.kubernetes.io/last-applied-configuration: + f:labels: + .: + f:weblogic.domainUID: + Manager: kubectl-client-side-apply + Operation: Update + Time: 2021-11-12T14:59:44Z + API Version: weblogic.oracle/v8 + Fields Type: FieldsV1 + fieldsV1: + f:status: + .: + f:clusters: + f:conditions: + f:introspectJobFailureCount: + f:servers: + f:startTime: + Manager: Kubernetes Java Client + Operation: Update + Time: 2021-11-12T14:59:49Z + Resource Version: 383381 + UID: ea95c549-c414-42a6-8de4-beaf1204872e + Spec: + Admin Server: + Server Pod: + Env: + Name: USER_MEM_ARGS + Value: -Djava.security.egd=file:/dev/./urandom -Xms512m -Xmx1024m + Server Start State: RUNNING + Clusters: + Cluster Name: soa_cluster + Replicas: 1 + Server Pod: + Affinity: + Pod Anti Affinity: + Preferred During Scheduling Ignored During Execution: + Pod Affinity Term: + Label Selector: + Match Expressions: + Key: weblogic.clusterName + Operator: In + Values: + $(CLUSTER_NAME) + Topology Key: kubernetes.io/hostname + Weight: 100 + Server Service: + Precreate Service: true + Server Start State: RUNNING + Cluster Name: oim_cluster + Replicas: 1 + Server Pod: + Affinity: + Pod Anti Affinity: + Preferred During Scheduling Ignored During Execution: + Pod Affinity Term: + Label Selector: + Match Expressions: + Key: weblogic.clusterName + Operator: In + Values: + $(CLUSTER_NAME) + Topology Key: kubernetes.io/hostname + Weight: 100 + Env: + Name: USER_MEM_ARGS + Value: -Djava.security.egd=file:/dev/./urandom -Xms2408m -Xmx8192m + Server Service: + Precreate Service: true + Server Start State: RUNNING + Data Home: + Domain Home: 
/u01/oracle/user_projects/domains/governancedomain + Domain Home Source Type: PersistentVolume + Http Access Log In Log Home: true + Image: oracle/oig:12.2.1.4.0-8-ol7-211022.0723 + Image Pull Policy: IfNotPresent + Include Server Out In Pod Log: true + Log Home: /u01/oracle/user_projects/domains/logs/governancedomain + Log Home Enabled: true + Server Pod: + Env: + Name: JAVA_OPTIONS + Value: -Dweblogic.StdoutDebugEnabled=false + Name: USER_MEM_ARGS + Value: -Djava.security.egd=file:/dev/./urandom -Xms256m -Xmx1024m + Volume Mounts: + Mount Path: /u01/oracle/user_projects/domains + Name: weblogic-domain-storage-volume + Volumes: + Name: weblogic-domain-storage-volume + Persistent Volume Claim: + Claim Name: governancedomain-domain-pvc + Server Start Policy: IF_NEEDED + Web Logic Credentials Secret: + Name: oig-domain-credentials + Status: + Clusters: + Cluster Name: oim_cluster + Maximum Replicas: 5 + Minimum Replicas: 0 + Ready Replicas: 1 + Replicas: 1 + Replicas Goal: 1 + Cluster Name: soa_cluster + Maximum Replicas: 5 + Minimum Replicas: 0 + Ready Replicas: 1 + Replicas: 1 + Replicas Goal: 1 + Conditions: + Last Transition Time: 2021-11-12T15:06:30.709900Z + Reason: ServersReady + Status: True + Type: Available + Introspect Job Failure Count: 0 + Servers: + Desired State: RUNNING + Health: + Activation Time: 2021-11-12T14:54:46.370000Z + Overall Health: ok + Subsystems: + Subsystem Name: ServerRuntime + Symptoms: + Node Name: 10.250.40.59 + Server Name: AdminServer + State: RUNNING + Cluster Name: oim_cluster + Desired State: RUNNING + Health: + Activation Time: 2021-11-12T15:06:21.693000Z + Overall Health: ok + Subsystems: + Subsystem Name: ServerRuntime + Symptoms: + Node Name: 10.250.40.59 + Server Name: oim_server1 + State: RUNNING + Cluster Name: oim_cluster + Desired State: SHUTDOWN + Server Name: oim_server2 + Cluster Name: oim_cluster + Desired State: SHUTDOWN + Server Name: oim_server3 + Cluster Name: oim_cluster + Desired State: SHUTDOWN + Server 
Name: oim_server4 + Cluster Name: oim_cluster + Desired State: SHUTDOWN + Server Name: oim_server5 + Cluster Name: soa_cluster + Desired State: RUNNING + Health: + Activation Time: 2021-11-12T14:57:49.506000Z + Overall Health: ok + Subsystems: + Subsystem Name: ServerRuntime + Symptoms: + Node Name: 10.250.40.59 + Server Name: soa_server1 + State: RUNNING + Cluster Name: soa_cluster + Desired State: SHUTDOWN + Server Name: soa_server2 + Cluster Name: soa_cluster + Desired State: SHUTDOWN + Server Name: soa_server3 + Cluster Name: soa_cluster + Desired State: SHUTDOWN + Server Name: soa_server4 + Cluster Name: soa_cluster + Desired State: SHUTDOWN + Server Name: soa_server5 + Start Time: 2021-11-12T14:50:19.148541Z + Events: + Type Reason Age From Message + ---- ------ ---- ---- ------- + Normal DomainCreated 19m weblogic.operator Domain resource governancedomain was created + Normal DomainProcessingCompleted 12m weblogic.operator Successfully completed processing domain resource governancedomain + Normal DomainChanged 10m weblogic.operator Domain resource governancedomain was changed + Normal DomainProcessingStarting 10m (x2 over 19m) weblogic.operator Creating or updating Kubernetes presence for WebLogic Domain with UID governancedomai + ``` + + In the `Status` section of the output, the available servers and clusters are listed. 
-#### Verify the Pods +#### Verify the pods Use the following command to see the pods running the servers and which nodes they are running on: @@ -691,16 +693,16 @@ $ kubectl get pods -n oigns -o wide The output will look similar to the following: -```bash -NAME READY STATUS RESTARTS AGE IP NODE NOMINATED NODE READINESS GATES -helper 1/1 Running 0 3h50m 10.244.1.39 10.250.111.112 -governancedomain-adminserver 1/1 Running 0 27m 10.244.1.42 10.250.111.112 -governancedomain-create-fmw-infra-sample-domain-job-dktkk 0/1 Completed 0 47m 10.244.1.40 10.250.111.112 -governancedomain-oim-server1 1/1 Running 0 16m 10.244.1.44 10.250.111.112 -governancedomain-soa-server1 1/1 Running 0 24m 10.244.1.43 10.250.111.112 +``` +NAME READY STATUS RESTARTS AGE IP NODE NOMINATED NODE READINESS GATES +helper 1/1 Running 0 3h50m 10.244.1.39 worker-node2 +governancedomain-adminserver 1/1 Running 0 27m 10.244.1.42 worker-node2 +governancedomain-create-fmw-infra-sample-domain-job-8cww8 0/1 Completed 0 47m 10.244.1.40 worker-node2 +governancedomain-oim-server1 1/1 Running 0 16m 10.244.1.44 worker-node2 +governancedomain-soa-server1 1/1 Running 0 24m 10.244.1.43 worker-node2 ``` -You are now ready to configure an Ingress to direct traffic for your OIG domain as per [Configure an Ingress for an OIG domain](../configure-ingress). +You are now ready to configure an Ingress to direct traffic for your OIG domain as per [Configure an ingress for an OIG domain](../configure-ingress). diff --git a/docs-source/content/oig/create-or-update-image/_index.md b/docs-source/content/oig/create-or-update-image/_index.md new file mode 100644 index 000000000..19241b907 --- /dev/null +++ b/docs-source/content/oig/create-or-update-image/_index.md @@ -0,0 +1,366 @@ ++++ +title = "Create or update an image" +weight = 10 +pre = "10. " +description= "Create or update an Oracle Identity Governance (OIG) container image used for deploying OIG domains. 
An OIG container image can be created using the WebLogic Image Tool or using the Dockerfile approach." ++++ + + +As described in [Prepare Your Environment]({{< relref "/oig/prepare-your-environment" >}}) you can obtain or build OIG container images in the following ways: + +1. Download the latest prebuilt OIG container image from [My Oracle Support](https://support.oracle.com) by referring to the document ID 2723908.1. This image is prebuilt by Oracle and includes Oracle Identity Governance 12.2.1.4.0 and the latest PSU. + +1. Build your own OIG image using the WebLogic Image Tool or by using the dockerfile, scripts and base images from Oracle Container Registry (OCR). You can also build your own image by using only the dockerfile and scripts. [Building the OIG Image](https://github.com/oracle/docker-images/tree/master/OracleIdentityGovernance/#building-the-oig-image). + +If you have access to the My Oracle Support (MOS), and there is a need to build a new image with an interim or one off patch, it is recommended to use the WebLogic Image Tool to build an Oracle Identity Governance image for production deployments. + + +### Create or update an Oracle Identity Governance image using the WebLogic Image Tool + +Using the WebLogic Image Tool, you can [create]({{< relref "/oig/create-or-update-image/#create-an-image" >}}) a new Oracle Identity Governance image with PSU's and interim patches or [update]({{< relref "/oig/create-or-update-image/#update-an-image" >}}) an existing image with one or more interim patches. + +> **Recommendations:** +> * Use [create]({{< relref "/oig/create-or-update-image/#create-an-image" >}}) for creating a new Oracle Identity Governance image containing the Oracle Identity Governance binaries, bundle patch and interim patches. This is the recommended approach if you have access to the OIG patches because it optimizes the size of the image. 
+> * Use [update]({{< relref "/oig/create-or-update-image/#update-an-image" >}}) for patching an existing Oracle Identity Governance image with a single interim patch. Note that the patched image size may increase considerably due to additional image layers introduced by the patch application tool. + +#### Create an image + +#### Set up the WebLogic Image Tool + +* [Prerequisites](#prerequisites) +* [Set up the WebLogic Image Tool](#set-up-the-weblogic-image-tool) +* [Validate setup](#validate-setup) +* [WebLogic Image Tool build directory](#weblogic-image-tool-build-directory) +* [WebLogic Image Tool cache](#weblogic-image-tool-cache) + +##### Prerequisites + +Verify that your environment meets the following prerequisites: + +* Docker client and daemon on the build machine, with minimum Docker version 18.03.1.ce. +* Bash version 4.0 or later, to enable the command complete feature. +* JAVA_HOME environment variable set to the appropriate JDK location e.g: /scratch/export/oracle/product/jdk + +##### Set up the WebLogic Image Tool + +To set up the WebLogic Image Tool: + +1. Create a working directory and change to it: + + ```bash + $ mkdir + $ cd + ``` + + For example: + + ```bash + $ mkdir /scratch/imagetool-setup + $ cd /scratch/imagetool-setup + ``` +1. Download the latest version of the WebLogic Image Tool from the [releases page](https://github.com/oracle/weblogic-image-tool/releases/latest). + + ```bash + $ wget https://github.com/oracle/weblogic-image-tool/releases/download/release-X.X.X/imagetool.zip + ``` + + where X.X.X is the latest release referenced on the [releases page](https://github.com/oracle/weblogic-image-tool/releases/latest). + + +1. Unzip the release ZIP file in the `imagetool-setup` directory. + + ```bash + $ unzip imagetool.zip + ``` + +1. 
Execute the following commands to set up the WebLogic Image Tool: + + ```bash + $ cd /imagetool-setup/imagetool/bin + $ source setup.sh + ``` + + For example: + + ```bash + $ cd /scratch/imagetool-setup/imagetool/bin + $ source setup.sh + ``` + +##### Validate setup +To validate the setup of the WebLogic Image Tool: + +1. Enter the following command to retrieve the version of the WebLogic Image Tool: + + ``` bash + $ imagetool --version + ``` + +2. Enter `imagetool` then press the Tab key to display the available `imagetool` commands: + + ``` bash + $ imagetool + cache create help rebase update + ``` + +##### WebLogic Image Tool build directory + +The WebLogic Image Tool creates a temporary Docker context directory, prefixed by `wlsimgbuilder_temp`, every time the tool runs. Under normal circumstances, this context directory will be deleted. However, if the process is aborted or the tool is unable to remove the directory, it is safe for you to delete it manually. By default, the WebLogic Image Tool creates the Docker context directory under the user's home directory. If you prefer to use a different directory for the temporary context, set the environment variable `WLSIMG_BLDDIR`: + +``` bash +$ export WLSIMG_BLDDIR="/path/to/build/dir" +``` + +##### WebLogic Image Tool cache + +The WebLogic Image Tool maintains a local file cache store. This store is used to look up where the Java, WebLogic Server installers, and WebLogic Server patches reside in the local file system. By default, the cache store is located in the user's `$HOME/cache` directory. Under this directory, the lookup information is stored in the `.metadata` file. All automatically downloaded patches also reside in this directory. 
You can change the default cache store location by setting the environment variable `WLSIMG_CACHEDIR`: + +```bash +$ export WLSIMG_CACHEDIR="/path/to/cachedir" +``` + +##### Set up additional build scripts + +Creating an Oracle Identity Governance Docker image using the WebLogic Image Tool requires additional container scripts for Oracle Identity Governance domains. + +1. Clone the [docker-images](https://github.com/oracle/docker-images.git) repository to set up those scripts. In these steps, this directory is `DOCKER_REPO`: + + ```bash + $ cd /imagetool-setup + $ git clone https://github.com/oracle/docker-images.git + ``` + + For example: + + ```bash + $ cd /scratch/imagetool-setup + $ git clone https://github.com/oracle/docker-images.git + ``` + +>Note: If you want to create the image continue with the following steps, otherwise to update the image see [update an image](#update-an-image). + +#### Create an image + +After [setting up the WebLogic Image Tool]({{< relref "/oig/create-or-update-image/#set-up-the-weblogic-image-tool" >}}), follow these steps to use the WebLogic Image Tool to `create` a new Oracle Identity Governance image. + +##### Download the Oracle Identity Governance installation binaries and patches + +You must download the required Oracle Identity Governance installation binaries and patches as listed below from the [Oracle Software Delivery Cloud](https://edelivery.oracle.com/) and save them in a directory of your choice. + +The installation binaries and patches required are: + +* Oracle Identity and Access Management 12.2.1.4.0 + * fmw_12.2.1.4.0_idm.jar + +* Oracle Fusion Middleware 12c Infrastructure 12.2.1.4.0 + * fmw_12.2.1.4.0_infrastructure.jar + +* Oracle SOA Suite for Oracle Middleware 12.2.1.4.0 + * fmw_12.2.1.4.0_soa.jar + +* Oracle Service Bus 12.2.1.4.0 + * fmw_12.2.1.4.0_osb.jar + +* OIG and FMW Infrastructure Patches: + * View document ID 2723908.1 on [My Oracle Support](https://support.oracle.com). 
In the `Container Image Download/Patch Details` section, locate the `Oracle Identity Governance (OIG)` table. For the latest PSU click the `README` link in the `Documentation` column. In the README, locate the "Installed Software" section. All the patch numbers to be downloaded are listed here. Download all these individual patches from My Oracle Support. + +* Oracle JDK v8 + * jdk-8uXXX-linux-x64.tar.gz where XXX is the JDK version referenced in the README above. + +##### Update required build files + +The following files in the code repository location `/docker-images/OracleIdentityGovernance/imagetool/12.2.1.4.0` are used for creating the image: + +* `additionalBuildCmds.txt` +* `buildArgs` + +1. Edit the `/imagetool-setup/docker-images/OracleIdentityGovernance/imagetool/12.2.1.4.0/buildArgs` file and change `%DOCKER_REPO%`, `%JDK_VERSION%` and `%BUILDTAG%` appropriately. + + For example: + + ``` + create + --jdkVersion=8u311 + --type oig + --chown oracle:root + --version=12.2.1.4.0 + --tag=oig-latestpsu:12.2.1.4.0 + --pull + --installerResponseFile /scratch/imagetool-setup/docker-images/OracleFMWInfrastructure/dockerfiles/12.2.1.4/install.file,/scratch/imagetool-setup/docker-images/OracleSOASuite/dockerfiles/12.2.1.4.0/install/soasuite.response,/scratch/imagetool-setup/docker-images/OracleSOASuite/dockerfiles/12.2.1.4.0/install/osb.response,/scratch/imagetool-setup/docker-images/OracleIdentityGovernance/dockerfiles/12.2.1.4.0/idmqs.response + --additionalBuildCommands /scratch/imagetool-setup/docker-images/OracleIdentityGovernance/imagetool/12.2.1.4.0/additionalBuildCmds.txt + --additionalBuildFiles /scratch/imagetool-setup/docker-images/OracleIdentityGovernance/dockerfiles/12.2.1.4.0/container-scripts + ``` + +1. Edit the `/imagetool-setup/docker-images/OracleFMWInfrastructure/dockerfiles/12.2.1.4.0/install.file` and under the GENERIC section add the line INSTALL_TYPE="Fusion Middleware Infrastructure". 
For example: + + ``` + [GENERIC] + INSTALL_TYPE="Fusion Middleware Infrastructure" + DECLINE_SECURITY_UPDATES=true + SECURITY_UPDATES_VIA_MYORACLESUPPORT=false + ``` + +##### Create the image + +1. Add a JDK package to the WebLogic Image Tool cache. For example: + + ``` bash + $ imagetool cache addInstaller --type jdk --version 8uXXX --path /jdk-8uXXX-linux-x64.tar.gz + ``` + + where `XXX` is the JDK version downloaded + +1. Add the downloaded installation binaries to the WebLogic Image Tool cache. For example: + + ``` bash + $ imagetool cache addInstaller --type fmw --version 12.2.1.4.0 --path /fmw_12.2.1.4.0_infrastructure.jar + + $ imagetool cache addInstaller --type soa --version 12.2.1.4.0 --path /fmw_12.2.1.4.0_soa.jar + + $ imagetool cache addInstaller --type osb --version 12.2.1.4.0 --path /fmw_12.2.1.4.0_osb.jar + + $ imagetool cache addInstaller --type idm --version 12.2.1.4.0 --path /fmw_12.2.1.4.0_idm.jar + ``` + +1. Add the downloaded OPatch patch to the WebLogic Image Tool cache. For example: + + ``` bash + $ imagetool cache addEntry --key 28186730_13.9.4.2.7 --value /p28186730_139427_Generic.zip + ``` + +1. 
Add the rest of the downloaded product patches to the WebLogic Image Tool cache: + + ``` bash + $ imagetool cache addEntry --key _12.2.1.4.0 --value /p_122140_Generic.zip + ``` + + For example: + + ```bash + $ imagetool cache addEntry --key 33416868_12.2.1.4.0 --value /p33416868_122140_Generic.zip + $ imagetool cache addEntry --key 33453703_12.2.1.4.0 --value /p33453703_122140_Generic.zip + $ imagetool cache addEntry --key 32999272_12.2.1.4.0 --value /p32999272_122140_Generic.zip + $ imagetool cache addEntry --key 33093748_12.2.1.4.0 --value /p33093748_122140_Generic.zip + $ imagetool cache addEntry --key 33281560_12.2.1.4.0 --value /p33281560_122140_Generic.zip + $ imagetool cache addEntry --key 31544353_12.2.1.4.0 --value /p31544353_122140_Linux-x86-64.zip + $ imagetool cache addEntry --key 33313802_12.2.1.4.0 --value /p33313802_122140_Generic.zip + $ imagetool cache addEntry --key 33408307_12.2.1.4.0 --value /p33408307_122140_Generic.zip + $ imagetool cache addEntry --key 33286160_12.2.1.4.0 --value /p33286160_122140_Generic.zip + $ imagetool cache addEntry --key 32880070_12.2.1.4.0 --value /p32880070_122140_Generic.zip + $ imagetool cache addEntry --key 32905339_12.2.1.4.0 --value /p32905339_122140_Generic.zip + $ imagetool cache addEntry --key 32784652_12.2.1.4.0 --value /p32784652_122140_Generic.zip + ``` + +1. 
Edit the `/imagetool-setup/docker-images/OracleIdentityGovernance/imagetool/12.2.1.4.0/buildArgs` file and append the product patches and opatch patch as follows: + + ``` + --patches 33416868_12.2.1.4.0,33453703_12.2.1.4.0,32999272_12.2.1.4.0,33093748_12.2.1.4.0,33281560_12.2.1.4.0,31544353_12.2.1.4.0,33313802_12.2.1.4.0,33408307_12.2.1.4.0,33286160_12.2.1.4.0,32880070_12.2.1.4.0,32905339_12.2.1.4.0,32784652_12.2.1.4.0 + --opatchBugNumber=28186730_13.9.4.2.7 + ``` + + An example `buildArgs` file is now as follows: + + ``` + create + --jdkVersion=8u301 + --type oig + --version=12.2.1.4.0 + --tag=oig-latestpsu:12.2.1.4.0 + --pull + --installerResponseFile /scratch/imagetool-setup/docker-images/OracleFMWInfrastructure/dockerfiles/12.2.1.4/install.file,/scratch/docker-images/OracleSOASuite/dockerfiles/12.2.1.4.0/install/soasuite.response,/scratch/docker-images/OracleSOASuite/dockerfiles/12.2.1.4.0/install/osb.response,/scratch/docker-images/OracleIdentityGovernance/dockerfiles/12.2.1.4.0/idmqs.response + --additionalBuildCommands /scratch/imagetool-setup/docker-images/OracleIdentityGovernance/imagetool/12.2.1.4.0/additionalBuildCmds.txt + --additionalBuildFiles /scratch/imagetool-setup/docker-images/OracleIdentityGovernance/dockerfiles/12.2.1.4.0/container-scripts + --patches 33416868_12.2.1.4.0,33453703_12.2.1.4.0,32999272_12.2.1.4.0,33093748_12.2.1.4.0,33281560_12.2.1.4.0,31544353_12.2.1.4.0,33313802_12.2.1.4.0,33408307_12.2.1.4.0,33286160_12.2.1.4.0,32880070_12.2.1.4.0,32905339_12.2.1.4.0,32784652_12.2.1.4.0 + --opatchBugNumber=28186730_13.9.4.2.7 + ``` + + >Note: In the `buildArgs` file: + > * `--jdkVersion` value must match the `--version` value used in the `imagetool cache addInstaller` command for `--type jdk`. + > * `--version` value must match the `--version` value used in the `imagetool cache addInstaller` command for `--type idm`. + > * `--pull` always pulls the latest base Linux image `oraclelinux:7-slim` from the Docker registry. 
+ + Refer to [this page](https://oracle.github.io/weblogic-image-tool/userguide/tools/create-image/) for the complete list of options available with the WebLogic Image Tool `create` command. + +1. Create the Oracle Identity Governance image: + + ```bash + $ imagetool @ + ``` + >Note: Make sure that the absolute path to the `buildargs` file is prepended with a `@` character, as shown in the example above. + + For example: + + ```bash + $ imagetool @/docker-images/OracleIdentityGovernance/imagetool/12.2.1.4.0/buildArgs + ``` + +1. Check the created image using the `docker images` command: + + ```bash + $ docker images | grep oig + ``` + + The output will look similar to the following: + + ``` + oig-latestpsu 12.2.1.4.0 e391ed154bcb 50 seconds ago 4.43GB + ``` + +#### Update an image + +The steps below show how to update an existing Oracle Identity Governance image with an interim patch. In the examples below the image `oracle/oig:12.2.1.4.0` is updated with an interim patch. + +```bash +$ docker images + +REPOSITORY TAG IMAGE ID CREATED SIZE +oracle/oig 12.2.1.4.0 298fdb98e79c 3 months ago 4.42GB +``` + +1. [Set up the WebLogic Image Tool]({{< relref "/oig/create-or-update-image/#set-up-the-weblogic-image-tool" >}}). + +1. Download the required interim patch and latest Opatch (28186730) from [My Oracle Support](https://support.oracle.com) and save them in a directory of your choice. + +1. Add the OPatch patch to the WebLogic Image Tool cache, for example: + + ```bash + $ imagetool cache addEntry --key 28186730_13.9.4.2.7 --value /p28186730_139427_Generic.zip + ``` + +1. Execute the `imagetool cache addEntry` command for each patch to add the required patch(es) to the WebLogic Image Tool cache. For example, to add patch `p32701831_12214210607_Generic.zip`: + + ```bash + $ imagetool cache addEntry --key=33165837_12.2.1.4.210708 --value /p33165837_12214210708_Generic.zip + ``` + +1. 
Provide the following arguments to the WebLogic Image Tool `update` command: + + * `--fromImage` - Identify the image that needs to be updated. In the example below, the image to be updated is `oracle/oig:12.2.1.4.0`. + * `--patches` - Multiple patches can be specified as a comma-separated list. + * `--tag` - Specify the new tag to be applied for the image being built. + + Refer [here](https://oracle.github.io/weblogic-image-tool/userguide/tools/update-image/) for the complete list of options available with the WebLogic Image Tool `update` command. + + > Note: The WebLogic Image Tool cache should have the latest OPatch zip. The WebLogic Image Tool will update the OPatch if it is not already updated in the image. + + For example: + + ```bash + $ imagetool update --fromImage oracle/oig:12.2.1.4.0 --tag=oracle/oig-new:12.2.1.4.0 --patches=33165837_12.2.1.4.210708 --opatchBugNumber=28186730_13.9.4.2.7 + ``` + + > Note: If the command fails because the files in the image being upgraded are not owned by `oracle:oracle`, then add the parameter `--chown :` to correspond with the values returned in the error. + +1. Check the built image using the `docker images` command: + + ```bash + $ docker images | grep oig + ``` + + The output will look similar to the following: + + ``` + REPOSITORY TAG IMAGE ID CREATED SIZE + oracle/oig-new 12.2.1.4.0 0c8381922e95 16 seconds ago 4.91GB + oracle/oig 12.2.1.4.0 298fdb98e79c 3 months ago 4.42GB + ``` diff --git a/docs-source/content/oig/manage-oig-domains/_index.md b/docs-source/content/oig/manage-oig-domains/_index.md index 823784587..8e51ade94 100644 --- a/docs-source/content/oig/manage-oig-domains/_index.md +++ b/docs-source/content/oig/manage-oig-domains/_index.md @@ -1,5 +1,5 @@ +++ -title = "Manage OIG Domains" +title = "Manage OIG domains" weight = 9 pre = "9. " description= "This document provides steps to manage the OIG domain." 
diff --git a/docs-source/content/oig/manage-oig-domains/delete-domain-home.md b/docs-source/content/oig/manage-oig-domains/delete-domain-home.md index 4af1d06d0..7aef633da 100644 --- a/docs-source/content/oig/manage-oig-domains/delete-domain-home.md +++ b/docs-source/content/oig/manage-oig-domains/delete-domain-home.md @@ -7,23 +7,19 @@ description: "Learn about the steps to cleanup the OIG domain home." Sometimes in production, but most likely in testing environments, you might want to remove the domain home that is generated using the `create-domain.sh` script. -1. Run the following command to delete the jobs, domain, and configmaps: +1. Run the following command to delete the domain: ```bash - $ kubectl delete jobs -n - $ kubectl delete domain -n - $ kubectl delete configmaps -cm -n + $ cd $WORKDIR/kubernetes/delete-domain + $ ./delete-weblogic-domain-resources.sh -d ``` For example: ```bash - $ kubectl delete jobs governancedomain-create-fmw-infra-sample-domain-job -n oigns - $ kubectl delete domain governancedomain -n oigns - $ kubectl delete configmaps governancedomain-create-fmw-infra-sample-domain-job-cm -n oigns + $ cd $WORKDIR/kubernetes/delete-domain + $ ./delete-weblogic-domain-resources.sh -d governancedomain ``` - - 1. Drop the RCU schemas as follows: @@ -51,69 +47,119 @@ Sometimes in production, but most likely in testing environments, you might want -component WLS -component STB -component OIM -component SOAINFRA -component UCSUMS -f < /tmp/pwd.txt ``` -1. Delete the Persistent Volume and Persistent Volume Claim: + +1. Delete the contents of the persistent volume: ```bash - $ kubectl delete pv -n - $ kubectl delete pvc -n + $ rm -rf //governancedomainpv/* ``` - + For example: ```bash - $ kubectl delete pv governancedomain-domain-pv -n oigns - $ kubectl delete pvc governancedomain-domain-pvc -n oigns + $ rm -rf /scratch/OIGK8S/governancedomainpv/* ``` + -1. Delete the contents of the persistent volume, for example: +1. 
Delete the WebLogic Kubernetes Operator, by running the following command: ```bash - $ rm -rf //governancedomainpv/* + $ helm delete weblogic-kubernetes-operator -n opns ``` +1. Delete the label from the OIG namespace: + + ```bash + $ kubectl label namespaces weblogic-operator- + ``` + For example: ```bash - $ rm -rf /scratch/OIGDockerK8S/governancedomainpv/* + $ kubectl label namespaces oigns weblogic-operator- ``` +1. Delete the service account for the operator: + ```bash + $ kubectl delete serviceaccount -n + ``` + + For example: -5. Delete the WebLogic Kubernetes Operator, by running the following command: + ```bash + $ kubectl delete serviceaccount op-sa -n opns + ``` + +1. Delete the operator namespace: ```bash - $ helm delete weblogic-kubernetes-operator -n opns + $ kubectl delete namespace ``` -6. To delete NGINX: - + For example: + + ```bash + $ kubectl delete namespace opns + ``` + + +1. To delete NGINX: ```bash - $ helm delete governancedomain-nginx -n oigns - $ helm delete nginx-ingress -n nginx - $ kubectl delete namespace nginx + $ helm delete governancedomain-nginx-designconsole -n ``` - or if using SSL: + For example: + + ```bash + $ helm delete governancedomain-nginx-designconsole -n oigns + ``` + + Then run: + + ```bash + $ helm delete governancedomain-nginx -n + ``` + + For example: ```bash $ helm delete governancedomain-nginx -n oigns + ``` + + Then run: + + ```bash + $ helm delete nginx-ingress -n + ``` + + For example: + + ```bash $ helm delete nginx-ingress -n nginxssl + ``` + + Then delete the NGINX namespace: + + ```bash + $ kubectl delete namespace + ``` + + For example: + + ```bash $ kubectl delete namespace nginxssl ``` + + +1. Delete the OIG namespace: -7. 
To delete Voyager: - ```bash - $ helm delete governancedomain-voyager -n oigns - $ helm delete voyager-ingress -n voyager - $ kubectl delete namespace voyager + $ kubectl delete namespace ``` - or if using SSL: - + For example: ```bash - $ helm delete governancedomain-voyager -n oigns - $ helm delete voyager-ingress -n voyagerssl - $ kubectl delete namespace voyagerssl + $ kubectl delete namespace oigns ``` \ No newline at end of file diff --git a/docs-source/content/oig/manage-oig-domains/domain-lifecycle.md b/docs-source/content/oig/manage-oig-domains/domain-lifecycle.md index 986920906..0e4314660 100644 --- a/docs-source/content/oig/manage-oig-domains/domain-lifecycle.md +++ b/docs-source/content/oig/manage-oig-domains/domain-lifecycle.md @@ -1,14 +1,14 @@ --- -title: "Domain Life Cycle" +title: "Domain life cycle" weight: 1 pre : "1. " description: "Learn about the domain life cycle of an OIG domain." --- -1. [View Existing OIG Servers](#view-existing-oig-servers) -1. [Starting/Scaling up OIG Managed Servers](#startingscaling-up-oig-managed-servers) -1. [Stopping/Scaling down OIG Managed Servers](#stoppingscaling-down-oig-managed-servers) -1. [Stopping and Starting the AdminServer and Managed Servers](#stopping-and-starting-the-adminserver-and-managed-servers) +1. [View existing OIG servers](#view-existing-oig-servers) +1. [Starting/Scaling up OIG Managed servers](#startingscaling-up-oig-managed-servers) +1. [Stopping/Scaling down OIG Managed servers](#stoppingscaling-down-oig-managed-servers) +1. [Stopping and starting the Administration Server and Managed Servers](#stopping-and-starting-the-administration-server-and-managed-servers) As OIG domains use the WebLogic Kubernetes Operator, domain lifecycle operations are managed using the WebLogic Kubernetes Operator itself.
@@ -20,9 +20,9 @@ For more detailed information refer to [Domain Life Cycle](https://oracle.github Do not use the WebLogic Server Administration Console or Oracle Enterprise Manager Console to start or stop servers. {{% /notice %}} -### View Existing OIG Servers +### View existing OIG Servers -The default OIG deployment starts the AdminServer (`AdminServer`), one OIG Managed Server (`oim_server1`) and one SOA Managed Server (`soa_server1`). +The default OIG deployment starts the Administration Server (`AdminServer`), one OIG Managed Server (`oim_server1`) and one SOA Managed Server (`soa_server1`). The deployment also creates, but doesn't start, four extra OIG Managed Servers (`oim-server2` to `oim-server5`) and four more SOA Managed Servers (`soa_server2` to `soa_server5`). @@ -43,9 +43,9 @@ $ kubectl get pods -n oigns The output should look similar to the following: ``` -$ kubectl get pods -n oigns +NAME READY STATUS RESTARTS AGE governancedomain-adminserver 1/1 Running 0 23h -governancedomain-create-fmw-infra-sample-domain-job-dktkk 0/1 Completed 0 24h +governancedomain-create-fmw-infra-sample-domain-job-8cww8 0/1 Completed 0 24h governancedomain-oim-server1 1/1 Running 0 23h governancedomain-soa-server1 1/1 Running 0 23h ``` @@ -68,7 +68,7 @@ The number of OIG Managed Servers running is dependent on the `replicas` paramet **Note**: This opens an edit session for the domain where parameters can be changed using standard `vi` commands. -1. In the edit session search for "clusterName: oim_cluster" and look for the `replicas` parameter. By default the replicas parameter is set to "1" hence a single OIG Managed Server is started (`oim_server1`): +1. In the edit session search for `clusterName: oim_cluster` and look for the `replicas` parameter. 
By default the replicas parameter is set to "1" hence a single OIG Managed Server is started (`oim_server1`): ``` - clusterName: oim_cluster @@ -114,7 +114,7 @@ The number of OIG Managed Servers running is dependent on the `replicas` paramet 1. Run the following kubectl command to view the pods: - ``` + ```bash $ kubectl get pods -n ``` @@ -129,7 +129,7 @@ The number of OIG Managed Servers running is dependent on the `replicas` paramet ``` NAME READY STATUS RESTARTS AGE governancedomain-adminserver 1/1 Running 0 23h - governancedomain-create-fmw-infra-sample-domain-job-dktkk 0/1 Completed 0 24h + governancedomain-create-fmw-infra-sample-domain-job-8cww8 0/1 Completed 0 24h governancedomain-oim-server1 1/1 Running 0 23h governancedomain-oim-server2 0/1 Running 0 7s governancedomain-soa-server1 1/1 Running 0 23h @@ -140,7 +140,7 @@ The number of OIG Managed Servers running is dependent on the `replicas` paramet ``` NAME READY STATUS RESTARTS AGE governancedomain-adminserver 1/1 Running 0 23h - governancedomain-create-fmw-infra-sample-domain-job-dktkk 0/1 Completed 0 24h + governancedomain-create-fmw-infra-sample-domain-job-8cww8 0/1 Completed 0 24h governancedomain-oim-server1 1/1 Running 0 23h governancedomain-oim-server2 1/1 Running 0 5m27s governancedomain-soa-server1 1/1 Running 0 23h @@ -164,17 +164,17 @@ As mentioned in the previous section, the number of OIG Managed Servers running 1. Run the following kubectl command to edit the domain: - ``` + ```bash $ kubectl edit domain -n ``` For example: - ``` + ```bash $ kubectl edit domain governancedomain -n oigns ``` -1. In the edit session search for "clusterName: oim_cluster" and look for the `replicas` parameter. In the example below `replicas` is set to "2" hence two OIG Managed Servers are started (oim_server1 and oim_server2): +1. In the edit session search for `clusterName: oim_cluster` and look for the `replicas` parameter. 
In the example below `replicas` is set to "2" hence two OIG Managed Servers are started (`oim_server1` and `oim_server2`): ``` - clusterName: oim_cluster @@ -214,7 +214,7 @@ As mentioned in the previous section, the number of OIG Managed Servers running 1. Run the following kubectl command to view the pods: - ``` + ```bash $ kubectl get pods -n ``` @@ -227,39 +227,37 @@ As mentioned in the previous section, the number of OIG Managed Servers running The output will look similar to the following: ``` - $ kubectl get pods -n oigns NAME READY STATUS RESTARTS AGE governancedomain-adminserver 1/1 Running 0 23h - governancedomain-create-fmw-infra-sample-domain-job-dktkk 0/1 Completed 0 24h + governancedomain-create-fmw-infra-sample-domain-job-8cww8 0/1 Completed 0 24h governancedomain-oim-server1 1/1 Running 0 23h governancedomain-oim-server2 1/1 Terminating 0 7m30s governancedomain-soa-server1 1/1 Running 0 23h ``` - The exiting pod shows a `STATUS` of `Terminating` (governancedomain-oim-server2). The server may take a minute or two to stop, so keep executing the command until the pod has disappeared: + The exiting pod shows a `STATUS` of `Terminating` (`governancedomain-oim-server2`). The server may take a minute or two to stop, so keep executing the command until the pod has disappeared: ``` - $ kubectl get pods -n oigns NAME READY STATUS RESTARTS AGE governancedomain-adminserver 1/1 Running 0 23h - governancedomain-create-fmw-infra-sample-domain-job-dktkk 0/1 Completed 0 24h + governancedomain-create-fmw-infra-sample-domain-job-8cww8 0/1 Completed 0 24h governancedomain-oim-server1 1/1 Running 0 23h governancedomain-soa-server1 1/1 Running 0 23h ``` -### Stopping and Starting the AdminServer and Managed Servers +### Stopping and Starting the Administration Server and Managed Servers -To stop all the OIG Managed Servers and the AdminServer in one operation: +To stop all the OIG Managed Servers and the Administration Server in one operation: 1. 
Run the following kubectl command to edit the domain: - ``` + ```bash $ kubectl edit domain -n ``` For example: - ``` + ```bash $ kubectl edit domain governancedomain -n oigns ``` @@ -293,7 +291,7 @@ To stop all the OIG Managed Servers and the AdminServer in one operation: 1. Run the following kubectl command to view the pods: - ``` + ```bash $ kubectl get pods -n ``` @@ -308,7 +306,7 @@ To stop all the OIG Managed Servers and the AdminServer in one operation: ``` NAME READY STATUS RESTARTS AGE governancedomain-adminserver 1/1 Terminating 0 23h - governancedomain-create-fmw-infra-sample-domain-job-dktkk 0/1 Completed 0 24h + governancedomain-create-fmw-infra-sample-domain-job-8cww8 0/1 Completed 0 24h governancedomain-oim-server1 1/1 Terminating 0 23h governancedomain-soa-server1 1/1 Terminating 0 23h ``` @@ -317,10 +315,10 @@ To stop all the OIG Managed Servers and the AdminServer in one operation: ``` NAME READY STATUS RESTARTS AGE - governancedomain-create-fmw-infra-sample-domain-job-dktkk 0/1 Completed 0 24h + governancedomain-create-fmw-infra-sample-domain-job-8cww8 0/1 Completed 0 24h ``` -1. To start the AdminServer and Managed Servers up again, repeat the previous steps but change `serverStartPolicy: NEVER` to `IF_NEEDED` as follows: +1. To start the Administration Server and Managed Servers up again, repeat the previous steps but change `serverStartPolicy: NEVER` to `IF_NEEDED` as follows: ``` volumeMounts: @@ -335,7 +333,7 @@ To stop all the OIG Managed Servers and the AdminServer in one operation: 1. 
Run the following kubectl command to view the pods: - ``` + ```bash $ kubectl get pods -n ``` @@ -349,16 +347,16 @@ To stop all the OIG Managed Servers and the AdminServer in one operation: ``` NAME READY STATUS RESTARTS AGE - governancedomain-adminserver 0/1 Running 0 22s - governancedomain-create-fmw-infra-sample-domain-job-dktkk 0/1 Completed 0 24h + governancedomain-adminserver 0/1 Running 0 4s + governancedomain-create-fmw-infra-sample-domain-job-8cww8 0/1 Completed 0 24h ``` - The AdminServer pod will start followed by the OIG Managed Servers pods. This process will take several minutes, so keep executing the command until all the pods are running with `READY` status `1/1` : + The Administration Server pod will start followed by the OIG Managed Servers pods. This process will take several minutes, so keep executing the command until all the pods are running with `READY` status `1/1` : ``` NAME READY STATUS RESTARTS AGE governancedomain-adminserver 1/1 Running 0 6m57s - governancedomain-create-fmw-infra-sample-domain-job-dktkk 0/1 Completed 0 24h + governancedomain-create-fmw-infra-sample-domain-job-8cww8 0/1 Completed 0 24h governancedomain-oim-server1 1/1 Running 0 4m33s governancedomain-soa-server1 1/1 Running 0 4m33s ``` \ No newline at end of file diff --git a/docs-source/content/oig/manage-oig-domains/logging-and-visualization.md b/docs-source/content/oig/manage-oig-domains/logging-and-visualization.md index 757961fa8..da98fcc7b 100644 --- a/docs-source/content/oig/manage-oig-domains/logging-and-visualization.md +++ b/docs-source/content/oig/manage-oig-domains/logging-and-visualization.md @@ -1,5 +1,5 @@ --- -title: "Logging and Visualization" +title: "Logging and visualization" weight: 4 pre : "4. " description: "Describes the steps for logging and visualization with Elasticsearch and Kibana." 
@@ -9,7 +9,7 @@ After the OIG domain is set up you can publish operator and WebLogic Server logs In [Prepare your environment](../../prepare-your-environment) if you decided to use the Elasticsearch and Kibana by setting the parameter `elkIntegrationEnabled` to `true`, then the steps below must be followed to complete the setup. -If you did not set `elkIntegrationEnabled` to `true` and want to do so post configuration, run the following command: +If you did not set `elkIntegrationEnabled` to `true` and want to do so post configuration, run the following command from the `$WORKDIR` directory: ```bash $ helm upgrade --reuse-values --namespace operator --set "elkIntegrationEnabled=true" --set "logStashImage=logstash:6.6.0" --set "elasticSearchHost=elasticsearch.default.svc.cluster.local" --set "elasticSearchPort=9200" --wait weblogic-kubernetes-operator kubernetes/charts/weblogic-operator @@ -17,10 +17,10 @@ If you did not set `elkIntegrationEnabled` to `true` and want to do so post conf The output will look similar to the following: - ```bash + ``` Release "weblogic-kubernetes-operator" has been upgraded. Happy Helming! NAME: weblogic-kubernetes-operator - LAST DEPLOYED: Tue Aug 18 05:57:11 2020 + LAST DEPLOYED: Mon Nov 15 09:04:11 2021 NAMESPACE: operator STATUS: deployed REVISION: 3 @@ -32,13 +32,7 @@ If you did not set `elkIntegrationEnabled` to `true` and want to do so post conf 1. 
Create the Kubernetes resource using the following command: ```bash - $ kubectl apply -f /weblogic-kubernetes-operator/kubernetes/samples/scripts/elasticsearch-and-kibana/elasticsearch_and_kibana.yaml - ``` - - For example: - - ```bash - $ kubectl apply -f /scratch/OIGDockerK8S/weblogic-kubernetes-operator/kubernetes/samples/scripts/elasticsearch-and-kibana/elasticsearch_and_kibana.yaml + $ kubectl apply -f $WORKDIR/kubernetes/elasticsearch-and-kibana/elasticsearch_and_kibana.yaml ``` The output will look similar to the following: @@ -60,35 +54,41 @@ If you did not set `elkIntegrationEnabled` to `true` and want to do so post conf ``` COMPUTED VALUES: - dedicated: false + clusterSizePaddingValidationEnabled: true + domainNamespaceLabelSelector: weblogic-operator=enabled + domainNamespaceSelectionStrategy: LabelSelector domainNamespaces: - - oigns + - default elasticSearchHost: elasticsearch.default.svc.cluster.local elasticSearchPort: 9200 elkIntegrationEnabled: true + enableClusterRoleBinding: true externalDebugHttpPort: 30999 externalRestEnabled: false externalRestHttpsPort: 31001 - image: weblogic-kubernetes-operator:3.0.1 + externalServiceNameSuffix: -ext + image: weblogic-kubernetes-operator:3.3.0 imagePullPolicy: IfNotPresent internalDebugHttpPort: 30999 - istioEnabled: false - javaLoggingLevel: INFO + introspectorJobNameSuffix: -introspector + javaLoggingFileCount: 10 + javaLoggingFileSizeLimit: 20000000 + javaLoggingLevel: FINE logStashImage: logstash:6.6.0 remoteDebugNodePortEnabled: false - serviceAccount: operator-serviceaccount + serviceAccount: op-sa suspendOnDebugStartup: false ``` 1. 
To check that Elasticsearch and Kibana are deployed in the Kubernetes cluster, run the following command: - ``` + ```bash $ kubectl get pods ``` The output will look similar to the following: - ```bash + ``` NAME READY STATUS RESTARTS AGE elasticsearch-857bd5ff6b-tvqdn 1/1 Running 0 2m9s kibana-594465687d-zc2rt 1/1 Running 0 2m9s @@ -113,7 +113,7 @@ OIG Server logs can be pushed to the Elasticsearch server using the `logstash` p The output will look similar to the following: - ```bash + ``` NAME CAPACITY ACCESS MODES RECLAIM POLICY STATUS CLAIM STORAGECLASS REASON AGE governancedomain-domain-pv 10Gi RWX Retain Bound oigns/governancedomain-domain-pvc governancedomain-oim-storage-class 28h ``` @@ -134,11 +134,11 @@ OIG Server logs can be pushed to the Elasticsearch server using the `logstash` p The output will look similar to the following: - ```bash + ``` Mount Path: /u01/oracle/user_projects/domains ``` -1. Navigate to the `/weblogic-kubernetes-operator/kubernetes/samples/scripts/elasticsearch-and-kibana` directory and create a `logstash.yaml` file as follows. +1. Navigate to the `$WORKDIR/kubernetes/elasticsearch-and-kibana` directory and create a `logstash.yaml` file as follows. Change the `claimName` and `mountPath` values to match the values returned in the previous commands: ``` @@ -180,8 +180,8 @@ OIG Server logs can be pushed to the Elasticsearch server using the `logstash` p 1. In the NFS persistent volume directory that corresponds to the mountPath `/u01/oracle/user_projects/domains`, create a `logstash` directory. For example: - ``` - $ mkdir -p /scratch/OIGDockerK8S/governancedomainpv/logstash + ```bash + $ mkdir -p /scratch/OIGK8S/governancedomainpv/logstash ``` 1. Create a `logstash.conf` in the newly created `logstash` directory that contains the following. Make sure the paths correspond to your `mountPath` and `domain` name: @@ -244,7 +244,7 @@ OIG Server logs can be pushed to the Elasticsearch server using the `logstash` p 1. 
Deploy the `logstash` pod by executing the following command: ```bash - $ kubectl create -f /weblogic-kubernetes-operator/kubernetes/samples/scripts/elasticsearch-and-kibana/logstash.yaml + $ kubectl create -f $WORKDIR/kubernetes/elasticsearch-and-kibana/logstash.yaml ``` The output will look similar to the following: @@ -267,13 +267,13 @@ OIG Server logs can be pushed to the Elasticsearch server using the `logstash` p The output should look similar to the following: - ```bash + ``` NAME READY STATUS RESTARTS AGE - logstash-wls-85867765bc-bhs54 1/1 Running 0 9s governancedomain-adminserver 1/1 Running 0 90m - governancedomain-create-fmw-infra-sample-domain-job-dktkk 0/1 Completed 0 25h + governancedomain-create-fmw-infra-sample-domain-job-8cww8 0/1 Completed 0 25h governancedomain-oim-server1 1/1 Running 0 87m governancedomain-soa-server1 1/1 Running 0 87m + logstash-wls-f448b44c8-92l27 1/1 Running 0 7s ``` Then run the following to get the Elasticsearch pod name: @@ -284,7 +284,7 @@ OIG Server logs can be pushed to the Elasticsearch server using the `logstash` p The output should look similar to the following: - ```bash + ``` NAME READY STATUS RESTARTS AGE elasticsearch-857bd5ff6b-tvqdn 1/1 Running 0 7m48s kibana-594465687d-zc2rt 1/1 Running 0 7m48s @@ -306,22 +306,22 @@ OIG Server logs can be pushed to the Elasticsearch server using the `logstash` p 1. 
In the elasticsearch bash shell run the following to check the indices: - ``` + ```bash [root@elasticsearch-857bd5ff6b-tvqdn elasticsearch]# curl -i "127.0.0.1:9200/_cat/indices?v" ``` The output will look similar to the following: - ```bash + ``` HTTP/1.1 200 OK content-type: text/plain; charset=UTF-8 content-length: 580 health status index uuid pri rep docs.count docs.deleted store.size pri.store.size - green open .kibana_task_manager 1qQ-C21GQJa38lAR28_7iA 1 0 2 0 12.6kb 12.6kb - green open .kibana_1 TwIdqENXTqm6mZlBRVy__A 1 0 2 0 7.6kb 7.6kb - yellow open logstash-2020.09.30 6LuZLYgARYCGGN-yZT5bJA 5 1 90794 0 22mb 22mb - yellow open logstash-2020.09.29 QBYQrolXRiW9l8Ct3DrSyQ 5 1 38 0 86.3kb 86.3kb + green open .kibana_1 Nb3C1lpMQrmptapuYb2PIQ 1 0 2 0 7.6kb 7.6kb + yellow open logstash-2021.11.11 OWbA_M5EQ2m6l2xZdS2zXw 5 1 150 0 107.6kb 107.6kb + green open .kibana_task_manager Qn_oHzAvQlWVcj_lItVdKQ 1 0 2 0 12.5kb 12.5kb + yellow open logstash-2021.11.15 5-V6CXrnQrOOmZDW4JOUgw 5 1 126338 0 45.6mb 45.6mb ``` Exit the bash shell by typing `exit`. 
@@ -334,10 +334,10 @@ OIG Server logs can be pushed to the Elasticsearch server using the `logstash` p The output will look similar to the following: - ```bash - NAME TYPE CLUSTER-IP EXTERNAL-IP PORT(S) AGE - elasticsearch ClusterIP 10.107.79.44 9200/TCP,9300/TCP 11m - kibana NodePort 10.103.60.126 5601:31490/TCP 11m + ``` + NAME TYPE CLUSTER-IP EXTERNAL-IP PORT(S) AGE + elasticsearch ClusterIP 10.111.37.189 9200/TCP,9300/TCP 11m + kibana NodePort 10.111.224.230 5601:31490/TCP 11m kubernetes ClusterIP 10.96.0.1 443/TCP 7d5h ``` diff --git a/docs-source/content/oig/manage-oig-domains/monitoring-oim-domains.md b/docs-source/content/oig/manage-oig-domains/monitoring-oim-domains.md index c9fc3f181..8bb5f2d52 100644 --- a/docs-source/content/oig/manage-oig-domains/monitoring-oim-domains.md +++ b/docs-source/content/oig/manage-oig-domains/monitoring-oim-domains.md @@ -7,27 +7,288 @@ description: "Describes the steps for Monitoring the OIG domain and Publising th After the OIG domain is set up you can monitor the OIG instance using Prometheus and Grafana. See [Monitoring a domain](https://github.com/oracle/weblogic-monitoring-exporter). - The WebLogic Monitoring Exporter uses the WLS RESTful Management API to scrape runtime information and then exports Prometheus-compatible metrics. It is deployed as a web application in a WebLogic Server (WLS) instance, version 12.2.1 or later, typically, in the instance from which you want to get metrics. -### Deploy the Prometheus operator -1. Clone Prometheus by running the following commands: +There are two ways to setup monitoring and you should choose one method or the other: + +1. [Setup automatically using setup-monitoring.sh](#setup-automatically-using-setup-monitoring.sh) +1. [Setup using manual configuration](#setup-using-manual-configuration) + + +### Setup automatically using setup-monitoring.sh + +The `$WORKDIR/kubernetes/monitoring-service/setup-monitoring.sh` sets up the monitoring for the OIG domain. 
It installs Prometheus, Grafana, WebLogic Monitoring Exporter and deploys the web applications to the OIG domain. It also deploys the WebLogic Server Grafana dashboard. + +For usage details execute `./setup-monitoring.sh -h`. + +1. Edit the `$WORKDIR/kubernetes/monitoring-service/monitoring-inputs.yaml` and change the `domainUID`, `domainNamespace`, and `weblogicCredentialsSecretName` to correspond to your deployment. For example: + + ``` + version: create-oimcluster-monitoring-inputs-v1 + + # Unique ID identifying your domain. + # This ID must not contain an underscore ("_"), and must be lowercase and unique across all domains in a Kubernetes cluster. + domainUID: governancedomain + + # Name of the domain namespace + domainNamespace: oigns + + # Boolean value indicating whether to install kube-prometheus-stack + setupKubePrometheusStack: true + + # Additional parameters for helm install kube-prometheus-stack + # Refer https://github.com/prometheus-community/helm-charts/blob/main/charts/kube-prometheus-stack/values.yaml for additional parameters + # Sample : + # additionalParamForKubePrometheusStack: --set nodeExporter.enabled=false --set prometheusOperator.tls.enabled=false --set prometheusOperator.admissionWebhooks.enabled=false + additionalParamForKubePrometheusStack: + + # Name of the monitoring namespace + monitoringNamespace: monitoring + + # Name of the Admin Server + adminServerName: AdminServer + # + # Port number for admin server + adminServerPort: 7001 + + # Cluster name + soaClusterName: soa_cluster + + # Port number for managed server + soaManagedServerPort: 8001 + + # WebLogic Monitoring Exporter to Cluster + wlsMonitoringExporterTosoaCluster: true + + # Cluster name + oimClusterName: oim_cluster + + # Port number for managed server + oimManagedServerPort: 14000 + + # WebLogic Monitoring Exporter to Cluster + wlsMonitoringExporterTooimCluster: true + + + # Boolean to indicate if the adminNodePort will be exposed + exposeMonitoringNodePort: true + + #
NodePort to expose Prometheus + prometheusNodePort: 32101 + + # NodePort to expose Grafana + grafanaNodePort: 32100 + + # NodePort to expose Alertmanager + alertmanagerNodePort: 32102 + + # Name of the Kubernetes secret for the Admin Server's username and password + weblogicCredentialsSecretName: oig-domain-credentials + ``` + +1. Run the following command to setup monitoring: + + ```bash + $ cd $WORKDIR/kubernetes/monitoring-service + $ ./setup-monitoring.sh -i monitoring-inputs.yaml + ``` + + The output should be similar to the following: + + ``` + Monitoring setup in monitoring in progress + + node/worker-node1 not labeled + node/worker-node2 not labeled + node/master-node not labeled + Setup prometheus-community/kube-prometheus-stack started + "prometheus-community" already exists with the same configuration, skipping + Hang tight while we grab the latest from your chart repositories... + ...Successfully got an update from the "stable" chart repository + ...Successfully got an update from the "prometheus" chart repository + ...Successfully got an update from the "prometheus-community" chart repository + ...Successfully got an update from the "appscode" chart repository + Update Complete. ⎈Happy Helming!⎈ + Setup prometheus-community/kube-prometheus-stack in progress + NAME: monitoring + LAST DEPLOYED: Thu Nov 18 03:38:04 2021 + NAMESPACE: monitoring + STATUS: deployed + REVISION: 1 + NOTES: + kube-prometheus-stack has been installed. Check its status by running: + kubectl --namespace monitoring get pods -l "release=monitoring" + + Visit https://github.com/prometheus-operator/kube-prometheus for instructions on how to create & configure Alertmanager and Prometheus instances using the Operator. 
+ Setup prometheus-community/kube-prometheus-stack completed + Deploy WebLogic Monitoring Exporter started + Deploying WebLogic Monitoring Exporter with domainNamespace[oigns], domainUID[governancedomain], adminServerPodName[governancedomain-adminserver] + % Total % Received % Xferd Average Speed Time Time Time Current + Dload Upload Total Spent Left Speed + 100 655 100 655 0 0 1159 0 --:--:-- --:--:-- --:--:-- 1159 + 100 2196k 100 2196k 0 0 1763k 0 0:00:01 0:00:01 --:--:-- 20.7M + created $WORKDIR/kubernetes/monitoring-service/scripts/wls-exporter-deploy dir + created /tmp/ci-GJSQsiXrFE + /tmp/ci-GJSQsiXrFE $WORKDIR/kubernetes/monitoring-service + in temp dir + adding: WEB-INF/weblogic.xml (deflated 61%) + adding: config.yml (deflated 60%) + $WORKDIR/kubernetes/monitoring-service + created /tmp/ci-KeyZrdouMD + /tmp/ci-KeyZrdouMD $WORKDIR/kubernetes/monitoring-service + in temp dir + adding: WEB-INF/weblogic.xml (deflated 61%) + adding: config.yml (deflated 60%) + $WORKDIR/kubernetes/monitoring-service + created /tmp/ci-QE9HawIIgT + /tmp/ci-QE9HawIIgT $WORKDIR/kubernetes/monitoring-service + in temp dir + adding: WEB-INF/weblogic.xml (deflated 61%) + adding: config.yml (deflated 60%) + $WORKDIR/kubernetes/monitoring-service + + Initializing WebLogic Scripting Tool (WLST) ... + + Welcome to WebLogic Server Administration Scripting Shell + + Type help() for help on available commands + + Connecting to t3://governancedomain-adminserver:7001 with userid weblogic ... + Successfully connected to Admin Server "AdminServer" that belongs to domain "governancedomain". + + Warning: An insecure protocol was used to connect to the server. + To ensure on-the-wire security, the SSL port or Admin port should be used instead. + + Deploying ......... + Deploying application from /u01/oracle/wls-exporter-deploy/wls-exporter-adminserver.war to targets AdminServer (upload=true) ... 
+ + .Completed the deployment of Application with status completed + Current Status of your Deployment: + Deployment command type: deploy + Deployment State : completed + Deployment Message : no message + Starting application wls-exporter-adminserver. + + .Completed the start of Application with status completed + Current Status of your Deployment: + Deployment command type: start + Deployment State : completed + Deployment Message : no message + Deploying ......... + Deploying application from /u01/oracle/wls-exporter-deploy/wls-exporter-soa.war to targets soa_cluster (upload=true) ... + + .Completed the deployment of Application with status completed + Current Status of your Deployment: + Deployment command type: deploy + Deployment State : completed + Deployment Message : no message + Starting application wls-exporter-soa. + + .Completed the start of Application with status completed + Current Status of your Deployment: + Deployment command type: start + Deployment State : completed + Deployment Message : no message + Deploying ......... + Deploying application from /u01/oracle/wls-exporter-deploy/wls-exporter-oim.war to targets oim_cluster (upload=true) ... + + .Completed the deployment of Application with status completed + Current Status of your Deployment: + Deployment command type: deploy + Deployment State : completed + Deployment Message : no message + Starting application wls-exporter-oim. + + .Completed the start of Application with status completed + Current Status of your Deployment: + Deployment command type: start + Deployment State : completed + Deployment Message : no message + Disconnected from weblogic server: AdminServer + + + Exiting WebLogic Scripting Tool. + + + Deploy WebLogic Monitoring Exporter completed + secret/basic-auth created + servicemonitor.monitoring.coreos.com/wls-exporter created + Deploying WebLogic Server Grafana Dashboard.... 
+ {"id":25,"slug":"weblogic-server-dashboard","status":"success","uid":"5yUwzbZWz","url":"/d/5yUwzbZWz/weblogic-server-dashboard","version":1} + Deployed WebLogic Server Grafana Dashboard successfully + + Grafana is available at NodePort: 32100 + Prometheus is available at NodePort: 32101 + Altermanager is available at NodePort: 32102 + ============================================================== + ``` + +#### Prometheus service discovery + +After the ServiceMonitor is deployed, the wls-exporter should be discovered by Prometheus and be able to collect metrics. + +1. Access the following URL to view Prometheus service discovery: `http://${MASTERNODE-HOSTNAME}:32101/service-discovery` + +1. Click on `serviceMonitor/oigns/wls-exporter/0` and then *show more*. Verify all the targets are mentioned. + +**Note** : It may take several minutes for `serviceMonitor/oigns/wls-exporter/0` to appear, so refresh the page until it does. + +#### Grafana dashboard + +1. Access the Grafana dashboard with the following URL: `http://${MASTERNODE-HOSTNAME}:32100` and login with `admin/admin`. Change your password when prompted. + +1. In the `Dashboards` panel, click on `WebLogic Server Dashboard`. The dashboard for your OIG domain should be displayed. If it is not displayed, click the `Search` icon in the left hand menu and search for `WebLogic Server Dashboard`. + + +#### Cleanup + +To uninstall the Prometheus, Grafana, WebLogic Monitoring Exporter and the deployments, you can run the `$WORKDIR/kubernetes/monitoring-service/delete-monitoring.sh` script. For usage details execute `./delete-monitoring.sh -h` + +1. To uninstall run the following command: ```bash - $ cd - $ git clone https://github.com/coreos/kube-prometheus.git + $ cd $WORKDIR/kubernetes/monitoring-service + $ ./delete-monitoring.sh -i monitoring-inputs.yaml + ``` + + +### Setup using manual configuration + +Install Prometheus, Grafana and WebLogic Monitoring Exporter manually.
Create the web applications and deploy to the OIG domain. + + +#### Deploy the Prometheus operator + +1. Kube-Prometheus requires all nodes to be labelled with `kubernetes.io/os=linux`. To check if your nodes are labelled, run the following: + + ```bash + $ kubectl get nodes --show-labels ``` - **Note**: Please refer the compatibility matrix of [Kube Prometheus](https://github.com/coreos/kube-prometheus#kubernetes-compatibility-matrix). Please download the [release](https://github.com/prometheus-operator/kube-prometheus/releases) of the repository according to the Kubernetes version of your cluster. In the above example the latest release will be downloaded. + If the nodes are labelled the output will look similar to the following: - For example: + ``` + NAME STATUS ROLES AGE VERSION LABELS + worker-node1 Ready 42d v1.20.10 beta.kubernetes.io/arch=amd64,beta.kubernetes.io/os=linux,kubernetes.io/arch=amd64,kubernetes.io/hostname=worker-node1,kubernetes.io/os=linux + worker-node2 Ready 42d v1.20.10 beta.kubernetes.io/arch=amd64,beta.kubernetes.io/os=linux,kubernetes.io/arch=amd64,kubernetes.io/hostname=worker-node2,kubernetes.io/os=linux + master-node Ready master 42d v1.20.10 beta.kubernetes.io/arch=amd64,beta.kubernetes.io/os=linux,kubernetes.io/arch=amd64,kubernetes.io/hostname=master-node,kubernetes.io/os=linux,node-role.kubernetes.io/master= + ``` + + If the nodes are not labelled, run the following command: ```bash - $ cd /scratch/OIGDockerK8S - $ git clone https://github.com/coreos/kube-prometheus.git + $ kubectl label nodes --all kubernetes.io/os=linux + ``` + +1. Clone Prometheus by running the following commands: + + ```bash + $ cd $WORKDIR/kubernetes/monitoring-service + $ git clone https://github.com/coreos/kube-prometheus.git -b v0.7.0 ``` + **Note**: Please refer the compatibility matrix of [Kube Prometheus](https://github.com/coreos/kube-prometheus#kubernetes-compatibility-matrix). 
Please download the [release](https://github.com/prometheus-operator/kube-prometheus/releases) of the repository according to the Kubernetes version of your cluster. 1. Run the following command to create the namespace and custom resource definitions: @@ -39,9 +300,9 @@ The WebLogic Monitoring Exporter uses the WLS RESTful Management API to scrape r The output will look similar to the following: - ```bash - kubectl create -f manifests/setup + ``` namespace/monitoring created + customresourcedefinition.apiextensions.k8s.io/alertmanagerconfigs.monitoring.coreos.com created customresourcedefinition.apiextensions.k8s.io/alertmanagers.monitoring.coreos.com created customresourcedefinition.apiextensions.k8s.io/podmonitors.monitoring.coreos.com created customresourcedefinition.apiextensions.k8s.io/probes.monitoring.coreos.com created @@ -64,13 +325,23 @@ The WebLogic Monitoring Exporter uses the WLS RESTful Management API to scrape r The output will look similar to the following: - ```bash + ``` alertmanager.monitoring.coreos.com/main created + prometheusrule.monitoring.coreos.com/alertmanager-main-rules created secret/alertmanager-main created service/alertmanager-main created serviceaccount/alertmanager-main created - servicemonitor.monitoring.coreos.com/alertmanager created + servicemonitor.monitoring.coreos.com/alertmanager-main created + clusterrole.rbac.authorization.k8s.io/blackbox-exporter created + clusterrolebinding.rbac.authorization.k8s.io/blackbox-exporter created + configmap/blackbox-exporter-configuration created + deployment.apps/blackbox-exporter created + service/blackbox-exporter created + serviceaccount/blackbox-exporter created + servicemonitor.monitoring.coreos.com/blackbox-exporter created + secret/grafana-config created secret/grafana-datasources created + configmap/grafana-dashboard-alertmanager-overview created configmap/grafana-dashboard-apiserver created configmap/grafana-dashboard-cluster-total created 
configmap/grafana-dashboard-controller-manager created @@ -92,22 +363,30 @@ The WebLogic Monitoring Exporter uses the WLS RESTful Management API to scrape r configmap/grafana-dashboard-prometheus created configmap/grafana-dashboard-proxy created configmap/grafana-dashboard-scheduler created - configmap/grafana-dashboard-statefulset created configmap/grafana-dashboard-workload-total created configmap/grafana-dashboards created deployment.apps/grafana created service/grafana created serviceaccount/grafana created servicemonitor.monitoring.coreos.com/grafana created + prometheusrule.monitoring.coreos.com/kube-prometheus-rules created clusterrole.rbac.authorization.k8s.io/kube-state-metrics created clusterrolebinding.rbac.authorization.k8s.io/kube-state-metrics created deployment.apps/kube-state-metrics created + prometheusrule.monitoring.coreos.com/kube-state-metrics-rules created service/kube-state-metrics created serviceaccount/kube-state-metrics created servicemonitor.monitoring.coreos.com/kube-state-metrics created + prometheusrule.monitoring.coreos.com/kubernetes-monitoring-rules created + servicemonitor.monitoring.coreos.com/kube-apiserver created + servicemonitor.monitoring.coreos.com/coredns created + servicemonitor.monitoring.coreos.com/kube-controller-manager created + servicemonitor.monitoring.coreos.com/kube-scheduler created + servicemonitor.monitoring.coreos.com/kubelet created clusterrole.rbac.authorization.k8s.io/node-exporter created clusterrolebinding.rbac.authorization.k8s.io/node-exporter created daemonset.apps/node-exporter created + prometheusrule.monitoring.coreos.com/node-exporter-rules created service/node-exporter created serviceaccount/node-exporter created servicemonitor.monitoring.coreos.com/node-exporter created @@ -125,8 +404,10 @@ The WebLogic Monitoring Exporter uses the WLS RESTful Management API to scrape r servicemonitor.monitoring.coreos.com/prometheus-adapter created clusterrole.rbac.authorization.k8s.io/prometheus-k8s created 
clusterrolebinding.rbac.authorization.k8s.io/prometheus-k8s created + prometheusrule.monitoring.coreos.com/prometheus-operator-rules created servicemonitor.monitoring.coreos.com/prometheus-operator created prometheus.monitoring.coreos.com/k8s created + prometheusrule.monitoring.coreos.com/prometheus-k8s-prometheus-rules created rolebinding.rbac.authorization.k8s.io/prometheus-k8s-config created rolebinding.rbac.authorization.k8s.io/prometheus-k8s created rolebinding.rbac.authorization.k8s.io/prometheus-k8s created @@ -135,38 +416,15 @@ The WebLogic Monitoring Exporter uses the WLS RESTful Management API to scrape r role.rbac.authorization.k8s.io/prometheus-k8s created role.rbac.authorization.k8s.io/prometheus-k8s created role.rbac.authorization.k8s.io/prometheus-k8s created - prometheusrule.monitoring.coreos.com/prometheus-k8s-rules created service/prometheus-k8s created serviceaccount/prometheus-k8s created - servicemonitor.monitoring.coreos.com/prometheus created - servicemonitor.monitoring.coreos.com/kube-apiserver created - servicemonitor.monitoring.coreos.com/coredns created - servicemonitor.monitoring.coreos.com/kube-controller-manager created - servicemonitor.monitoring.coreos.com/kube-scheduler created - servicemonitor.monitoring.coreos.com/kubelet created + servicemonitor.monitoring.coreos.com/prometheus-k8s created + unable to recognize "manifests/alertmanager-podDisruptionBudget.yaml": no matches for kind "PodDisruptionBudget" in version "policy/v1" + unable to recognize "manifests/prometheus-adapter-podDisruptionBudget.yaml": no matches for kind "PodDisruptionBudget" in version "policy/v1" + unable to recognize "manifests/prometheus-podDisruptionBudget.yaml": no matches for kind "PodDisruptionBudget" in version "policy/v1" ``` -1. Kube-Prometheus requires all nodes to be labelled with `kubernetes.io/os=linux`. 
To check if your nodes are labelled, run the following: - ```bash - $ kubectl get nodes --show-labels - ``` - - If the nodes are labelled the output will look similar to the following: - - ```bash - NAME STATUS ROLES AGE VERSION LABELS - worker-node1 Ready 42d v1.18.4 beta.kubernetes.io/arch=amd64,beta.kubernetes.io/os=linux,kubernetes.io/arch=amd64,kubernetes.io/hostname=worker-node1,kubernetes.io/os=linux - worker-node2 Ready 42d v1.18.4 beta.kubernetes.io/arch=amd64,beta.kubernetes.io/os=linux,kubernetes.io/arch=amd64,kubernetes.io/hostname=worker-node2,kubernetes.io/os=linux - master-node Ready master 42d v1.18.4 beta.kubernetes.io/arch=amd64,beta.kubernetes.io/os=linux,kubernetes.io/arch=amd64,kubernetes.io/hostname=masternode,kubernetes.io/os=linux,node-role.kubernetes.io/master= - ``` - - If the nodes are not labelled, run the following command: - - ```bash - $ kubectl label nodes --all kubernetes.io/os=linux - ``` - 1. Provide external access for Grafana, Prometheus, and Alertmanager, by running the following commands: ```bash @@ -181,7 +439,7 @@ The WebLogic Monitoring Exporter uses the WLS RESTful Management API to scrape r The output will look similar to the following: - ```bash + ``` service/grafana patched service/prometheus-k8s patched service/alertmanager-main patched @@ -195,363 +453,216 @@ The WebLogic Monitoring Exporter uses the WLS RESTful Management API to scrape r The output should look similar to the following: - ```bash - NAME READY STATUS RESTARTS AGE IP NODE NOMINATED NODE READINESS GATES - pod/alertmanager-main-0 2/2 Running 0 97s 10.244.2.52 worker-node2 - pod/alertmanager-main-1 2/2 Running 0 97s 10.244.1.61 worker-node1 - pod/alertmanager-main-2 2/2 Running 0 97s 10.244.2.53 worker-node2 - pod/grafana-86445dccbb-dln2l 1/1 Running 0 96s 10.244.2.55 worker-node2 - pod/kube-state-metrics-5b67d79459-k7xrb 3/3 Running 0 96s 10.244.1.63 worker-node1 - pod/node-exporter-dhp4k 2/2 Running 0 96s 10.250.111.111 worker-node2 - 
pod/node-exporter-jknkv 2/2 Running 0 96s 10.196.4.112 masternode - pod/node-exporter-vpn9l 2/2 Running 0 96s 10.250.111.112 worker-node1 - pod/prometheus-adapter-66b855f564-snkjb 1/1 Running 0 96s 10.244.2.56 worker-node2 - pod/prometheus-k8s-0 3/3 Running 0 96s 10.244.2.54 worker-node2 - pod/prometheus-k8s-1 3/3 Running 0 96s 10.244.1.62 worker-node1 - pod/prometheus-operator-78fcb48ccf-gcgc5 2/2 Running 0 107s 10.244.1.60 worker-node1 - - NAME TYPE CLUSTER-IP EXTERNAL-IP PORT(S) AGE SELECTOR - service/alertmanager-main NodePort 10.107.184.118 9093:32102/TCP 98s alertmanager=main,app=alertmanager - service/alertmanager-operated ClusterIP None 9093/TCP,9094/TCP,9094/UDP 97s app=alertmanager - service/grafana NodePort 10.96.249.254 3000:32100/TCP 97s app=grafana - service/kube-state-metrics ClusterIP None 8443/TCP,9443/TCP 97s app.kubernetes.io/name=kube-state-metrics - service/node-exporter ClusterIP None 9100/TCP 97s app.kubernetes.io/name=node-exporter - service/prometheus-adapter ClusterIP 10.100.222.239 443/TCP 97s name=prometheus-adapter - service/prometheus-k8s NodePort 10.106.163.78 9090:32101/TCP 96s app=prometheus,prometheus=k8s - service/prometheus-operated ClusterIP None 9090/TCP 96s app=prometheus - service/prometheus-operator ClusterIP None 8443/TCP 108s app.kubernetes.io/component=contr oller,app.kubernetes.io/name=prometheus-operator - ``` - - - -### Deploy WebLogic Monitoring Exporter - -1. 
Download WebLogic Monitoring Exporter: - - ```bash - $ mkdir -p /wls_exporter - $ cd /wls_exporter - $ wget https://github.com/oracle/weblogic-monitoring-exporter/releases/download//wls-exporter.war - $ wget https://github.com/oracle/weblogic-monitoring-exporter/releases/download//get.sh ``` - - For example: - - ```bash - $ mkdir -p /scratch/OIGDockerK8S/wls_exporter - $ cd /scratch/OIGDockerK8S/wls_exporter - $ wget https://github.com/oracle/weblogic-monitoring-exporter/releases/download/v1.2.0/wls-exporter.war - $ wget https://github.com/oracle/weblogic-monitoring-exporter/releases/download/v1.2.0/get1.2.0.sh - ``` - -1. Create a configuration file `config-admin.yaml` in the `/wls_exporter` directory that contains the following. Modify the `restPort` to match the server port for the OIG Administration Server: - - ``` - metricsNameSnakeCase: true - restPort: 7001 - queries: - - key: name - keyName: location - prefix: wls_server_ - applicationRuntimes: - key: name - keyName: app - componentRuntimes: - prefix: wls_webapp_config_ - type: WebAppComponentRuntime - key: name - values: [deploymentState, contextRoot, sourceInfo, openSessionsHighCount, openSessionsCurrentCount, sessionsOpenedTotalCount, sessionCookieMaxAgeSecs, sessionInvalidationIntervalSecs, sessionTimeoutSecs, singleThreadedServletPoolSize, sessionIDLength, servletReloadCheckSecs, jSPPageCheckSecs] - servlets: - prefix: wls_servlet_ - key: servletName - - JVMRuntime: - prefix: wls_jvm_ - key: name - - executeQueueRuntimes: - prefix: wls_socketmuxer_ - key: name - values: [pendingRequestCurrentCount] - - workManagerRuntimes: - prefix: wls_workmanager_ - key: name - values: [stuckThreadCount, pendingRequests, completedRequests] - - threadPoolRuntime: - prefix: wls_threadpool_ - key: name - values: [executeThreadTotalCount, queueLength, stuckThreadCount, hoggingThreadCount] - - JMSRuntime: - key: name - keyName: jmsruntime - prefix: wls_jmsruntime_ - JMSServers: - prefix: wls_jms_ - key: name - keyName: 
jmsserver - destinations: - prefix: wls_jms_dest_ - key: name - keyName: destination - - persistentStoreRuntimes: - prefix: wls_persistentstore_ - key: name - - JDBCServiceRuntime: - JDBCDataSourceRuntimeMBeans: - prefix: wls_datasource_ - key: name - - JTARuntime: - prefix: wls_jta_ - key: name - ``` - - - -1. Create a configuration file `config-oimserver.yaml` in the `/wls_exporter` directory that contains the following. Modify the `restPort` to match the server port for the OIG Managed Servers: - - ``` - metricsNameSnakeCase: true - restPort: 14000 - queries: - - key: name - keyName: location - prefix: wls_server_ - applicationRuntimes: - key: name - keyName: app - componentRuntimes: - prefix: wls_webapp_config_ - type: WebAppComponentRuntime - key: name - values: [deploymentState, contextRoot, sourceInfo, openSessionsHighCount, openSessionsCurrentCount, sessionsOpenedTotalCount, sessionCookieMaxAgeSecs, sessionInvalidationIntervalSecs, sessionTimeoutSecs, singleThreadedServletPoolSize, sessionIDLength, servletReloadCheckSecs, jSPPageCheckSecs] - servlets: - prefix: wls_servlet_ - key: servletName - - JVMRuntime: - prefix: wls_jvm_ - key: name - - executeQueueRuntimes: - prefix: wls_socketmuxer_ - key: name - values: [pendingRequestCurrentCount] - - workManagerRuntimes: - prefix: wls_workmanager_ - key: name - values: [stuckThreadCount, pendingRequests, completedRequests] - - threadPoolRuntime: - prefix: wls_threadpool_ - key: name - values: [executeThreadTotalCount, queueLength, stuckThreadCount, hoggingThreadCount] - - JMSRuntime: - key: name - keyName: jmsruntime - prefix: wls_jmsruntime_ - JMSServers: - prefix: wls_jms_ - key: name - keyName: jmsserver - destinations: - prefix: wls_jms_dest_ - key: name - keyName: destination - - persistentStoreRuntimes: - prefix: wls_persistentstore_ - key: name - - JDBCServiceRuntime: - JDBCDataSourceRuntimeMBeans: - prefix: wls_datasource_ - key: name - - JTARuntime: - prefix: wls_jta_ - key: name - ``` - -1. 
Create a configuration file `config-soaserver.yaml` in the `/wls_exporter` directory that contains the following. Modify the `restPort` to match the server port for the SOA Managed Servers: - - ``` - metricsNameSnakeCase: true - restPort: 8001 - queries: - - key: name - keyName: location - prefix: wls_server_ - applicationRuntimes: - key: name - keyName: app - componentRuntimes: - prefix: wls_webapp_config_ - type: WebAppComponentRuntime - key: name - values: [deploymentState, contextRoot, sourceInfo, openSessionsHighCount, openSessionsCurrentCount, sessionsOpenedTotalCount, sessionCookieMaxAgeSecs, sessionInvalidationIntervalSecs, sessionTimeoutSecs, singleThreadedServletPoolSize, sessionIDLength, servletReloadCheckSecs, jSPPageCheckSecs] - servlets: - prefix: wls_servlet_ - key: servletName - - JVMRuntime: - prefix: wls_jvm_ - key: name - - executeQueueRuntimes: - prefix: wls_socketmuxer_ - key: name - values: [pendingRequestCurrentCount] - - workManagerRuntimes: - prefix: wls_workmanager_ - key: name - values: [stuckThreadCount, pendingRequests, completedRequests] - - threadPoolRuntime: - prefix: wls_threadpool_ - key: name - values: [executeThreadTotalCount, queueLength, stuckThreadCount, hoggingThreadCount] - - JMSRuntime: - key: name - keyName: jmsruntime - prefix: wls_jmsruntime_ - JMSServers: - prefix: wls_jms_ - key: name - keyName: jmsserver - destinations: - prefix: wls_jms_dest_ - key: name - keyName: destination - - persistentStoreRuntimes: - prefix: wls_persistentstore_ - key: name - - JDBCServiceRuntime: - JDBCDataSourceRuntimeMBeans: - prefix: wls_datasource_ - key: name - - JTARuntime: - prefix: wls_jta_ - key: name - ``` - -1. 
Generate the deployment package for the OIG Administration Server: - - ```bash - $ chmod 777 get.sh - $ ./get config-admin.yaml + pod/alertmanager-main-0 2/2 Running 0 40s 10.244.1.29 worker-node1 + pod/alertmanager-main-1 2/2 Running 0 40s 10.244.2.68 worker-node2 + pod/alertmanager-main-2 2/2 Running 0 40s 10.244.1.28 worker-node1 + pod/grafana-f8cd57fcf-zpjh2 1/1 Running 0 40s 10.244.2.69 worker-node2 + pod/kube-state-metrics-587bfd4f97-zw9zj 3/3 Running 0 38s 10.244.1.30 worker-node1 + pod/node-exporter-2cgrm 2/2 Running 0 38s 10.196.54.36 master-node + pod/node-exporter-fpl7f 2/2 Running 0 38s 10.247.95.26 worker-node1 + pod/node-exporter-kvvnr 2/2 Running 0 38s 10.250.40.59 worker-node2 + pod/prometheus-adapter-69b8496df6-9vfdp 1/1 Running 0 38s 10.244.2.70 worker-node2 + pod/prometheus-k8s-0 2/2 Running 0 37s 10.244.2.71 worker-node2 + pod/prometheus-k8s-1 2/2 Running 0 37s 10.244.1.31 worker-node1 + pod/prometheus-operator-7649c7454f-g5b4l 2/2 Running 0 47s 10.244.2.67 worker-node2 + + NAME TYPE CLUSTER-IP EXTERNAL-IP PORT(S) AGE SELECTOR + service/alertmanager-main NodePort 10.105.76.223 9093:32102/TCP 41s alertmanager=main,app=alertmanager + service/alertmanager-operated ClusterIP None 9093/TCP,9094/TCP,9094/UDP 40s app=alertmanager + service/grafana NodePort 10.107.86.157 3000:32100/TCP 40s app=grafana + service/kube-state-metrics ClusterIP None 8443/TCP,9443/TCP 40s app.kubernetes.io/name=kube-state-metrics + service/node-exporter ClusterIP None 9100/TCP 39s app.kubernetes.io/name=node-exporter + service/prometheus-adapter ClusterIP 10.102.244.224 443/TCP 39s name=prometheus-adapter + service/prometheus-k8s NodePort 10.100.241.34 9090:32101/TCP 39s app=prometheus,prometheus=k8s + service/prometheus-operated ClusterIP None 9090/TCP 39s app=prometheus + service/prometheus-operator ClusterIP None 8443/TCP 47s app.kubernetes.io/component=controller,app.kubernetes.io/name=prometheus-operator ``` - - For example: - + + +#### Deploy WebLogic Monitoring 
Exporter + + +Generate the WebLogic Monitoring Exporter deployment package. The `wls-exporter.war` package need to be updated and created for each listening port (Administration Server and Managed Servers) in the domain. + +1. Set the below environment values and run the script `get-wls-exporter.sh` to generate the required WAR files at `${WORKDIR}/kubernetes/monitoring-service/scripts/wls-exporter-deploy`: + ```bash - $ chmod 777 get1.2.0.sh - $ ./get1.2.0.sh config-admin.yaml + $ cd $WORKDIR/kubernetes/monitoring-service/scripts + $ export adminServerPort=7001 + $ export wlsMonitoringExporterTosoaCluster=true + $ export soaManagedServerPort=8001 + $ export wlsMonitoringExporterTooimCluster=true + $ export oimManagedServerPort=14000 + $ sh get-wls-exporter.sh ``` - + The output will look similar to the following: ``` - % Total % Received % Xferd Average Speed Time Time Time Current + % Total % Received % Xferd Average Speed Time Time Time Current Dload Upload Total Spent Left Speed - 100 629 100 629 0 0 1241 0 --:--:-- --:--:-- --:--:-- 1240 - 100 2033k 100 2033k 0 0 1219k 0 0:00:01 0:00:01 --:--:-- 2882k - created /tmp/ci-lKm0dOnLwU - /tmp/ci-lKm0dOnLwU /scratch/OIGDockerK8S/wls_exporter + 100 655 100 655 0 0 1159 0 --:--:-- --:--:-- --:--:-- 1159 + 100 2196k 100 2196k 0 0 1430k 0 0:00:01 0:00:01 --:--:-- 8479k + created $WORKDIR/kubernetes/monitoring-service/scripts/wls-exporter-deploy dir + domainNamespace is empty, setting to default oimcluster + domainUID is empty, setting to default oimcluster + weblogicCredentialsSecretName is empty, setting to default "oimcluster-domain-credentials" + adminServerPort is empty, setting to default "7001" + soaClusterName is empty, setting to default "soa_cluster" + oimClusterName is empty, setting to default "oim_cluster" + created /tmp/ci-NEZy7NOfoz + /tmp/ci-NEZy7NOfoz $WORKDIR/kubernetes/monitoring-service/scripts + in temp dir + adding: WEB-INF/weblogic.xml (deflated 61%) + adding: config.yml (deflated 60%) + 
$WORKDIR/kubernetes/monitoring-service/scripts + created /tmp/ci-J7QJ4Nc1lo + /tmp/ci-J7QJ4Nc1lo $WORKDIR/kubernetes/monitoring-service/scripts + in temp dir + adding: WEB-INF/weblogic.xml (deflated 61%) + adding: config.yml (deflated 60%) + $WORKDIR/kubernetes/monitoring-service/scripts + created /tmp/ci-f4GbaxM2aJ + /tmp/ci-f4GbaxM2aJ $WORKDIR/kubernetes/monitoring-service/scripts in temp dir - adding: config.yml (deflated 65%) - /scratch/OIGDockerK8S/wls_exporter + adding: WEB-INF/weblogic.xml (deflated 61%) + adding: config.yml (deflated 60%) + $WORKDIR/kubernetes/monitoring-service/scripts ``` + - This will generate a `wls-exporter.war` file in the same directory that contains a `config.yml` that corresponds to `config-admin.yaml`. Rename the file as follows: - +1. Deploy the WebLogic Monitoring Exporter WAR files into the Oracle Identity Governance domain: + ```bash - mv wls-exporter.war wls-exporter-admin.war + $ cd $WORKDIR/kubernetes/monitoring-service/scripts + $ kubectl cp wls-exporter-deploy /-adminserver:/u01/oracle + $ kubectl cp deploy-weblogic-monitoring-exporter.py /-adminserver:/u01/oracle/wls-exporter-deploy + $ kubectl exec -it -n -adminserver -- /u01/oracle/oracle_common/common/bin/wlst.sh /u01/oracle/wls-exporter-deploy/deploy-weblogic-monitoring-exporter.py -domainName -adminServerName AdminServer -adminURL -adminserver:7001 -username weblogic -password -oimClusterName oim_cluster -wlsMonitoringExporterTooimCluster true -soaClusterName soa_cluster -wlsMonitoringExporterTosoaCluster true ```
Generate the deployment package for the OIG Managed Server and Policy Manager Server, for example: + For example: ```bash - $ ./get1.2.0.sh config-oimserver.yaml - $ mv wls-exporter.war wls-exporter-oimserver.war - $ ./get1.2.0.sh config-soaserver.yaml - $ mv wls-exporter.war wls-exporter-soaserver.war + $ cd $WORKDIR/kubernetes/monitoring-service/scripts + $ kubectl cp wls-exporter-deploy oigns/governancedomain-adminserver:/u01/oracle + $ kubectl cp deploy-weblogic-monitoring-exporter.py oigns/governancedomain-adminserver:/u01/oracle/wls-exporter-deploy + $ kubectl exec -it -n oigns governancedomain-adminserver -- /u01/oracle/oracle_common/common/bin/wlst.sh /u01/oracle/wls-exporter-deploy/deploy-weblogic-monitoring-exporter.py -domainName governancedomain -adminServerName AdminServer -adminURL governancedomain-adminserver:7001 -username weblogic -password -oimClusterName oim_cluster -wlsMonitoringExporterTooimCluster true -soaClusterName soa_cluster -wlsMonitoringExporterTosoaCluster true ``` -1. Copy the war files to the persistent volume directory: + The output will look similar to the following: + + ``` + Initializing WebLogic Scripting Tool (WLST) ... + + Welcome to WebLogic Server Administration Scripting Shell + + Type help() for help on available commands + + Connecting to t3://governancedomain-adminserver:7001 with userid weblogic ... + Successfully connected to Admin Server "AdminServer" that belongs to domain "governancedomain". + + Warning: An insecure protocol was used to connect to the server. + To ensure on-the-wire security, the SSL port or Admin port should be used instead. + + Deploying ......... + Deploying application from /u01/oracle/wls-exporter-deploy/wls-exporter-adminserver.war to targets AdminServer (upload=true) ... 
+ + .Completed the deployment of Application with status completed + Current Status of your Deployment: + Deployment command type: deploy + Deployment State : completed + Deployment Message : no message + Starting application wls-exporter-adminserver. + + .Completed the start of Application with status completed + Current Status of your Deployment: + Deployment command type: start + Deployment State : completed + Deployment Message : no message + Deploying ......... + Deploying application from /u01/oracle/wls-exporter-deploy/wls-exporter-soa.war to targets soa_cluster (upload=true) ... + + ..Completed the deployment of Application with status completed + Current Status of your Deployment: + Deployment command type: deploy + Deployment State : completed + Deployment Message : no message + Starting application wls-exporter-soa. + + .Completed the start of Application with status completed + Current Status of your Deployment: + Deployment command type: start + Deployment State : completed + Deployment Message : no message + Deploying ......... + Deploying application from /u01/oracle/wls-exporter-deploy/wls-exporter-oim.war to targets oim_cluster (upload=true) ... + + .Completed the deployment of Application with status completed + Current Status of your Deployment: + Deployment command type: deploy + Deployment State : completed + Deployment Message : no message + Starting application wls-exporter-oim. + + .Completed the start of Application with status completed + Current Status of your Deployment: + Deployment command type: start + Deployment State : completed + Deployment Message : no message + Disconnected from weblogic server: AdminServer + + Exiting WebLogic Scripting Tool. + + + ``` + +#### Configure Prometheus Operator + +Prometheus enables you to collect metrics from the WebLogic Monitoring Exporter. The Prometheus Operator identifies the targets using service discovery. 
To get the WebLogic Monitoring Exporter end point discovered as a target, you must create a service monitor pointing to the service. + +The exporting of metrics from wls-exporter requires basicAuth, so a Kubernetes Secret is created with the user name and password that are base64 encoded. This Secret is used in the ServiceMonitor deployment. The `wls-exporter-ServiceMonitor.yaml` has basicAuth with credentials as username: `weblogic` and password: `` in base64 encoded. + +1. Run the following command to get the base64 encoded version of the weblogic password: ```bash - cp wls-exporter*.war // + $ echo -n "" | base64 ``` - For example: + The output will look similar to the following: - ```bash - $ cp wls-exporter*.war /scratch/OIGDockerK8S/governancedomainpv/ ``` - -### Deploy the wls-exporter war files in OIG WebLogic server - -1. Login to the Oracle Enterprise Manager Console using the URL `https://${MASTERNODE-HOSTNAME}:${MASTERNODE-PORT}/em`. - -1. Navigate to *WebLogic Domain* > *Deployments*. Click on the padlock in the upper right hand corner and select *Lock and Edit*. - -1. From the 'Deployment' drop down menu select *Deploy*. - -1. In the *Select Archive* screen, under *Archive or exploded directory is on the server where Enterprise Manager is running*, click *Browse*. Navigate to the `/u01/oracle/user_projects/domains` -directory and select `wls-exporter-admin.war`. Click *OK* and then *Next*. - -1. In *Select Target* check *AdminServer* and click *Next*. - -1. In *Application Attributes* set the following and click *Next*: - - * Application Name: `wls-exporter-admin` - * Context Root: `wls-exporter` - * Distribution: `Install and start application (servicing all requests)` - -1. In *Deployment Settings* click *Deploy*. - -1. Once you see the message *Deployment Succeeded*, click *Close*. - -1. Click on the padlock in the upper right hand corner and select *Activate Changes*. - -1. 
Repeat the above steps to deploy `wls-exporter-oimserver.war` with the following caveats: - - * In *Select Target* choose *oim_cluster* - * In *Application Attributes* set Application Name: `wls-exporter-oimserver`, Context Root: `wls-exporter` - * In *Distribution* select `Install and start application (servicing all requests)` - -1. Repeat the above steps to deploy `wls-exporter-soaserver.war` with the following caveats: - - * In *Select Target* choose *soa_cluster* - * In *Application Attributes* set Application Name: `wls-exporter-soaserver`, Context Root: `wls-exporter` - * In *Distribution* select `Install and start application (servicing all requests)` + V2VsY29tZTE= + ``` -1. Check the wls-exporter is accessible using the URL: `https://${MASTERNODE-HOSTNAME}:${MASTERNODE-PORT}/wls-exporter`. - - You should see a page saying *This is the WebLogic Monitoring Exporter*. - - -### Prometheus Operator Configuration - -Prometheus has to be configured to collect the metrics from the weblogic-monitor-exporter. The Prometheus operator identifies the targets using service discovery. To get the weblogic-monitor-exporter end point discovered as a target, you will need to create a service monitor to point to the service as follows: - -1. Create a `wls-exporter-service-monitor.yaml` in the `/wls_exporter` directory with the following contents: +1. Update the `$WORKDIR/kubernetes/monitoring-service/manifests/wls-exporter-ServiceMonitor.yaml` and change the `password:` value to the value returned above. Also change any reference to the `namespace` and `weblogic.domainName:` values to match your OIG namespace and domain name. 
For example: ``` apiVersion: v1 kind: Secret metadata: name: basic-auth - namespace: monitoring + namespace: oigns data: - password: V2VsY29tZTE= ## base64 - user: d2VibG9naWM= ## weblogic base64 + password: V2VsY29tZTE= + user: d2VibG9naWM= type: Opaque --- apiVersion: monitoring.coreos.com/v1 kind: ServiceMonitor metadata: - name: wls-exporter-governancedomain - namespace: monitoring + name: wls-exporter + namespace: oigns labels: k8s-app: wls-exporter + release: monitoring spec: namespaceSelector: matchNames: - oigns selector: matchLabels: - weblogic.domainName: governancedomain + weblogic.domainName: governancedomain endpoints: - basicAuth: password: @@ -567,41 +678,33 @@ Prometheus has to be configured to collect the metrics from the weblogic-monitor interval: 10s honorLabels: true path: /wls-exporter/metrics - ``` - - **Note**: In the above example, change the `password` value to the base64 encoded version of your weblogic password. To find the base64 value run the following: - - ```bash - $ echo -n "" | base64 ``` - If using a different namespace from `oigns` or a different `domainUID` from `governancedomain`, then change accordingly. - -1. Add Rolebinding for the WebLogic OIG domain namespace: +1. Update the `$WORKDIR/kubernetes/monitoring-service/manifests/prometheus-roleSpecific-domain-namespace.yaml` and change the `namespace` to match your OIG namespace. 
For example: - ```bash - $ cd /kube-prometheus/manifests - ``` - - Edit the `prometheus-roleBindingSpecificNamespaces.yaml` file and add the following to the file for your OIG domain namespace, for example `oigns`: - ``` + apiVersion: rbac.authorization.k8s.io/v1 + items: - apiVersion: rbac.authorization.k8s.io/v1 - kind: RoleBinding + kind: Role metadata: name: prometheus-k8s namespace: oigns - roleRef: - apiGroup: rbac.authorization.k8s.io - kind: Role - name: prometheus-k8s - subjects: - - kind: ServiceAccount - name: prometheus-k8s - namespace: monitoring + rules: + - apiGroups: + - "" + resources: + - services + - endpoints + - pods + verbs: + - get + - list + - watch + kind: RoleList ``` - For example the file should now read: +1. Update the `$WORKDIR/kubernetes/monitoring-service/manifests/prometheus-roleBinding-domain-namespace.yaml and change the `namespace` to match your OIG namespace. For example: ``` apiVersion: rbac.authorization.k8s.io/v1 @@ -619,96 +722,72 @@ Prometheus has to be configured to collect the metrics from the weblogic-monitor - kind: ServiceAccount name: prometheus-k8s namespace: monitoring - - apiVersion: rbac.authorization.k8s.io/v1 - kind: RoleBinding - metadata: - name: prometheus-k8s - namespace: default - .... - ``` + kind: RoleBindingList + ``` + + +1. Run the following command to enable Prometheus: -1. Add the Role for WebLogic OIG domain namespace. Edit the `prometheus-roleSpecificNamespaces.yaml` and change the namespace to your OIG domain namespace, for example `oigns`: + ```bash + $ cd $WORKDIR/kubernetes/monitoring-service/manifests + $ kubectl apply -f . + ``` + The output will look similar to the following: + ``` - - apiVersion: rbac.authorization.k8s.io/v1 - kind: Role - metadata: - name: prometheus-k8s - namespace: oigns - rules: - - apiGroups: - - "" - resources: - - services - - endpoints - - pods - verbs: - - get - - list - - watch - .... 
+ rolebinding.rbac.authorization.k8s.io/prometheus-k8s created + role.rbac.authorization.k8s.io/prometheus-k8s created + secret/basic-auth created + servicemonitor.monitoring.coreos.com/wls-exporter created ``` -1. Apply the yaml files as follows: +#### Prometheus service discovery - ```bash - $ kubectl apply -f prometheus-roleBindingSpecificNamespaces.yaml - $ kubectl apply -f prometheus-roleSpecificNamespaces.yaml - ``` +After the ServiceMonitor is deployed, the wls-exporter should be discovered by Prometheus and be able to collect metrics. - The output should look similar to the following: +1. Access the following URL to view Prometheus service discovery: `http://${MASTERNODE-HOSTNAME}:32101/service-discovery` - ``` - kubectl apply -f prometheus-roleBindingSpecificNamespaces.yaml - rolebinding.rbac.authorization.k8s.io/prometheus-k8s created - Warning: kubectl apply should be used on resource created by either kubectl create --save-config or kubectl apply - rolebinding.rbac.authorization.k8s.io/prometheus-k8s configured - Warning: kubectl apply should be used on resource created by either kubectl create --save-config or kubectl apply - rolebinding.rbac.authorization.k8s.io/prometheus-k8s configured - Warning: kubectl apply should be used on resource created by either kubectl create --save-config or kubectl apply - rolebinding.rbac.authorization.k8s.io/prometheus-k8s configured - - $ kubectl apply -f prometheus-roleSpecificNamespaces.yaml - role.rbac.authorization.k8s.io/prometheus-k8s created - Warning: kubectl apply should be used on resource created by either kubectl create --save-config or kubectl apply - role.rbac.authorization.k8s.io/prometheus-k8s configured - Warning: kubectl apply should be used on resource created by either kubectl create --save-config or kubectl apply - role.rbac.authorization.k8s.io/prometheus-k8s configured - Warning: kubectl apply should be used on resource created by either kubectl create --save-config or kubectl apply - 
role.rbac.authorization.k8s.io/prometheus-k8s configured - ``` +1. Click on `oigns/wls-exporter/0` and then *show more*. Verify all the targets are mentioned. -### Deploy the ServiceMonitor +**Note**: It may take several minutes for `oigns/wls-exporter/0` to appear, so refresh the page until it does. -1. Run the following command to create the ServiceMonitor: +#### Grafana dashboard - ```bash - $ cd /wls_exporter - $ kubectl create -f wls-exporter-service-monitor.yaml - ``` - - The output will look similar to the following: +1. Access the Grafana dashboard with the following URL: `http://${MASTERNODE-HOSTNAME}:32100` and login with `admin/admin`. Change your password when prompted. + +1. Import the Grafana dashboard by navigating on the left hand menu to *Create* > *Import*. Copy the content from `$WORKDIR/kubernetes/monitoring-service/config/weblogic-server-dashboard-import.json` and paste. Then click *Load* and *Import*. The dashboard should be displayed. - ``` - servicemonitor.monitoring.coreos.com/wls-exporter-oim-cluster created - ``` -### Prometheus Service Discovery - -After ServiceMonitor is deployed, the wls-exporter should be discovered by Prometheus and be able to scrape metrics. +#### Cleanup -1. Access the following URL to view Prometheus service discovery: `http://${MASTERNODE-HOSTNAME}:32101/service-discovery` +To clean up a manual installation: -1. Click on `monitoring/wls-exporter-governancedomain/0 ` and then *show more*. Verify all the targets are mentioned. +1. Run the following commands: -### Grafana Dashboard + ```bash + $ cd $WORKDIR/kubernetes/monitoring-service/manifests/ + $ kubectl delete -f . + ``` + +1. Delete the deployments: -1. Access the Grafana dashboard with the following URL: `http://${MASTERNODE-HOSTNAME}:32100` and login with `admin/admin`. Change your password when prompted. 
+ ```bash + $ cd $WORKDIR/kubernetes/monitoring-service/scripts/ + $ kubectl cp undeploy-weblogic-monitoring-exporter.py /-adminserver:/u01/oracle/wls-exporter-deploy + $ kubectl exec -it -n -adminserver -- /u01/oracle/oracle_common/common/bin/wlst.sh /u01/oracle/wls-exporter-deploy/undeploy-weblogic-monitoring-exporter.py -domainName -adminServerName AdminServer -adminURL -adminserver:7001 -username weblogic -password -oimClusterName oim_cluster -wlsMonitoringExporterTooimCluster true -soaClusterName soa_cluster -wlsMonitoringExporterTosoaCluster true + ``` -1. Import the Grafana dashboard by navigating on the left hand menu to *Create* > *Import*. Copy the content from `/fmw-kubernetes/OracleIdentityGovernance/kubernetes/3.0.1/grafana/weblogic_dashboard.json` and paste. Then click *Load* and *Import*. +1. Delete Prometheus: + ```bash + $ cd $WORKDIR/kubernetes/monitoring-service/kube-prometheus + $ kubectl delete -f manifests + $ kubectl delete -f manifests/setup + ``` + + - \ No newline at end of file diff --git a/docs-source/content/oig/manage-oig-domains/running-oig-utilities.md b/docs-source/content/oig/manage-oig-domains/running-oig-utilities.md index 3962774e3..722684d3c 100644 --- a/docs-source/content/oig/manage-oig-domains/running-oig-utilities.md +++ b/docs-source/content/oig/manage-oig-domains/running-oig-utilities.md @@ -1,5 +1,5 @@ --- -title: "Runnning OIG Utilities" +title: "Running OIG utilities" weight: 3 pre : "3. " description: "Describes the steps for running OIG utilities in Kubernetes." @@ -15,7 +15,7 @@ Run OIG utlities inside the OIG Kubernetes cluster. 
$ kubectl -n oigns exec -it governancedomain-oim-server1 -- bash ``` - This will take you into a bash shell in the running governancedomain-oim-server1 pod: + This will take you into a bash shell in the running `governancedomain-oim-server1` pod: ```bash [oracle@governancedomain-oim-server1 oracle]$ diff --git a/docs-source/content/oig/manage-oig-domains/wlst-admin-operations.md b/docs-source/content/oig/manage-oig-domains/wlst-admin-operations.md index cca5428aa..bbd547274 100644 --- a/docs-source/content/oig/manage-oig-domains/wlst-admin-operations.md +++ b/docs-source/content/oig/manage-oig-domains/wlst-admin-operations.md @@ -1,54 +1,53 @@ --- -title: "WLST Administration Operations" +title: "WLST administration operations" weight: 2 pre : "2. " description: "Describes the steps for WLST administration using helper pod running in the same Kubernetes Cluster as OIG Domain." --- -### Invoke WLST and Access Administration Server +### Invoke WLST and access Administration Server To use WLST to administer the OIG domain, use a helper pod in the same Kubernetes cluster as the OIG Domain. 1. Run the following command to create a helper pod if one doesn't already exist: - ``` + ```bash $ kubectl run helper --image -n -- sleep infinity ``` For example: - ``` - $ kubectl run helper --image oracle/oig:12.2.1.4.0 -n oigns -- sleep infinity + ```bash + $ kubectl run helper --image oracle/oig:12.2.1.4.0-8-ol7-211022.0723 -n oigns -- sleep infinity ``` The output will look similar to the following: ``` - $ kubectl run helper --image oracle/oig:12.2.1.4.0 -n oigns -- sleep infinity pod/helper created ``` 1. Run the following command to start a bash shell in the helper pod: - ``` + ```bash $ kubectl exec -it helper -n -- /bin/bash ``` For example: - ``` + ```bash $ kubectl exec -it helper -n oigns -- /bin/bash ``` This will take you into a bash shell in the running helper pod: - ``` + ```bash [oracle@helper ~]$ ``` 1. 
Connect to WLST using the following commands: - ``` + ```bash [oracle@helper ~]$ cd $ORACLE_HOME/oracle_common/common/bin [oracle@helper ~]$ ./wlst.sh ``` @@ -56,8 +55,6 @@ To use WLST to administer the OIG domain, use a helper pod in the same Kubernete The output will look similar to the following: ``` - [oracle@helper bin]$ ./wlst.sh - Initializing WebLogic Scripting Tool (WLST) ... Jython scans all the jar files it can find at first startup. Depending on the system, this process may take a few minutes to complete, and WLST may not return a prompt right away. @@ -71,14 +68,13 @@ To use WLST to administer the OIG domain, use a helper pod in the same Kubernete 1. To access t3 for the Administration Server connect as follows: - ``` - connect('weblogic','','t3://governancedomain-adminserver:7001') + ```bash + wls:/offline> connect('weblogic','','t3://governancedomain-adminserver:7001') ``` The output will look similar to the following: ``` - wls:/offline> connect('weblogic','','t3://governancedomain-adminserver:7001') Connecting to t3://governancedomain-adminserver:7001 with userid weblogic ... Successfully connected to Admin Server "AdminServer" that belongs to domain "governancedomain". @@ -90,14 +86,13 @@ To use WLST to administer the OIG domain, use a helper pod in the same Kubernete Or to access t3 for the OIG Cluster service, connect as follows: - ``` - connect('weblogic','','t3://governancedomain-cluster-oim-cluster:14100') + ```bash + wls:/offline> connect('weblogic','','t3://governancedomain-cluster-oim-cluster:14000') ``` The output will look similar to the following: ``` - wls:/offline> connect('weblogic','','t3://governancedomain-cluster-oim-cluster:14000') Connecting to t3://governancedomain-cluster-oim-cluster:14000 with userid weblogic ... Successfully connected to managed Server "oim_server1" that belongs to domain "governancedomain". 
@@ -131,7 +126,7 @@ dr-- soa_server5 wls:/governancedomain/serverConfig/Servers> ``` -### Performing WLST Administration via SSL +### Performing WLST administration via SSL 1. By default the SSL port is not enabled for the Administration Server or OIG Managed Servers. To configure the SSL port for the Administration Server and Managed Servers login to WebLogic Administration console `https://${MASTERNODE-HOSTNAME}:${MASTERNODE-PORT}/console` and navigate to **Lock & Edit** -> **Environment** ->**Servers** -> **server_name** ->**Configuration** -> **General** -> **SSL Listen Port Enabled** -> **Provide SSL Port** ( For Administration Server: 7002 and for OIG Managed Server (oim_server1): 14101) - > **Save** -> **Activate Changes**. @@ -140,11 +135,12 @@ wls:/governancedomain/serverConfig/Servers> 1. Create a `myscripts` directory as follows: - ``` - $ cd /weblogic-kubernetes-operator/kubernetes/samples/scripts + ```bash + $ cd $WORKDIR/kubernetes $ mkdir myscripts $ cd myscripts ``` + 1. Create a sample yaml template file in the `myscripts` directory called `-adminserver-ssl.yaml` to create a Kubernetes service for the Administration Server: **Note**: Update the `domainName`, `domainUID` and `namespace` based on your environment. @@ -205,27 +201,27 @@ wls:/governancedomain/serverConfig/Servers> 1. Apply the template using the following command for the Administration Server: - ``` + ```bash $ kubectl apply -f governancedomain-adminserver-ssl.yaml service/governancedomain-adminserver-ssl created ``` or using the following command for the OIG Managed Server: - ``` + ```bash $ kubectl apply -f governancedomain-oim-cluster-ssl.yaml service/governancedomain-cluster-oim-cluster-ssl created ``` 1. Validate that the Kubernetes Services to access SSL ports are created successfully: - ``` + ```bash $ kubectl get svc -n |grep ssl ``` For example: - ``` + ```bash $ kubectl get svc -n oigns |grep ssl ``` @@ -238,13 +234,13 @@ wls:/governancedomain/serverConfig/Servers> 1. 
Connect to a bash shell of the helper pod: - ``` + ```bash $ kubectl exec -it helper -n oigns -- /bin/bash ``` 1. In the bash shell run the following: - ``` + ```bash [oracle@governancedomain-adminserver oracle]$ export WLST_PROPERTIES="-Dweblogic.security.SSL.ignoreHostnameVerification=true -Dweblogic.security.TrustKeyStore=DemoTrust" [oracle@governancedomain-adminserver oracle]$ cd /u01/oracle/oracle_common/common/bin [oracle@governancedomain-adminserver oracle]$ ./wlst.sh @@ -258,11 +254,12 @@ wls:/governancedomain/serverConfig/Servers> Connect to the Administration Server t3s service: - ``` + ```bash wls:/offline> connect('weblogic','','t3s://governancedomain-adminserver-ssl:7002') - - - + Connecting to t3s://governancedomain-adminserver-ssl:7002 with userid weblogic ... + + + Successfully connected to Admin Server "AdminServer" that belongs to domain "governancedomain". wls:/governancedomain/serverConfig/> @@ -270,9 +267,12 @@ wls:/governancedomain/serverConfig/Servers> To connect to the OIG Managed Server t3s service: - ``` + ```bash wls:/offline> connect('weblogic','','t3s://governancedomain-cluster-oim-cluster-ssl:14101') Connecting to t3s://governancedomain-cluster-oim-cluster-ssl:14101 with userid weblogic ... + + + Successfully connected to managed Server "oim_server1" that belongs to domain "governancedomain". wls:/governancedomain/serverConfig/> diff --git a/docs-source/content/oig/patch-and-upgrade/_index.md b/docs-source/content/oig/patch-and-upgrade/_index.md index f39c81615..ad892f8c0 100644 --- a/docs-source/content/oig/patch-and-upgrade/_index.md +++ b/docs-source/content/oig/patch-and-upgrade/_index.md @@ -1,11 +1,11 @@ +++ -title = "Patch and Upgrade" -weight = 10 -pre = "10. " -description= "This document provides steps to patch or upgrade an OIG image, Oracle WebLogic Kubernetes Operator or Kubernetes Cluster." +title = "Patch and upgrade" +weight = 11 +pre = "11. 
" +description= "This document provides steps to patch or upgrade an OIG image, or WebLogic Kubernetes Operator." +++ -Patch an existing Oracle OIG image, or upgrade the Oracle WebLogic Kubernetes Operator release. +Patch an existing Oracle OIG image, or upgrade the WebLogic Kubernetes Operator release. {{% children style="h4" description="true" %}} diff --git a/docs-source/content/oig/patch-and-upgrade/patch_an_image.md b/docs-source/content/oig/patch-and-upgrade/patch_an_image.md index ad46f6968..634d137a9 100644 --- a/docs-source/content/oig/patch-and-upgrade/patch_an_image.md +++ b/docs-source/content/oig/patch-and-upgrade/patch_an_image.md @@ -29,7 +29,7 @@ In all of the above cases, the WebLogic Kubernetes Operator will restart the Adm 1. Update the `image` tag to point at the new image, for example: - ```bash + ``` domainHomeInImage: false image: oracle/oig:12.2.1.4.0-new imagePullPolicy: IfNotPresent @@ -55,7 +55,7 @@ In all of the above cases, the WebLogic Kubernetes Operator will restart the Adm The output will look similar to the following: - ```bash + ``` domain.weblogic.oracle/governancedomain patched ``` diff --git a/docs-source/content/oig/patch-and-upgrade/upgrade_an_operator_release.md b/docs-source/content/oig/patch-and-upgrade/upgrade_an_operator_release.md index b9b16eb6a..576a55c0a 100644 --- a/docs-source/content/oig/patch-and-upgrade/upgrade_an_operator_release.md +++ b/docs-source/content/oig/patch-and-upgrade/upgrade_an_operator_release.md @@ -31,42 +31,42 @@ The new WebLogic Kubernetes Operator Docker image must be installed on the maste 1. 
On the master node, download the new WebLogic Kubernetes Operator source code from the operator github project: ```bash - $ mkdir /weblogic-kubernetes-operator-3.X.X - $ cd /weblogic-kubernetes-operator-3.X.X + $ mkdir /weblogic-kubernetes-operator-3.X.X + $ cd /weblogic-kubernetes-operator-3.X.X $ git clone https://github.com/oracle/weblogic-kubernetes-operator.git --branch v3.X.X ``` For example: ```bash - $ mkdir /scratch/OIGDockerK8S/weblogic-kubernetes-operator-3.X.X - $ cd /scratch/OIGDockerK8S/weblogic-kubernetes-operator-3.X.X + $ mkdir /scratch/OIGK8S/weblogic-kubernetes-operator-3.X.X + $ cd /scratch/OIGK8S/weblogic-kubernetes-operator-3.X.X $ git clone https://github.com/oracle/weblogic-kubernetes-operator.git --branch v3.X.X ``` - This will create the directory `/weblogic-kubernetes-operator-3.X.X/weblogic-kubernetes-operator` + This will create the directory `/weblogic-kubernetes-operator-3.X.X/weblogic-kubernetes-operator` 1. Run the following helm command to upgrade the operator: ```bash - $ cd /weblogic-kubernetes-operator-3.X.X/weblogic-kubernetes-operator + $ cd /weblogic-kubernetes-operator-3.X.X/weblogic-kubernetes-operator $ helm upgrade --reuse-values --set image=oracle/weblogic-kubernetes-operator:3.X.X --namespace --wait weblogic-kubernetes-operator kubernetes/charts/weblogic-operator ``` For example: ```bash - $ cd /scratch/OIGDockerK8S/weblogic-kubernetes-operator-3.X.X/weblogic-kubernetes-operator + $ cd /scratch/OIGK8S/weblogic-kubernetes-operator-3.X.X/weblogic-kubernetes-operator $ helm upgrade --reuse-values --set image=oracle/weblogic-kubernetes-operator:3.X.X --namespace operator --wait weblogic-kubernetes-operator kubernetes/charts/weblogic-operator ``` The output will look similar to the following: - ```bash + ``` Release "weblogic-kubernetes-operator" has been upgraded. Happy Helming! 
NAME: weblogic-kubernetes-operator - LAST DEPLOYED: Thu Oct 01 02:50:07 2020 + LAST DEPLOYED: Mon Nov 15 09:24:40 2021 NAMESPACE: operator STATUS: deployed REVISION: 3 diff --git a/docs-source/content/oig/post-install-config/_index.md b/docs-source/content/oig/post-install-config/_index.md index f242fdc79..951f9ff7b 100644 --- a/docs-source/content/oig/post-install-config/_index.md +++ b/docs-source/content/oig/post-install-config/_index.md @@ -1,5 +1,5 @@ +++ -title = "Post Install Configuration" +title = "Post install configuration" weight = 7 pre = "7. " description = "Post install configuration." diff --git a/docs-source/content/oig/post-install-config/install_and_configure_connectors.md b/docs-source/content/oig/post-install-config/install_and_configure_connectors.md index 7bdc994a5..e33d04b56 100644 --- a/docs-source/content/oig/post-install-config/install_and_configure_connectors.md +++ b/docs-source/content/oig/post-install-config/install_and_configure_connectors.md @@ -1,25 +1,25 @@ +++ -title = "b. Install and Configure Connectors" +title = "b. Install and configure connectors" description = "Install and Configure Connectors." +++ -### Download the Connector +### Download the connector 1. Download the Connector you are interested in from [Oracle Identity Manager Connector Downloads](https://www.oracle.com/middleware/technologies/identity-management/oim-connectors-downloads.html). -1. Copy the connector zip file to a staging directory on the master node e.g. `/scratch/OIGDocker/stage` and unzip it: +1. Copy the connector zip file to a staging directory on the master node e.g. 
`/stage` and unzip it: - ``` - $ cp $HOME/Downloads/.zip // - $ cd / + ```bash + $ cp $HOME/Downloads/.zip // + $ cd / $ unzip .zip ``` For example: - ``` - $ cp $HOME/Downloads/Exchange-12.2.1.3.0.zip /scratch/OIGDocker/stage/ - $ cd /scratch/OIGDockerK8S/stage/ + ```bash + $ cp $HOME/Downloads/Exchange-12.2.1.3.0.zip /scratch/OIGK8S/stage/ + $ cd /scratch/OIGK8S/stage/ $ unzip exchange-12.2.1.3.0.zip ``` @@ -38,10 +38,10 @@ description = "Install and Configure Connectors." $ kubectl exec -ti governancedomain-oim-server1 -n oigns -- mkdir -p /u01/oracle/user_projects/domains/ConnectorDefaultDirectory ``` - **Note**: This will create a directory in the persistent volume e:g `/scratch/OIGDockerK8S/governancedomainpv/ConnectorDefaultDirectory`, + **Note**: This will create a directory in the persistent volume e.g. `/scratch/OIGK8S/governancedomainpv/ConnectorDefaultDirectory`. -### Copy OIG Connectors +### Copy OIG connectors There are two options to copy OIG Connectors to your Kubernetes cluster: @@ -51,37 +51,37 @@ There are two options to copy OIG Connectors to your Kubernetes cluster: It is recommended to use option a), however there may be cases, for example when using a Managed Service such as Oracle Kubernetes Engine on Oracle Cloud Infrastructure, where it may not be feasible to directly mount the domain directory. In such cases option b) should be used. -#### a) Copy the connector directly to the Persistent Volume +#### a) Copy the connector directly to the persistent volume 1. Copy the connector zip file to the persistent volume. 
For example: - ``` - $ cp -R / /governancedomainpv/ConnectorDefaultDirectory/ + ```bash + $ cp -R / /governancedomainpv/ConnectorDefaultDirectory/ ``` For example: - ``` - $ cp -R /scratch/OIGDockerK8S/stage/Exchange-12.2.1.3.0 /scratch/OIGDockerK8S/governancedomainpv/ConnectorDefaultDirectory/ + ```bash + $ cp -R /scratch/OIGK8S/stage/Exchange-12.2.1.3.0 /scratch/OIGK8S/governancedomainpv/ConnectorDefaultDirectory/ ``` -#### b) Use the `kubectl cp` command to copy the connector to the Persistent Volume +#### b) Use the `kubectl cp` command to copy the connector to the persistent volume 1. Run the following command to copy over the connector: - ``` + ```bash $ kubectl -n cp / :/u01/oracle/idm/server/ConnectorDefaultDirectory/ ``` For example: - ``` - $ kubectl -n oigns cp /scratch/OIGDockerK8S/stage/Exchange-12.2.1.3.0 governancedomain-oim-server1:/u01/oracle/idm/server/ConnectorDefaultDirectory/ + ```bash + $ kubectl -n oigns cp /scratch/OIGK8S/stage/Exchange-12.2.1.3.0 governancedomain-oim-server1:/u01/oracle/idm/server/ConnectorDefaultDirectory/ ``` -### Install the Connector +### Install the connector The connectors are installed as they are on a standard on-premises setup, via Application On Boarding or via Connector Installer. diff --git a/docs-source/content/oig/post-install-config/set_oimfronendurl_using_mbeans.md b/docs-source/content/oig/post-install-config/set_oimfronendurl_using_mbeans.md index 4b5a71762..ab84e155c 100644 --- a/docs-source/content/oig/post-install-config/set_oimfronendurl_using_mbeans.md +++ b/docs-source/content/oig/post-install-config/set_oimfronendurl_using_mbeans.md @@ -13,14 +13,8 @@ Follow these post install configuration steps. 1. 
Navigate to the following directory: - ``` - cd /weblogic-kubernetes-operator/kubernetes/samples/scripts/create-oim-domain/domain-home-on-pv/output/weblogic-domains/governancedomain - ``` - - For example: - - ``` - cd /scratch/OIGDockerK8S/weblogic-kubernetes-operator/kubernetes/samples/scripts/create-oim-domain/domain-home-on-pv/output/weblogic-domains/governancedomain + ```bash + cd $WORKDIR/kubernetes/create-oim-domain/domain-home-on-pv/output/weblogic-domains/governancedomain ``` 1. Create a `setUserOverrides.sh` with the following contents: @@ -33,22 +27,22 @@ Follow these post install configuration steps. 1. Copy the `setUserOverrides.sh` file to the Administration Server pod: - ``` - chmod 755 setUserOverrides.sh - kubectl cp setUserOverrides.sh oigns/governancedomain-adminserver:/u01/oracle/user_projects/domains/governancedomain/bin/setUserOverrides.sh + ```bash + $ chmod 755 setUserOverrides.sh + $ kubectl cp setUserOverrides.sh oigns/governancedomain-adminserver:/u01/oracle/user_projects/domains/governancedomain/bin/setUserOverrides.sh ``` - Where `oigns` is the OIG namespace and `governancedomain` is the `DOMAIN_NAME/UID`. + Where `oigns` is the OIG namespace and `governancedomain` is the `domain_UID`. 1. Stop the OIG domain using the following command: - ``` + ```bash $ kubectl -n patch domains --type='json' -p='[{"op": "replace", "path": "/spec/serverStartPolicy", "value": "NEVER" }]' ``` For example: - ``` + ```bash $ kubectl -n oigns patch domains governancedomain --type='json' -p='[{"op": "replace", "path": "/spec/serverStartPolicy", "value": "NEVER" }]' ``` @@ -60,13 +54,13 @@ Follow these post install configuration steps. 1. Check that all the pods are stopped: - ``` + ```bash $ kubectl get pods -n ``` For example: - ``` + ```bash $ kubectl get pods -n oigns ``` @@ -75,7 +69,7 @@ Follow these post install configuration steps. 
``` NAME READY STATUS RESTARTS AGE governancedomain-adminserver 1/1 Terminating 0 18h - governancedomain-create-fmw-infra-domain-job-vj69h 0/1 Completed 0 24h + governancedomain-create-fmw-infra-domain-job-8cww8 0/1 Completed 0 24h governancedomain-oim-server1 1/1 Terminating 0 18h governancedomain-soa-server1 1/1 Terminating 0 18h helper 1/1 Running 0 41h @@ -85,31 +79,31 @@ Follow these post install configuration steps. ``` NAME READY STATUS RESTARTS AGE - governancedomain-create-fmw-infra-domain-job-vj69h 0/1 Completed 0 24h + governancedomain-create-fmw-infra-domain-job-8cww8 0/1 Completed 0 24h helper 1/1 Running 0 41h ``` 1. Start the domain using the following command: - ``` + ```bash $ kubectl -n patch domains --type='json' -p='[{"op": "replace", "path": "/spec/serverStartPolicy", "value": "IF_NEEDED" }]' ``` For example: - ``` + ```bash $ kubectl -n oigns patch domains governancedomain --type='json' -p='[{"op": "replace", "path": "/spec/serverStartPolicy", "value": "IF_NEEDED" }]' ``` Run the following kubectl command to view the pods: - ``` + ```bash $ kubectl get pods -n ``` For example: - ``` + ```bash $ kubectl get pods -n oigns ``` @@ -143,7 +137,7 @@ Follow these post install configuration steps. * Expand *Identity and Access* > *Access* > *OIM* > *oim* * Right click the instance *oim* and select *System MBean Browser* - * Under *Application Defined MBeans*, navigate to *oracle.iam, Server:oim_server1, Application:oim* > *XMLConfig* > *Config* > *XMLConfig.DiscoveryConfig* > *Discovery*. + * Under *Application Defined MBeans*, navigate to *oracle.iam*, *Server:oim_server1* > *Application:oim* > *XMLConfig* > *Config* > *XMLConfig.DiscoveryConfig* > *Discovery*. 1. Enter a new value for the `OimFrontEndURL` attribute, in the format: @@ -157,28 +151,27 @@ Follow these post install configuration steps. 
**Note**: To find the `` run the following command: - ``` + ```bash $ kubectl -n oigns get svc ``` Your output will look similar to this: ``` - $ kubectl -n oigns get svc - NAME TYPE CLUSTER-IP EXTERNAL-IP PORT(S) AGE - governancedomain-adminserver ClusterIP None 7001/TCP 6d23h - governancedomain-cluster-oim-cluster ClusterIP 10.107.191.53 14000/TCP 6d23h - governancedomain-cluster-soa-cluster ClusterIP 10.97.108.226 8001/TCP 6d23h - governancedomain-oim-server1 ClusterIP None 14000/TCP 6d23h - governancedomain-oim-server2 ClusterIP 10.96.147.43 14000/TCP 6d23h - governancedomain-oim-server3 ClusterIP 10.103.65.77 14000/TCP 6d23h - governancedomain-oim-server4 ClusterIP 10.98.157.253 14000/TCP 6d23h - governancedomain-oim-server5 ClusterIP 10.102.19.32 14000/TCP 6d23h - governancedomain-soa-server1 ClusterIP None 8001/TCP 6d23h - governancedomain-soa-server2 ClusterIP 10.96.73.62 8001/TCP 6d23h - governancedomain-soa-server3 ClusterIP 10.105.198.83 8001/TCP 6d23h - governancedomain-soa-server4 ClusterIP 10.98.171.18 8001/TCP 6d23h - governancedomain-soa-server5 ClusterIP 10.105.196.107 8001/TCP 6d23h + NAME TYPE CLUSTER-IP EXTERNAL-IP PORT(S) AGE + governancedomain-adminserver ClusterIP None 7001/TCP 9m41s + governancedomain-cluster-oim-cluster ClusterIP 10.107.205.207 14002/TCP,14000/TCP 2d20h + governancedomain-cluster-soa-cluster ClusterIP 10.102.221.184 8001/TCP 2d20h + governancedomain-oim-server1 ClusterIP None 14002/TCP,14000/TCP 6m58s + governancedomain-oim-server2 ClusterIP 10.100.28.88 14002/TCP,14000/TCP 6m58s + governancedomain-oim-server3 ClusterIP 10.99.226.29 14002/TCP,14000/TCP 6m58s + governancedomain-oim-server4 ClusterIP 10.96.253.210 14002/TCP,14000/TCP 6m58s + governancedomain-oim-server5 ClusterIP 10.98.66.13 14002/TCP,14000/TCP 6m58s + governancedomain-soa-server1 ClusterIP None 8001/TCP 6m58s + governancedomain-soa-server2 ClusterIP 10.111.168.68 8001/TCP 6m58s + governancedomain-soa-server3 ClusterIP 10.96.183.16 8001/TCP 6m58s + 
governancedomain-soa-server4 ClusterIP 10.98.35.5 8001/TCP 6m58s + governancedomain-soa-server5 ClusterIP 10.98.200.195 8001/TCP 6m58s ``` diff --git a/docs-source/content/oig/prepare-your-environment/_index.md b/docs-source/content/oig/prepare-your-environment/_index.md index ce03f2f69..9b6c7abf0 100644 --- a/docs-source/content/oig/prepare-your-environment/_index.md +++ b/docs-source/content/oig/prepare-your-environment/_index.md @@ -10,17 +10,19 @@ description = "Preparation to deploy OIG on Kubernetes" 1. [Check the Kubernetes cluster is ready](#check-the-kubernetes-cluster-is-ready) 1. [Install the OIG Docker image](#install-the-oig-docker-image) 1. [Install the WebLogic Kubernetes Operator Docker Image](#install-the-weblogic-kubernetes-operator-docker-image) -1. [Setup the Code Repository to Deploy Oracle Identity Governance Domains](#setup-the-code-repository-to-deploy-oracle-identity-governance-domains) +1. [Setup the code repository to deploy OIG domains](#setup-the-code-repository-to-deploy-oig-domains) 1. [Install the WebLogic Kubernetes Operator](#install-the-weblogic-kubernetes-operator) +1. [Create a namespace for Oracle Identity Governance](#create-a-namespace-for-oracle-identity-governance) 1. [RCU schema creation](#rcu-schema-creation) 1. [Preparing the environment for domain creation](#preparing-the-environment-for-domain-creation) - 1. [Configure the operator for the domain namespace](#configure-the-operator-for-the-domain-namespace) - 1. [Creating Kubernetes secrets for the domain and RCU](#creating-kubernetes-secrets-for-the-domain-and-rcu) - 1. [Create a Kubernetes persistent volume and persistent volume claim](#create-a-kubernetes-persistent-volume-and-persistent-volume-claim) + + a. [Creating Kubernetes secrets for the domain and RCU](#creating-kubernetes-secrets-for-the-domain-and-rcu) + + b. 
[Create a Kubernetes persistent volume and persistent volume claim](#create-a-kubernetes-persistent-volume-and-persistent-volume-claim) ### Set up your Kubernetes cluster -If you need help setting up a Kubernetes environment, check our [cheat sheet](https://oracle.github.io/weblogic-kubernetes-operator/userguide/overview/k8s-setup/). +If you need help setting up a Kubernetes environment, refer to the official Kubernetes [documentation](https://kubernetes.io/docs/setup/#production-environment) to set up a production grade Kubernetes cluster. It is recommended you have a master node and one or more worker nodes. The examples in this documentation assume one master and two worker nodes. @@ -37,36 +39,34 @@ As per the [prerequisites](../prerequisites) an installation of Helm is required ### Check the Kubernetes cluster is ready -Run the following command on the master node to check the cluster and worker nodes are running: +1. Run the following command on the master node to check the cluster and worker nodes are running: -``` -$ kubectl get nodes,pods -n kube-system -``` + ```bash + $ kubectl get nodes,pods -n kube-system + ``` -The output will look similar to the following: + The output will look similar to the following: -``` -$ kubectl get nodes,pods -n kube-system -NAME STATUS ROLES AGE VERSION -node/worker-node1 Ready 10d v1.18.4 -node/worker-node2 Ready 10d v1.18.4 -node/master-node Ready master 11d v1.18.4 - -NAME READY STATUS RESTARTS AGE -pod/coredns-66bff467f8-slxdq 1/1 Running 0 11d -pod/coredns-66bff467f8-v77qt 1/1 Running 0 11d -pod/etcd-master-node 1/1 Running 0 11d -pod/kube-apiserver-master-node 1/1 Running 0 11d -pod/kube-controller-manager-master-node 1/1 Running 0 11d -pod/kube-flannel-ds-amd64-dcqjn 1/1 Running 0 10d -pod/kube-flannel-ds-amd64-g4ztq 1/1 Running 0 11d -pod/kube-flannel-ds-amd64-vpcbj 1/1 Running 1 10d -pod/kube-proxy-jtcxm 1/1 Running 0 11d -pod/kube-proxy-swfmm 1/1 Running 0 10d -pod/kube-proxy-w6x6t 1/1 Running 0 10d 
-pod/kube-scheduler-master-node 1/1 Running 0 11d -$ -``` + ``` + NAME STATUS ROLES AGE VERSION + node/worker-node1 Ready 17h v1.20.10 + node/worker-node2 Ready 17h v1.20.10 + node/master-node Ready master 23h v1.20.10 + + NAME READY STATUS RESTARTS AGE + pod/coredns-66bff467f8-fnhbq 1/1 Running 0 23h + pod/coredns-66bff467f8-xtc8k 1/1 Running 0 23h + pod/etcd-master 1/1 Running 0 21h + pod/kube-apiserver-master-node 1/1 Running 0 21h + pod/kube-controller-manager-master-node 1/1 Running 0 21h + pod/kube-flannel-ds-amd64-lxsfw 1/1 Running 0 17h + pod/kube-flannel-ds-amd64-pqrqr 1/1 Running 0 17h + pod/kube-flannel-ds-amd64-wj5nh 1/1 Running 0 17h + pod/kube-proxy-2kxv2 1/1 Running 0 17h + pod/kube-proxy-82vvj 1/1 Running 0 17h + pod/kube-proxy-nrgw9 1/1 Running 0 23h + pod/kube-scheduler-master 1/1 Running 0 21$ + ``` ### Install the OIG Docker Image @@ -79,121 +79,110 @@ You can deploy OIG Docker images in the following ways: Choose one of these options based on your requirements. {{% notice note %}} -The OIG Docker image must be installed on the master node AND each of the worker nodes in your Kubernetes cluster. Alternatively you can place the image in a Docker registry that your cluster can access. +The OIG Docker image must be installed on the master node and each of the worker nodes in your Kubernetes cluster. Alternatively you can place the image in a Docker registry that your cluster can access. 
{{% /notice %}} After installing the OIG Docker image run the following command to make sure the image is installed correctly on the master and worker nodes: -``` +```bash $ docker images ``` The output will look similar to the following: ``` -REPOSITORY TAG IMAGE ID CREATED SIZE -oracle/oig 12.2.1.4.0 59ffc14dddbb 3 days ago 4.96GB -k8s.gcr.io/kube-proxy v1.18.4 718fa77019f2 6 weeks ago 117MB -k8s.gcr.io/kube-scheduler v1.18.4 c663567f869e 6 weeks ago 95.3MB -k8s.gcr.io/kube-controller-manager v1.18.4 e8f1690127c4 6 weeks ago 162MB -k8s.gcr.io/kube-apiserver v1.18.4 408913fc18eb 6 weeks ago 173MB -quay.io/coreos/flannel v0.12.0-amd64 4e9f801d2217 4 months ago 52.8MB -k8s.gcr.io/pause 3.2 80d28bedfe5d 5 months ago 683kB -k8s.gcr.io/coredns 1.6.7 67da37a9a360 6 months ago 43.8MB -k8s.gcr.io/etcd 3.4.3-0 303ce5db0e90 9 months ago 288MB +REPOSITORY TAG IMAGE ID CREATED SIZE +oracle/oig 12.2.1.4.0-8-ol7-211022.0723 f05f3b63c9e8 2 weeks ago 4.43GB +quay.io/coreos/flannel v0.15.0 09b38f011a29 6 days ago 69.5MB +rancher/mirrored-flannelcni-flannel-cni-plugin v1.2 98660e6e4c3a 13 days ago 8.98MB +k8s.gcr.io/kube-proxy v1.20.10 945c9bce487a 2 months ago 99.7MB +k8s.gcr.io/kube-controller-manager v1.20.10 2f450864515d 2 months ago 116MB +k8s.gcr.io/kube-apiserver v1.20.10 644cadd07add 2 months ago 122MB +k8s.gcr.io/kube-scheduler v1.20.10 4c9be8dc650b 2 months ago 47.3MB +k8s.gcr.io/etcd 3.4.13-0 0369cf4303ff 14 months ago 253MB +k8s.gcr.io/coredns 1.7.0 bfe3a36ebd25 16 months ago 45.2MB +k8s.gcr.io/pause 3.2 80d28bedfe5d 20 months ago ``` ### Install the WebLogic Kubernetes Operator Docker Image -In this release only Oracle WebLogic Server Kubernetes Operator 3.0.1 is supported. - {{% notice note %}} -The Oracle WebLogic Server Kubernetes Operator Docker image must be installed on the master node and each of the worker nodes in your Kubernetes cluster. Alternatively you can place the image in a Docker registry that your cluster can access. 
+The WebLogic Kubernetes Operator Docker image must be installed on the master node and each of the worker nodes in your Kubernetes cluster. Alternatively you can place the image in a Docker registry that your cluster can access. {{% /notice %}} -1. Pull the Oracle WebLogic Server Kubernetes Operator 3.0.1 image by running the following command on the master node: +1. Pull the Oracle WebLogic Server Kubernetes Operator image by running the following command on the master node: ```bash - $ docker pull ghcr.io/oracle/weblogic-kubernetes-operator:3.0.1 + $ docker pull ghcr.io/oracle/weblogic-kubernetes-operator:3.3.0 ``` The output will look similar to the following: - ```bash - Trying to pull repository ghcr.io/oracle/weblogic-kubernetes-operator:3.0.1 ... - 3.0.1: Pulling from ghcr.io/oracle/weblogic-kubernetes-operator:3.0.1 - bce8f778fef0: Already exists - de14ddc50a70: Pull complete - 77401a861078: Pull complete - 9c5ac1423af4: Pull complete - 2b6f244f998f: Pull complete - 625e05083092: Pull complete - Digest: sha256:27047d032ac5a9077b39bec512b99d8ca54bf9bf71227f5fd1b7b26ac80c20d3 - Status: Downloaded newer image for ghcr.io/oracle/weblogic-kubernetes-operator:3.0.1 - ghcr.io/oracle/weblogic-kubernetes-operator:3.0.1 + ``` + Trying to pull repository ghcr.io/oracle/weblogic-kubernetes-operator ... + 3.3.0: Pulling from ghcr.io/oracle/weblogic-kubernetes-operator + c828c776e142: Pull complete + 175676c54fa1: Pull complete + b3231f480c32: Pull complete + ea4423fa8daa: Pull complete + f3ca38f7f95f: Pull complete + effd851583ec: Pull complete + 4f4fb700ef54: Pull complete + Digest: sha256:3e93848ad2f5b272c88680e7b37a4ee428dd12e4c4c91af6977fd2fa9ec1f9dc + Status: Downloaded newer image for ghcr.io/oracle/weblogic-kubernetes-operator:3.3.0 + ghcr.io/oracle/weblogic-kubernetes-operator:3.3.0 ``` 1. 
Run the docker tag command as follows: ```bash - $ docker tag ghcr.io/oracle/weblogic-kubernetes-operator:3.0.1 weblogic-kubernetes-operator:3.0.1 + $ docker tag ghcr.io/oracle/weblogic-kubernetes-operator:3.3.0 weblogic-kubernetes-operator:3.3.0 ``` - After installing the Oracle WebLogic Server Kubernetes Operator 3.0.1 Docker image, repeat the above on the worker nodes. + After installing the Oracle WebLogic Kubernetes Operator image, repeat the above on the worker nodes. -### Setup the Code Repository to Deploy Oracle Identity Governance Domains +### Setup the Code Repository to Deploy OIG Domains -Oracle Identity Governance domain deployment on Kubernetes leverages the WebLogic Kubernetes Operator infrastructure. For deploying the Oracle Identity Governance domains, you need to set up the deployment scripts on the **master** node as below: +Oracle Identity Governance domain deployment on Kubernetes leverages the WebLogic Kubernetes Operator infrastructure. For deploying the OIG domains, you need to set up the deployment scripts on the **master** node as below: 1. Create a working directory to setup the source code. ```bash - $ mkdir + $ mkdir ``` For example: + ```bash - $ mkdir /scratch/OIGDockerK8S + $ mkdir /scratch/OIGK8S ``` -1. Download the supported version of the WebLogic Kubernetes Operator source code from the operator github project. Currently the supported operator version is [3.0.1](https://github.com/oracle/weblogic-kubernetes-operator/releases/tag/v3.0.1): +1. Download the latest OIG deployment scripts from the OIG repository. 
```bash - $ cd - $ git clone https://github.com/oracle/weblogic-kubernetes-operator.git --branch release/3.0.1 + $ cd + $ git clone https://github.com/oracle/fmw-kubernetes.git ``` For example: ```bash - $ cd /scratch/OIGDockerK8S - $ git clone https://github.com/oracle/weblogic-kubernetes-operator.git --branch release/3.0.1 + $ cd /scratch/OIGK8S + $ git clone https://github.com/oracle/fmw-kubernetes.git ``` - This will create the directory `/weblogic-kubernetes-operator` - -1. Clone the Oracle Identity Governance deployment scripts from the OIG [repository](https://github.com/oracle/fmw-kubernetes.git) and copy them into the WebLogic operator samples location. +1. Set the `$WORKDIR` environment variable as follows: ```bash - $ git clone https://github.com/oracle/fmw-kubernetes.git - $ cp -rf /fmw-kubernetes/OracleIdentityGovernance/kubernetes/3.0.1/create-oim-domain /weblogic-kubernetes-operator/kubernetes/samples/scripts/ - $ mv -f /weblogic-kubernetes-operator/kubernetes/samples/charts/ingress-per-domain /weblogic-kubernetes-operator/kubernetes/samples/charts/ingress-per-domain_backup - $ cp -rf /fmw-kubernetes/OracleIdentityGovernance/kubernetes/3.0.1/ingress-per-domain /weblogic-kubernetes-operator/kubernetes/samples/charts/ingress-per-domain - $ cp -rf /fmw-kubernetes/OracleIdentityGovernance/kubernetes/3.0.1/design-console-ingress /weblogic-kubernetes-operator/kubernetes/samples/charts/design-console-ingress + $ export WORKDIR=/fmw-kubernetes/OracleIdentityGovernance ``` - For example: - + For example: + ```bash - $ git clone https://github.com/oracle/fmw-kubernetes.git - $ cp -rf /scratch/OIGDockerK8S/fmw-kubernetes/OracleIdentityGovernance/kubernetes/3.0.1/create-oim-domain /scratch/OIGDockerK8S/weblogic-kubernetes-operator/kubernetes/samples/scripts/ - $ mv -f /scratch/OIGDockerK8S/weblogic-kubernetes-operator/kubernetes/samples/charts/ingress-per-domain 
/scratch/OIGDockerK8S/weblogic-kubernetes-operator/kubernetes/samples/charts/ingress-per-domain_backup - $ cp -rf /scratch/OIGDockerK8S/fmw-kubernetes/OracleIdentityGovernance/kubernetes/3.0.1/ingress-per-domain /scratch/OIGDockerK8S/weblogic-kubernetes-operator/kubernetes/samples/charts/ingress-per-domain - $ cp -rf /scratch/OIGDockerK8S/fmw-kubernetes/OracleIdentityGovernance/kubernetes/3.0.1/design-console-ingress /scratch/OIGDockerK8S/weblogic-kubernetes-operator/kubernetes/samples/charts/design-console-ingress + $ export WORKDIR=/scratch/OIGK8S/fmw-kubernetes/OracleIdentityGovernance ``` - You can now use the deployment scripts from `/weblogic-kubernetes-operator/kubernetes/samples/scripts/` to set up the OIG domains as further described in this document. - 1. Run the following command and see if the WebLogic custom resource definition name already exists: ```bash @@ -202,13 +191,13 @@ Oracle Identity Governance domain deployment on Kubernetes leverages the WebLogi In the output you should see: - ```bash + ``` No resources found in default namespace. ``` If you see the following: - ```bash + ``` NAME AGE domains.weblogic.oracle 5d ``` @@ -254,13 +243,13 @@ Oracle Identity Governance domain deployment on Kubernetes leverages the WebLogi The output will look similar to the following: - ```bash + ``` serviceaccount/op-sa created ``` -1. If you want to setup logging and visualisation with Elasticsearch and Kibana (post domain creation) edit the `/weblogic-kubernetes-operator/kubernetes/charts/weblogic-operator/values.yaml` and set the parameter `elkIntegrationEnabled` to `true` and make sure the following parameters are set: +1. 
If you want to setup logging and visualisation with Elasticsearch and Kibana (post domain creation) edit the `$WORKDIR/kubernetes/charts/weblogic-operator/values.yaml` and set the parameter `elkIntegrationEnabled` to `true` and make sure the following parameters are set: - ```bash + ``` # elkIntegrationEnabled specifies whether or not ELK integration is enabled. elkIntegrationEnabled: true @@ -277,42 +266,48 @@ Oracle Identity Governance domain deployment on Kubernetes leverages the WebLogi elasticSearchPort: 9200 ``` - After the domain creation see [Logging and Visualization](../manage-oam-domains/logging-and-visualization) in order to complete the setup of Elasticsearch and Kibana. + After the domain creation see [Logging and Visualization](../manage-oig-domains/logging-and-visualization) in order to complete the setup of Elasticsearch and Kibana. 1. Run the following helm command to install and start the operator: ```bash - $ cd /weblogic-kubernetes-operator - $ helm install kubernetes/charts/weblogic-operator \ + $ cd $WORKDIR + $ helm install weblogic-kubernetes-operator kubernetes/charts/weblogic-operator \ --namespace \ - --set image=weblogic-kubernetes-operator:3.0.1 \ + --set image=weblogic-kubernetes-operator:3.3.0 \ --set serviceAccount= \ - --set "domainNamespaces={}" + --set "enableClusterRoleBinding=true" \ + --set "domainNamespaceSelectionStrategy=LabelSelector" \ + --set "domainNamespaceLabelSelector=weblogic-operator\=enabled" \ + --set "javaLoggingLevel=FINE" --wait ``` For example: ```bash - $ cd /scratch/OIGDockerK8S/weblogic-kubernetes-operator + $ cd $WORKDIR $ helm install weblogic-kubernetes-operator kubernetes/charts/weblogic-operator \ --namespace opns \ - --set image=weblogic-kubernetes-operator:3.0.1 \ + --set image=weblogic-kubernetes-operator:3.3.0 \ --set serviceAccount=op-sa \ - --set "domainNamespaces={}" + --set "enableClusterRoleBinding=true" \ + --set "domainNamespaceSelectionStrategy=LabelSelector" \ + --set
"domainNamespaceLabelSelector=weblogic-operator\=enabled" \ + --set "javaLoggingLevel=FINE" --wait ``` The output will look similar to the following: - ```bash + ``` NAME: weblogic-kubernetes-operator - LAST DEPLOYED: Tue Sep 29 02:33:06 2020 + LAST DEPLOYED: Thu Nov 11 09:02:50 2021 NAMESPACE: opns STATUS: deployed REVISION: 1 TEST SUITE: None ``` -1. Verify that the operator's pod is running by executing the following command to list the pods in the operator's namespace: +1. Verify that the operator's pod and services are running by executing the following command: ```bash $ kubectl get all -n @@ -326,21 +321,21 @@ Oracle Identity Governance domain deployment on Kubernetes leverages the WebLogi The output will look similar to the following: - ```bash + ``` NAME READY STATUS RESTARTS AGE - pod/weblogic-operator-5d5dfb74ff-t7ct5 2/2 Running 0 17m + pod/weblogic-operator-676d5cc6f4-rwzxf 2/2 Running 0 59s - NAME TYPE CLUSTER-IP EXTERNAL-IP PORT(S) AGE - service/internal-weblogic-operator-svc ClusterIP 10.101.11.127 8082/TCP 17m + NAME TYPE CLUSTER-IP EXTERNAL-IP PORT(S) AGE + service/internal-weblogic-operator-svc ClusterIP 10.102.7.232 8082/TCP 59s NAME READY UP-TO-DATE AVAILABLE AGE - deployment.apps/weblogic-operator 1/1 1 1 17m + deployment.apps/weblogic-operator 1/1 1 1 59s NAME DESIRED CURRENT READY AGE - replicaset.apps/weblogic-operator-5d5dfb74ff 1 1 1 17m + replicaset.apps/weblogic-operator-676d5cc6f4 1 1 1 59s ``` -1. Verify that the operator is up and running by viewing the operator pod's log: +1. 
Verify the operator pod's log: ```bash $ kubectl logs -n -c weblogic-operator deployments/weblogic-operator @@ -354,18 +349,14 @@ Oracle Identity Governance domain deployment on Kubernetes leverages the WebLogi The output will look similar to the following: - ```bash - {"timestamp":"09-29-2020T09:33:26.284+0000","thread":27,"fiber":"fiber-1","namespace":"operator","domainUID":"","level":"WARNING","class":"oracle.kubernetes.operator.utils.Certificates","method":"getCertificate","timeInMillis":1601372006284,"message":"No external certificate configured for REST endpoint. Endpoint will be disabled.","exception":"","code":"","headers":{},"body":""} - {"timestamp":"09-29-2020T09:33:28.611+0000","thread":27,"fiber":"fiber-1","namespace":"operator","domainUID":"","level":"INFO","class":"oracle.kubernetes.operator.rest.RestServer","method":"start","timeInMillis":1601372008611,"message":"Started the internal ssl REST server on https://0.0.0.0:8082/operator","exception":"","code":"","headers":{},"body":""} - {"timestamp":"09-29-2020T09:33:28.613+0000","thread":27,"fiber":"fiber-1","namespace":"operator","domainUID":"","level":"INFO","class":"oracle.kubernetes.operator.Main","method":"markReadyAndStartLivenessThread","timeInMillis":1601372008613,"message":"Starting Operator Liveness Thread","exception":"","code":"","headers":{},"body":""} + ``` + {"timestamp":"2021-11-11T17:04:53.167756673Z","thread":23,"fiber":"","namespace":"","domainUID":"","level":"CONFIG","class":"oracle.kubernetes.operator.TuningParametersImpl","method":"update","timeInMillis":1636650293167,"message":"Reloading tuning parameters from Operator's config map","exception":"","code":"","headers":{},"body":""} + {"timestamp":"2021-11-11T17:05:03.170083172Z","thread":30,"fiber":"","namespace":"","domainUID":"","level":"CONFIG","class":"oracle.kubernetes.operator.TuningParametersImpl","method":"update","timeInMillis":1636650303170,"message":"Reloading tuning parameters from Operator's config 
map","exception":"","code":"","headers":{},"body":""} + {"timestamp":"2021-11-11T17:05:13.172302644Z","thread":29,"fiber":"","namespace":"","domainUID":"","level":"CONFIG","class":"oracle.kubernetes.operator.TuningParametersImpl","method":"update","timeInMillis":1636650313172,"message":"Reloading tuning parameters from Operator's config map","exception":"","code":"","headers":{},"body":""} ``` -### RCU schema creation - -In this section you create the RCU schemas in the Oracle Database. - -Before following the steps in this section, make sure that the database and listener are up and running and you can connect to the database via SQL*Plus or other client tool. - +### Create a namespace for Oracle Identity Governance + 1. Run the following command to create a namespace for the domain: ```bash @@ -374,7 +365,7 @@ Before following the steps in this section, make sure that the database and list For example: - ``` + ```bash $ kubectl create namespace oigns ``` @@ -384,7 +375,58 @@ Before following the steps in this section, make sure that the database and list namespace/oigns created ``` - Run the following command to create a helper pod: +1. Run the following command to tag the namespace so the WebLogic Kubernetes Operator can manage it: + + ```bash + $ kubectl label namespaces weblogic-operator=enabled + ``` + + For example: + + ```bash + $ kubectl label namespaces oigns weblogic-operator=enabled + ``` + + The output will look similar to the following: + + ``` + namespace/oigns labeled + ``` + +1. Run the following command to check the label was created: + + ```bash + $ kubectl describe namespace + ``` + + For example: + + ```bash + $ kubectl describe namespace oigns + ``` + + + The output will look similar to the following: + + ``` + Name: oigns + Labels: weblogic-operator=enabled + Annotations: + Status: Active + + No resource quota. + + No LimitRange resource. + ``` + + +### RCU schema creation + +In this section you create the RCU schemas in the Oracle Database. 
+ +Before following the steps in this section, make sure that the database and listener are up and running and you can connect to the database via SQL*Plus or other client tool. + +1. Run the following command to create a helper pod: ```bash $ kubectl run helper --image -n -- sleep infinity @@ -393,12 +435,12 @@ Before following the steps in this section, make sure that the database and list For example: ```bash - $ kubectl run helper --image oracle/oig:12.2.1.4.0 -n oigns -- sleep infinity + $ kubectl run helper --image oracle/oig:12.2.1.4.0-8-ol7-211022.0723 -n oigns -- sleep infinity ``` The output will look similar to the following: - ```bash + ``` pod/helper created ``` @@ -414,7 +456,7 @@ Before following the steps in this section, make sure that the database and list $ kubectl exec -it helper -n oigns -- /bin/bash ``` - This will take you into a bash shell in the running rcu pod: + This will take you into a bash shell in the running helper pod: ```bash [oracle@helper oracle]$ @@ -471,7 +513,7 @@ Before following the steps in this section, make sure that the database and list The output will look similar to the following: - ```bash + ``` RCU Logfile: /tmp/RCU2020-09-29_10-51_508080961/logs/rcu.log Processing command line .... 
@@ -554,22 +596,22 @@ Before following the steps in this section, make sure that the database and list Service Name : ORCL.EXAMPLE.COM Connected As : sys Prefix for (prefixable) Schema Owners : OIGK8S - RCU Logfile : /tmp/RCU2020-09-29_10-51_508080961/logs/rcu.log + RCU Logfile : /tmp/RCU2021-11-11_17-16_464189537/logs/rcu.log Component schemas created: ----------------------------- Component Status Logfile - Common Infrastructure Services Success /tmp/RCU2020-09-29_10-51_508080961/logs/stb.log - Oracle Platform Security Services Success /tmp/RCU2020-09-29_10-51_508080961/logs/opss.log - SOA Infrastructure Success /tmp/RCU2020-09-29_10-51_508080961/logs/soainfra.log - Oracle Identity Manager Success /tmp/RCU2020-09-29_10-51_508080961/logs/oim.log - User Messaging Service Success /tmp/RCU2020-09-29_10-51_508080961/logs/ucsums.log - Audit Services Success /tmp/RCU2020-09-29_10-51_508080961/logs/iau.log - Audit Services Append Success /tmp/RCU2020-09-29_10-51_508080961/logs/iau_append.log - Audit Services Viewer Success /tmp/RCU2020-09-29_10-51_508080961/logs/iau_viewer.log - Metadata Services Success /tmp/RCU2020-09-29_10-51_508080961/logs/mds.log - WebLogic Services Success /tmp/RCU2020-09-29_10-51_508080961/logs/wls.log + Common Infrastructure Services Success /tmp/RCU2021-11-11_17-16_464189537/logs/stb.log + Oracle Platform Security Services Success /tmp/RCU2021-11-11_17-16_464189537/logs/opss.log + SOA Infrastructure Success /tmp/RCU2021-11-11_17-16_464189537/logs/soainfra.log + Oracle Identity Manager Success /tmp/RCU2021-11-11_17-16_464189537/logs/oim.log + User Messaging Service Success /tmp/RCU2021-11-11_17-16_464189537/logs/ucsums.log + Audit Services Success /tmp/RCU2021-11-11_17-16_464189537/logs/iau.log + Audit Services Append Success /tmp/RCU2021-11-11_17-16_464189537/logs/iau_append.log + Audit Services Viewer Success /tmp/RCU2021-11-11_17-16_464189537/logs/iau_viewer.log + Metadata Services Success /tmp/RCU2021-11-11_17-16_464189537/logs/mds.log + 
WebLogic Services Success /tmp/RCU2021-11-11_17-16_464189537/logs/wls.log Repository Creation Utility - Create : Operation Completed [oracle@helper oracle]$ @@ -611,14 +653,16 @@ Before following the steps in this section, make sure that the database and list The output should look similar to below: ``` + ... run-patched-sql-files: [sql] Executing resource: /u01/oracle/idm/server/db/oim/oracle/StoredProcedures/API/oim_role_mgmt_pkg_body.sql [sql] Executing resource: /u01/oracle/idm/server/db/oim/oracle/Upgrade/oim12cps4/list/oim12cps4_dml_pty_insert_sysprop_ssointg_grprecon_matching_rolename.sql [sql] Executing resource: /u01/oracle/idm/server/db/oim/oracle/Upgrade/oim12cps4/list/oim12cps4_dml_pty_insert_sysprop_oimadpswdpolicy.sql - [sql] 3 of 3 SQL statements executed successfully + etc... + [sql] 34 of 34 SQL statements executed successfully BUILD SUCCESSFUL - Total time: 1 second + Total time: 5 second ``` @@ -628,44 +672,16 @@ Before following the steps in this section, make sure that the database and list In this section you prepare the environment for the OIG domain creation. This involves the following steps: - 1. Configure the operator for the domain namespace - 2. Create Kubernetes secrets for the domain and RCU - 3. Create a Kubernetes PV and PVC (Persistent Volume and Persistent Volume Claim) - -#### Configure the operator for the domain namespace - -1. 
Configure the WebLogic Kubernetes Operator to manage the domain in the domain namespace by running the following command: - - ```bash - $ cd /weblogic-kubernetes-operator - $ helm upgrade --reuse-values --namespace --set "domainNamespaces={oigns}" --wait weblogic-kubernetes-operator kubernetes/charts/weblogic-operator - ``` - - For example: - - ```bash - $ cd /scratch/OIGDockerK8S/weblogic-kubernetes-operator - $ helm upgrade --reuse-values --namespace opns --set "domainNamespaces={oigns}" --wait weblogic-kubernetes-operator kubernetes/charts/weblogic-operator - ``` - - The output will look similar to the following: + a. [Creating Kubernetes secrets for the domain and RCU](#creating-kubernetes-secrets-for-the-domain-and-rcu) - ```bash - Release "weblogic-kubernetes-operator" has been upgraded. Happy Helming! - NAME: weblogic-kubernetes-operator - LAST DEPLOYED: Tue Sep 29 04:01:43 2020 - NAMESPACE: opns - STATUS: deployed - REVISION: 2 - TEST SUITE: None - ``` + b. [Create a Kubernetes persistent volume and persistent volume claim](#create-a-kubernetes-persistent-volume-and-persistent-volume-claim) #### Creating Kubernetes secrets for the domain and RCU 1. Create a Kubernetes secret for the domain using the create-weblogic-credentials script in the same Kubernetes namespace as the domain: ```bash - $ cd /weblogic-kubernetes-operator/kubernetes/samples/scripts/create-weblogic-domain-credentials + $ cd $WORKDIR/kubernetes/create-weblogic-domain-credentials $ ./create-weblogic-credentials.sh -u weblogic -p -n -d -s ``` @@ -684,13 +700,13 @@ In this section you prepare the environment for the OIG domain creation. 
This in For example: ```bash - $ cd /scratch/OIGDockerK8S/weblogic-kubernetes-operator/kubernetes/samples/scripts/create-weblogic-domain-credentials + $ cd $WORKDIR/kubernetes/create-weblogic-domain-credentials $ ./create-weblogic-credentials.sh -u weblogic -p -n oigns -d governancedomain -s oig-domain-credentials ``` The output will look similar to the following: - ```bash + ``` secret/oig-domain-credentials created secret/oig-domain-credentials labeled The secret oig-domain-credentials has been successfully created in the oigns namespace. @@ -710,7 +726,7 @@ In this section you prepare the environment for the OIG domain creation. This in The output will look similar to the following: - ```bash + ``` $ kubectl get secret oig-domain-credentials -o yaml -n oigns apiVersion: v1 data: @@ -718,7 +734,7 @@ In this section you prepare the environment for the OIG domain creation. This in username: d2VibG9naWM= kind: Secret metadata: - creationTimestamp: "2020-09-29T11:04:44Z" + creationTimestamp: "2021-11-12T10:37:43Z" labels: weblogic.domainName: governancedomain weblogic.domainUID: governancedomain @@ -738,7 +754,7 @@ In this section you prepare the environment for the OIG domain creation. This in f:type: {} manager: kubectl operation: Update - time: "2020-09-29T11:04:44Z" + time: "2021-11-12T10:37:43Z" name: oig-domain-credentials namespace: oigns resourceVersion: "1249007" @@ -750,7 +766,7 @@ In this section you prepare the environment for the OIG domain creation. This in 1. Create a Kubernetes secret for RCU in the same Kubernetes namespace as the domain, using the `create-weblogic-credentials.sh` script: ```bash - $ cd /weblogic-kubernetes-operator/kubernetes/samples/scripts/create-rcu-credentials + $ cd $WORKDIR/kubernetes/create-rcu-credentials $ ./create-rcu-credentials.sh -u -p -a sys -q -d -n -s ``` @@ -771,13 +787,13 @@ In this section you prepare the environment for the OIG domain creation. 
This in For example: ```bash - $ cd /scratch/OIGDockerK8S/weblogic-kubernetes-operator/kubernetes/samples/scripts/create-rcu-credentials + $ cd $WORKDIR/kubernetes/create-rcu-credentials $ ./create-rcu-credentials.sh -u OIGK8S -p -a sys -q -d governancedomain -n oigns -s oig-rcu-credentials ``` The output will look similar to the following: - ```bash + ``` secret/oig-rcu-credentials created secret/oig-rcu-credentials labeled The secret oig-rcu-credentials has been successfully created in the oigns namespace. @@ -797,7 +813,7 @@ In this section you prepare the environment for the OIG domain creation. This in The output will look similar to the following: - ```bash + ``` apiVersion: v1 data: password: V2VsY29tZTE= @@ -806,7 +822,7 @@ In this section you prepare the environment for the OIG domain creation. This in username: T0lHSzhT kind: Secret metadata: - creationTimestamp: "2020-09-29T11:18:45Z" + creationTimestamp: "2021-11-12T10:39:24Z" labels: weblogic.domainName: governancedomain weblogic.domainUID: governancedomain @@ -828,7 +844,7 @@ In this section you prepare the environment for the OIG domain creation. This in f:type: {} manager: kubectl operation: Update - time: "2020-09-29T11:18:45Z" + time: "2021-11-12T10:39:24Z" name: oig-rcu-credentials namespace: oigns resourceVersion: "1251020" @@ -844,29 +860,29 @@ In the Kubernetes domain namespace created above, create the persistent volume ( 1. 
Make a backup copy of the `create-pv-pvc-inputs.yaml` file and create required directories: ```bash - $ cd /weblogic-kubernetes-operator/kubernetes/samples/scripts/create-weblogic-domain-pv-pvc + $ cd $WORKDIR/kubernetes/create-weblogic-domain-pv-pvc $ cp create-pv-pvc-inputs.yaml create-pv-pvc-inputs.yaml.orig $ mkdir output - $ mkdir -p //governancedomainpv - $ chmod -R 777 //governancedomainpv + $ mkdir -p /governancedomainpv + $ chmod -R 777 /governancedomainpv ``` For example: ```bash - $ cd /scratch/OIGDockerK8S/weblogic-kubernetes-operator/kubernetes/samples/scripts/create-weblogic-domain-pv-pvc + $ cd $WORKDIR/kubernetes/create-weblogic-domain-pv-pvc $ cp create-pv-pvc-inputs.yaml create-pv-pvc-inputs.yaml.orig $ mkdir output - $ mkdir -p /scratch/OIGDockerK8S/governancedomainpv - $ chmod -R 777 /scratch/OIGDockerK8S/governancedomainpv + $ mkdir -p /scratch/OIGK8S/governancedomainpv + $ chmod -R 777 /scratch/OIGK8S/governancedomainpv ``` - **Note**: The persistent volume directory needs to be accessible to both the master and worker node(s) via NFS. Make sure this path has **full** access permissions, and that the folder is empty. In this example `/scratch/OIGDockerK8S/governancedomainpv` is accessible from all nodes via NFS. + **Note**: The persistent volume directory needs to be accessible to both the master and worker node(s) via NFS. Make sure this path has **full** access permissions, and that the folder is empty. In this example `/scratch/OIGK8S/governancedomainpv` is accessible from all nodes via NFS. 1. 
On the master node run the following command to ensure it is possible to read and write to the persistent volume: ```bash - cd //governancedomainpv + cd /governancedomainpv touch file.txt ls filemaster.txt ``` @@ -874,7 +890,7 @@ In the Kubernetes domain namespace created above, create the persistent volume ( For example: ```bash - cd /scratch/OIGDockerK8S/governancedomainpv + cd /scratch/OIGK8S/governancedomainpv touch filemaster.txt ls filemaster.txt ``` @@ -882,7 +898,7 @@ In the Kubernetes domain namespace created above, create the persistent volume ( On the first worker node run the following to ensure it is possible to read and write to the persistent volume: ```bash - cd /scratch/OIGDockerK8S/governancedomainpv + cd /scratch/OIGK8S/governancedomainpv ls filemaster.txt touch fileworker1.txt ls fileworker1.txt @@ -892,7 +908,7 @@ In the Kubernetes domain namespace created above, create the persistent volume ( 1. Edit the `create-pv-pvc-inputs.yaml` file and update the following parameters to reflect your settings. Save the file when complete: - ```bash + ``` baseName: domainUID: namespace: @@ -904,7 +920,7 @@ In the Kubernetes domain namespace created above, create the persistent volume ( For example: - ```bash + ``` # The base name of the pv and pvc baseName: domain @@ -934,7 +950,7 @@ In the Kubernetes domain namespace created above, create the persistent volume ( # Note that the path where the domain is mounted in the WebLogic containers is not affected by this # setting, that is determined when you create your domain. 
# The following line must be uncomment and customized: - weblogicDomainStoragePath: /scratch/OIGDockerK8S/governancedomainpv + weblogicDomainStoragePath: /scratch/OIGK8S/governancedomainpv # Reclaim policy of the persistent storage # The valid values are: 'Retain', 'Delete', and 'Recycle' @@ -952,7 +968,7 @@ In the Kubernetes domain namespace created above, create the persistent volume ( The output will be similar to the following: - ```bash + ``` Input parameters being used export version="create-weblogic-sample-domain-pv-pvc-inputs-v1" export baseName="domain" @@ -960,7 +976,7 @@ In the Kubernetes domain namespace created above, create the persistent volume ( export namespace="oigns" export weblogicDomainStorageType="NFS" export weblogicDomainStorageNFSServer="mynfsserver" - export weblogicDomainStoragePath="/scratch/OIGDockerK8S/governancedomainpv" + export weblogicDomainStoragePath="/scratch/OIGK8S/governancedomainpv" export weblogicDomainStorageReclaimPolicy="Retain" export weblogicDomainStorageSize="10Gi" @@ -977,7 +993,7 @@ In the Kubernetes domain namespace created above, create the persistent volume ( ```bash $ ls output/pv-pvcs - create-pv-pvc-inputs.yaml governancedomain-domain-pvc.yaml governancedomain-domain-pv.yaml + create-pv-pvc-inputs.yaml governancedomain-domain-pv.yaml governancedomain-domain-pvc.yaml ``` 1. 
Run the following `kubectl` command to create the PV and PVC in the domain namespace: @@ -995,7 +1011,7 @@ In the Kubernetes domain namespace created above, create the persistent volume ( The output will look similar to the following: - ```bash + ``` persistentvolume/governancedomain-domain-pv created persistentvolumeclaim/governancedomain-domain-pvc created ``` @@ -1016,7 +1032,7 @@ In the Kubernetes domain namespace created above, create the persistent volume ( The output will look similar to the following: - ```bash + ``` $ kubectl describe pv governancedomain-domain-pv Name: governancedomain-domain-pv @@ -1035,12 +1051,12 @@ In the Kubernetes domain namespace created above, create the persistent volume ( Source: Type: NFS (an NFS mount that lasts the lifetime of a pod) Server: mynfsserver - Path: /scratch/OIGDockerK8S/governancedomainpv + Path: /scratch/OIGK8S/governancedomainpv ReadOnly: false Events: ``` - ```bash + ``` $ kubectl describe pvc governancedomain-domain-pvc -n oigns Name: governancedomain-domain-pvc diff --git a/docs-source/content/oig/prerequisites/_index.md b/docs-source/content/oig/prerequisites/_index.md index df20f2d37..490e27809 100644 --- a/docs-source/content/oig/prerequisites/_index.md +++ b/docs-source/content/oig/prerequisites/_index.md @@ -8,10 +8,7 @@ PVC, and the domain resource YAML file for deploying the generated OIG domain." ### Introduction -This document provides information about the system requirements and limitations for deploying and running OIG domains with the WebLogic Kubernetes Operator 3.0.1. - -In this release, OIG domains are supported using the “domain on a persistent volume” -[model](https://oracle.github.io/weblogic-kubernetes-operator/userguide/managing-domains/choosing-a-model/) only, where the domain home is located in a persistent volume (PV). +This document provides information about the system requirements and limitations for deploying and running OIG domains with the WebLogic Kubernetes Operator 3.3.0. 
### System requirements for OIG domains @@ -19,21 +16,15 @@ In this release, OIG domains are supported using the “domain on a persistent v * You must have the `cluster-admin` role to install the operator. * We do not currently support running OIG in non-Linux containers. * A running Oracle Database 12.2.0.1 or later. The database must be a supported version for OIG as outlined in [Oracle Fusion Middleware 12c certifications](https://www.oracle.com/technetwork/middleware/fmw-122140-certmatrix-5763476.xlsx). It must meet the requirements as outlined in [About Database Requirements for an Oracle Fusion Middleware Installation](http://www.oracle.com/pls/topic/lookup?ctx=fmw122140&id=GUID-4D3068C8-6686-490A-9C3C-E6D2A435F20A) and in [RCU Requirements for Oracle Databases](http://www.oracle.com/pls/topic/lookup?ctx=fmw122140&id=GUID-35B584F3-6F42-4CA5-9BBB-116E447DAB83). -* Java Developer Kit (11.0.3 or later recommended) ### Limitations -Compared to running a WebLogic Server domain in Kubernetes using the operator, the -following limitations currently exist for OIG domains: +Compared to running a WebLogic Server domain in Kubernetes using the operator, the following limitations currently exist for OIG domains: +* In this release, OIG domains are supported using the “domain on a persistent volume” +[model](https://oracle.github.io/weblogic-kubernetes-operator/userguide/managing-domains/choosing-a-model/) only, where the domain home is located in a persistent volume (PV). * The "domain in image" model is not supported. -* Only configured clusters are supported. Dynamic clusters are not supported for - OIG domains. Note that you can still use all of the scaling features, - you just need to define the maximum size of your cluster at domain creation time. -* Deploying and running OIG domains is supported only with WebLogic Kubernetes Operator version 3.0.1 - currently supports the WebLogic MBean trees only. 
-* The [WebLogic Monitoring Exporter](https://github.com/oracle/weblogic-monitoring-exporter) - currently supports the WebLogic MBean trees only. Support for JRF MBeans has not - been added yet. +* Only configured clusters are supported. Dynamic clusters are not supported for OIG domains. Note that you can still use all of the scaling features, you just need to define the maximum size of your cluster at domain creation time. +* The [WebLogic Monitoring Exporter](https://github.com/oracle/weblogic-monitoring-exporter) currently supports the WebLogic MBean trees only. Support for JRF MBeans has not been added yet. diff --git a/docs-source/content/oig/release-notes.md b/docs-source/content/oig/release-notes.md index 504bcb9c4..eb0785bc8 100644 --- a/docs-source/content/oig/release-notes.md +++ b/docs-source/content/oig/release-notes.md @@ -12,5 +12,7 @@ Review the latest changes and known issues for Oracle Identity Governance on Kub | Date | Version | Change | | --- | --- | --- | -| September 3, 2021 | 21.3.3 | **A**) References to supported Kubernetes, Helm and Docker versions removed and replaced with Support note reference. **B**) Namespace and domain names changed to be consistent with [Enterprise Deployment Guide for Oracle Identity and Access Management in a Kubernetes Cluster](https://docs.oracle.com/en/middleware/fusion-middleware/12.2.1.4/ikedg/). **C**) Addtional post configuration tasks added. **D**) New section on how to start Design Console in a container. **E**) *Upgrading a Kubernetes Cluster* and *Security Hardening* removed as vendor specific.| +| November 2021 | 21.4.2 | Supports Oracle Identity Governance domain deployment using WebLogic Kubernetes Operator 3.3.0. Voyager ingress removed as no longer supported.| +| October 2021 | 21.4.1 | **A**) References to supported Kubernetes, Helm and Docker versions removed and replaced with Support note reference. 
**B**) Namespace and domain names changed to be consistent with [Enterprise Deployment Guide for Oracle Identity and Access Management in a Kubernetes Cluster](https://docs.oracle.com/en/middleware/fusion-middleware/12.2.1.4/ikedg/). **C**) Additional post configuration tasks added. **D**) New section on how to start Design Console in a container. **E**) *Upgrading a Kubernetes Cluster* and *Security Hardening* removed as vendor specific.| +| November 2020 | 20.4.1 | Initial release of Identity Governance on Kubernetes.| diff --git a/docs-source/content/oig/troubleshooting/_index.md b/docs-source/content/oig/troubleshooting/_index.md index e7a12513c..5887370fb 100644 --- a/docs-source/content/oig/troubleshooting/_index.md +++ b/docs-source/content/oig/troubleshooting/_index.md @@ -1,7 +1,7 @@ +++ title = "Troubleshooting" -weight = 11 -pre = "11. " +weight = 12 +pre = "12. " description = "Sample for creating an OIG domain home on an existing PV or PVC, and the domain resource YAML file for deploying the generated OIG domain." +++ @@ -35,19 +35,18 @@ If the OIG domain creation fails when running `create-domain.sh`, run the follow Using the output you should be able to diagnose the problem and resolve the issue. - Clean down the failed domain creation by following steps 1-4 in [Delete the OIG domain home]({{< relref "/oig/manage-oig-domains/delete-domain-home" >}}). Then - [recreate the PC and PVC]({{< relref "/oig/prepare-your-environment/#create-a-kubernetes-persistent-volume-and-persistent-volume-claim" >}}) then execute the [OIG domain creation]({{< relref "/oig/create-oig-domains" >}}) steps again. + Clean down the failed domain creation by following steps 1-3 in [Delete the OIG domain home]({{< relref "/oig/manage-oig-domains/delete-domain-home" >}}). 
Then follow [RCU schema creation]({{< relref "/oig/prepare-your-environment/#rcu-schema-creation" >}}) onwards to recreate the RCU schema, kubernetes secrets for domain and RCU, the persistent volume and the persistent volume claim. Then execute the [OIG domain creation]({{< relref "/oig/create-oig-domains" >}}) steps again. 2. If any of the above commands return the following error: - ```bash + ``` Failed to start container "create-fmw-infra-sample-domain-job": Error response from daemon: error while creating mount source path - '/scratch/OIGDockerK8S/governancedomainpv ': mkdir /scratch/OIGDockerK8S/governancedomainpv : permission denied + '/scratch/OIGK8S/governancedomainpv ': mkdir /scratch/OIGK8S/governancedomainpv : permission denied ``` then there is a permissions error on the directory for the PV and PVC and the following should be checked: - a) The directory has 777 permissions: `chmod -R 777 /governancedomainpv`. + a) The directory has 777 permissions: `chmod -R 777 /governancedomainpv`. b) If it does have the permissions, check if an `oracle` user exists and the `uid` and `gid` equal `1000`, for example: @@ -57,11 +56,10 @@ If the OIG domain creation fails when running `create-domain.sh`, run the follow Create the `oracle` user if it doesn't exist and set the `uid` and `gid` to `1000`. 
- c) Edit the `/weblogic-kubernetes-operator/kubernetes/samples/scripts/create-oim-domain-pv-pvc/create-pv-pvc-inputs.yaml` and add a slash to the end of the directory for the `weblogicDomainStoragePath` parameter: + c) Edit the `$WORKDIR/kubernetes/create-weblogic-domain-pv-pvc/create-pv-pvc-inputs.yaml` and add a slash to the end of the directory for the `weblogicDomainStoragePath` parameter: - ```bash - weblogicDomainStoragePath: /scratch/OIGDockerK8S/governancedomainpv/ + ``` + weblogicDomainStoragePath: /scratch/OIGK8S/governancedomainpv/ ``` - Clean down the failed domain creation by following steps 1-4 in [Delete the OIG domain home]({{< relref "/oig/manage-oig-domains/delete-domain-home" >}}). Then - [recreate the PC and PVC]({{< relref "/oig/prepare-your-environment/#create-a-kubernetes-persistent-volume-and-persistent-volume-claim" >}}) and then execute the [OIG domain creation]({{< relref "/oig/create-oig-domains" >}}) steps again. \ No newline at end of file + Clean down the failed domain creation by following steps 1-3 in [Delete the OIG domain home]({{< relref "/oig/manage-oig-domains/delete-domain-home" >}}). Then follow [RCU schema creation]({{< relref "/oig/prepare-your-environment/#rcu-schema-creation" >}}) onwards to recreate the RCU schema, kubernetes secrets for domain and RCU, the persistent volume and the persistent volume claim. Then execute the [OIG domain creation]({{< relref "/oig/create-oig-domains" >}}) steps again. \ No newline at end of file diff --git a/docs-source/content/oig/validate-domain-urls/_index.md b/docs-source/content/oig/validate-domain-urls/_index.md index 14081f85d..16c283eac 100644 --- a/docs-source/content/oig/validate-domain-urls/_index.md +++ b/docs-source/content/oig/validate-domain-urls/_index.md @@ -1,15 +1,15 @@ +++ -title = "Validate Domain URLs" +title = "Validate domain URLs" weight = 6 pre = "6. " description = "Sample for validating domain urls." 
+++ -In this section you validate the OIG domain URLs that are accessible via the NGINX or Voyager ingress. +In this section you validate the OIG domain URLs that are accessible via the NGINX ingress. Make sure you know the master hostname and port before proceeding. -#### Validate the OIG domain urls via the Ingress +#### Validate the OIG domain urls via the ingress Launch a browser and access the following URL's. Use `http` or `https` depending on whether you configured your ingress for non-ssl or ssl. diff --git a/docs-source/content/oud/_index.md b/docs-source/content/oud/_index.md index 1b51904b1..e1ad45378 100644 --- a/docs-source/content/oud/_index.md +++ b/docs-source/content/oud/_index.md @@ -2,7 +2,7 @@ title: "Oracle Unified Directory" date: 2019-02-23T16:43:45-05:00 description: "Oracle Unified Directory provides a comprehensive Directory Solution for robust Identity Management" -weight: 2 +weight: 5 --- Oracle Unified Directory provides a comprehensive Directory Solution for robust Identity Management. @@ -30,5 +30,6 @@ If performing an Enterprise Deployment, refer to the [Enterprise Deployment Guid ### Current release -The current supported release of Oracle Unified Directory is OUD 12c PS4 (12.2.1.4.0) +The current production release for Oracle Unified Directory 12c PS4 (12.2.1.4.0) deployment on Kubernetes is [21.4.2](https://github.com/oracle/fmw-kubernetes/releases). + diff --git a/docs-source/content/oud/create-oud-instances/create-oud-instances-helm/oud-ds-rs/_index.md b/docs-source/content/oud/create-oud-instances/create-oud-instances-helm/oud-ds-rs/_index.md index cbc54d697..cbf8b29d1 100644 --- a/docs-source/content/oud/create-oud-instances/create-oud-instances-helm/oud-ds-rs/_index.md +++ b/docs-source/content/oud/create-oud-instances/create-oud-instances-helm/oud-ds-rs/_index.md @@ -9,7 +9,6 @@ description= "This document provides details of the oud-ds-rs Helm chart." 1. [Verify the Replication](#verify-the-replication) 1. 
[Ingress Controller Setup](#ingress-controller-setup) 1. [Ingress with NGINX](#ingress-with-nginx) - 1. [Ingress with Voyager](#ingress-with-voyager) 1. [Access to Interfaces through Ingress](#access-to-interfaces-through-ingress) 1. [Configuration Parameters](#configuration-parameters) @@ -636,26 +635,6 @@ controller: * The configuration above assumes that you have `oud-ds-rs` installed with value `oud-ds-rs` as a deployment/release name. * Based on the deployment/release name in your environment, TCP port mapping may be required to be changed/updated. -#### Ingress with Voyager - -Voyager ingress implementation can be deployed/installed in a Kubernetes environment. - -##### Add Repo reference to helm for retriving/installing Chart for Voyager implementation. - -``` -$ helm repo add appscode https://charts.appscode.com/stable -``` - -##### Command `helm install` to install voyager related objects like pod, service, deployment, etc. - -``` -$ helm install --namespace mynginx \ ---set cloudProvider=baremetal \ -voyager-operator appscode/voyager -``` - -* For more details about the `helm` command and parameters, please execute `helm --help` and `helm install --help`.
- ### Access to Interfaces through Ingress Using the Helm chart, Ingress objects are also created according to configuration. The following table details the rules configured in Ingress object(s) for access to Oracle Unified Directory Interfaces through Ingress. @@ -1208,7 +1187,7 @@ The following table lists the configurable parameters of the `oud-ds-rs` chart a | tolerations | node taints to tolerate | | | affinity | node/pod affinities | | | ingress.enabled | | true | -| ingress.type | Supported value: either nginx or voyager | nginx | +| ingress.type | Supported value: nginx | nginx | | ingress.nginx.http.host | Hostname to be used with Ingress Rules.
If not set, hostname would be configured according to fullname.
Hosts would be configured as < fullname >-http.< domain >, < fullname >-http-0.< domain >, < fullname >-http-1.< domain >, etc. | | | ingress.nginx.http.domain | Domain name to be used with Ingress Rules.
In ingress rules, hosts would be configured as < host >.< domain >, < host >-0.< domain >, < host >-1.< domain >, etc. | | | ingress.nginx.http.backendPort | | http | @@ -1216,11 +1195,6 @@ The following table lists the configurable parameters of the `oud-ds-rs` chart a | ingress.nginx.admin.host | Hostname to be used with Ingress Rules.
If not set, hostname would be configured according to fullname.
Hosts would be configured as < fullname >-admin.< domain >, < fullname >-admin-0.< domain >, < fullname >-admin-1.< domain >, etc. | | | ingress.nginx.admin.domain | Domain name to be used with Ingress Rules.
In ingress rules, hosts would be configured as < host >.< domain >, < host >-0.< domain >, < host >-1.< domain >, etc. | | | ingress.nginx.admin.nginxAnnotations | | {
kubernetes.io/ingress.class: "nginx"
nginx.ingress.kubernetes.io/backend-protocol: "https"
} | -| ingress.voyagerAnnotations | | {
kubernetes.io/ingress.class: "voyager"
ingress.appscode.com/type: "NodePort"
} | -| ingress.voyagerNodePortHttp | NodePort value for HTTP Port exposed through Voyager LoadBalancer Service | 30080 | -| ingress.voyagerNodePortHttps | NodePort value for HTTPS Port exposed through Voyager LoadBalancer Service | 30443 | -| ingress.voyagerHttpPort | Port value for HTTP Port exposed through Voyager LoadBalancer Service | 80 | -| ingress.voyagerHttpsPort | Port value for HTTPS Port exposed through Voyager LoadBalancer Service | 443 | | ingress.ingress.tlsSecret | Secret name to use an already created TLS Secret. If such secret is not provided, one would be created with name < fullname >-tls-cert. If the TLS Secret is in different namespace, name can be mentioned as < namespace >/< tlsSecretName > | | | ingress.certCN | Subject's common name (cn) for SelfSigned Cert. | < fullname > | | ingress.certValidityDays | Validity of Self-Signed Cert in days | 365 | diff --git a/docs-source/content/oud/release-notes.md b/docs-source/content/oud/release-notes.md index 7b83a4c78..6c56ac0c7 100644 --- a/docs-source/content/oud/release-notes.md +++ b/docs-source/content/oud/release-notes.md @@ -12,5 +12,7 @@ Review the latest changes and known issues for Oracle Unified Directory on Kuber | Date | Version | Change | | --- | --- | --- | -| September 3, 2021 | 21.3.3 | **A**) References to supported Kubernetes, Helm and Docker versions removed and replaced with Support note reference. **B**) Namespace and domain names changed to be consistent with [Enterprise Deployment Guide for Oracle Identity and Access Management in a Kubernetes Cluster](https://docs.oracle.com/en/middleware/fusion-middleware/12.2.1.4/ikedg/). **C**) *Upgrading a Kubernetes Cluster* and *Security Hardening* removed as vendor specific.| +| November 2021 | 21.4.2 | Voyager ingress removed as no longer supported.| +| October 2021 | 21.4.1 | **A**) References to supported Kubernetes, Helm and Docker versions removed and replaced with Support note reference. 
**B**) Namespace and domain names changed to be consistent with [Enterprise Deployment Guide for Oracle Identity and Access Management in a Kubernetes Cluster](https://docs.oracle.com/en/middleware/fusion-middleware/12.2.1.4/ikedg/). **C**) *Upgrading a Kubernetes Cluster* and *Security Hardening* removed as vendor specific.| +| November 2020 | 20.4.1 | Initial release of Oracle Unified Directory on Kubernetes.| diff --git a/docs-source/content/oudsm/_index.md b/docs-source/content/oudsm/_index.md index b337a5347..b64727b14 100644 --- a/docs-source/content/oudsm/_index.md +++ b/docs-source/content/oudsm/_index.md @@ -2,7 +2,7 @@ title: "Oracle Unified Directory Services Manager" date: 2019-02-23T16:43:45-05:00 description: "Oracle Unified Directory Services Manager provides an interface for managing instances of Oracle Unified Directory" -weight: 2 +weight: 7 --- Oracle Unified Directory Services Manager is an interface for managing instances of Oracle Unified Directory. Oracle Unified Directory Services Manager enables you to configure the structure of the directory, define objects in the directory, add and configure users, groups, and other entries. Oracle Unified Directory Services Manager is also the interface you use to manage entries, schema, security, and other directory features. @@ -21,5 +21,4 @@ If performing an Enterprise Deployment, refer to the [Enterprise Deployment Guid ### Current release -The current supported release of Oracle Unified Directory Services Manager is OUD 12c PS4 (12.2.1.4.0) - +The current production release for Oracle Unified Directory Services Manager 12c PS4 (12.2.1.4.0) deployment on Kubernetes is [21.4.2](https://github.com/oracle/fmw-kubernetes/releases). 
diff --git a/docs-source/content/oudsm/create-oudsm-instances/create-oudsm-instances-helm/oudsm/_index.md b/docs-source/content/oudsm/create-oudsm-instances/create-oudsm-instances-helm/oudsm/_index.md index 5df714ca1..a60bd0567 100644 --- a/docs-source/content/oudsm/create-oudsm-instances/create-oudsm-instances-helm/oudsm/_index.md +++ b/docs-source/content/oudsm/create-oudsm-instances/create-oudsm-instances-helm/oudsm/_index.md @@ -10,7 +10,6 @@ description= "This document provides details of the oudsm Helm chart." 1. [Verify the Installation](#verify-the-installation) 1. [Ingress Controller Setup](#ingress-controller-setup) 1. [Ingress with NGINX](#ingress-with-nginx) - 1. [Ingress with Voyager](#ingress-with-voyager) 1. [Access to Interfaces through Ingress](#access-to-interfaces-through-ingress) 1. [Configuration Parameters](#configuration-parameters) @@ -331,35 +330,6 @@ controller: https: 30443 ``` -#### Ingress with Voyager - -Voyager ingress implementation can be deployed/installed in Kubernetes environment. - -##### Create a Kubernetes Namespace - -Create a Kubernetes namespace to provide a scope for NGINX objects such as pods and services that you create in the environment. To create your namespace issue the following command: - -``` -$ kubectl create ns myvoyager -namespace/mynginx created -``` - -##### Add Repo reference to helm for retrieving/installing Chart for voyager implementation. - -``` -$ helm repo add appscode https://charts.appscode.com/stable -``` - -##### Command `helm install` to install Voyager related objects like pod, service, deployment, etc. - -``` -$ helm install --namespace myvoyager \ ---set cloudProvider=baremetal \ -voyager-operator appscode/voyager -``` - -* For more details about the `helm` command and parameters, please execute `helm --help` and `helm install --help`. - ### Access to Interfaces through Ingress With the helm chart, Ingress objects are also created according to configuration. 
Following are the rules configured in Ingress object(s) for access to Oracle Unified Directory Services Manager Interfaces through Ingress. @@ -407,16 +377,11 @@ The following table lists the configurable parameters of the Oracle Unified Dire | tolerations | node taints to tolerate | | | affinity | node/pod affinities | | | ingress.enabled | | true | -| ingress.type | Supported value: either nginx or voyager | nginx | +| ingress.type | Supported value: nginx | nginx | | ingress.host | Hostname to be used with Ingress Rules.
If not set, hostname would be configured according to fullname.
Hosts would be configured as < fullname >-http.< domain >, < fullname >-http-0.< domain >, < fullname >-http-1.< domain >, etc. | | | ingress.domain | Domain name to be used with Ingress Rules.
In ingress rules, hosts would be configured as < host >.< domain >, < host >-0.< domain >, < host >-1.< domain >, etc. | | | ingress.backendPort | | http | | ingress.nginxAnnotations | | {
kubernetes.io/ingress.class: "nginx"
nginx.ingress.kubernetes.io/affinity-mode: "persistent"
nginx.ingress.kubernetes.io/affinity: "cookie"
}| -| ingress.voyagerAnnotations | | {
kubernetes.io/ingress.class: "voyager"
ingress.appscode.com/affinity: "cookie"
ingress.appscode.com/type: "NodePort"
} | -| ingress.voyagerNodePortHttp | NodePort value for HTTP Port exposed through Voyager LoadBalancer Service | 30080 | -| ingress.voyagerNodePortHttps | NodePort value for HTTPS Port exposed through Voyager LoadBalancer Service | 30443 | -| ingress.voyagerHttpPort | Port value for HTTP Port exposed through Voyager LoadBalancer Service | 80 | -| ingress.voyagerHttpsPort | Port value for HTTPS Port exposed through Voyager LoadBalancer Service | 443 | | ingress.ingress.tlsSecret | Secret name to use an already created TLS Secret. If such secret is not provided, one would be created with name < fullname >-tls-cert. If the TLS Secret is in different namespace, name can be mentioned as < namespace >/< tlsSecretName > | | | ingress.certCN | Subject's common name (cn) for SelfSigned Cert. | < fullname > | | ingress.certValidityDays | Validity of Self-Signed Cert in days | 365 | @@ -435,10 +400,6 @@ The following table lists the configurable parameters of the Oracle Unified Dire | persistence.size | Specifies the size of the storage | 10Gi | | persistence.storageClass | Specifies the storageclass of the persistence volume. | empty | | persistence.annotations | specifies any annotations that will be used | { } | -| ingress.voyagerNodePortHttp | | 31080 | -| ingress.voyagerNodePortHttps | | 31443 | -| ingress.voyagerHttpPort | | 80 | -| ingress.voyagerHttpsPort | | 443 | | oudsm.adminUser | Weblogic Administration User | weblogic | | oudsm.adminPass | Password for Weblogic Administration User | | | oudsm.startupTime | Expected startup time. 
After specified seconds readinessProbe would start | 900 | diff --git a/docs-source/content/oudsm/release-notes.md b/docs-source/content/oudsm/release-notes.md index 87e68de19..682801189 100644 --- a/docs-source/content/oudsm/release-notes.md +++ b/docs-source/content/oudsm/release-notes.md @@ -12,5 +12,8 @@ Review the latest changes and known issues for Oracle Unified Directory Services | Date | Version | Change | | --- | --- | --- | -| September 3, 2021 | 21.3.3 | **A**) References to supported Kubernetes, Helm and Docker versions removed and replaced with Support note reference. **B**) Namespace and domain names changed to be consistent with [Enterprise Deployment Guide for Oracle Identity and Access Management in a Kubernetes Cluster](https://docs.oracle.com/en/middleware/fusion-middleware/12.2.1.4/ikedg/). **C**) *Upgrading a Kubernetes Cluster* and *Security Hardening* removed as vendor specific.| +| November 2021 | 21.4.2 | Voyager ingress removed as no longer supported.| +| October 2021 | 21.4.1 | **A**) References to supported Kubernetes, Helm and Docker versions removed and replaced with Support note reference. **B**) Namespace and domain names changed to be consistent with [Enterprise Deployment Guide for Oracle Identity and Access Management in a Kubernetes Cluster](https://docs.oracle.com/en/middleware/fusion-middleware/12.2.1.4/ikedg/). 
**C**) *Upgrading a Kubernetes Cluster* and *Security Hardening* removed as vendor specific.| +| November 2020 | 20.4.1 | Initial release of Oracle Unified Directory Services Manager on Kubernetes.| + diff --git a/docs-source/content/soa-domains/_index.md b/docs-source/content/soa-domains/_index.md index aae9fd294..252da1a99 100644 --- a/docs-source/content/soa-domains/_index.md +++ b/docs-source/content/soa-domains/_index.md @@ -2,7 +2,7 @@ title: "Oracle SOA Suite" date: 2019-02-23T16:43:45-05:00 description: "The Oracle WebLogic Kubernetes Operator (the “operator”) supports deployment of Oracle SOA Suite components such as Oracle Service-Oriented Architecture (SOA), Oracle Service Bus, and Oracle Enterprise Scheduler (ESS). Follow the instructions in this guide to set up these Oracle SOA Suite domains on Kubernetes." -weight: 3 +weight: 4 --- The WebLogic Kubernetes Operator (the “operator”) supports deployment of Oracle SOA Suite components such as Oracle Service-Oriented Architecture (SOA), Oracle Service Bus, and Oracle Enterprise Scheduler (ESS). Currently the operator supports these domain types: @@ -25,7 +25,7 @@ The operator has several key features to assist you with deploying and managing #### Current production release -The current production release for the Oracle SOA Suite domains deployment on Kubernetes is [21.3.2](https://github.com/oracle/fmw-kubernetes/releases). This release uses the WebLogic Kubernetes Operator version [3.2.1](https://github.com/oracle/weblogic-kubernetes-operator/releases/tag/v3.2.1). +The current production release for the Oracle SOA Suite domains deployment on Kubernetes is [21.4.2](https://github.com/oracle/fmw-kubernetes/releases). This release uses the WebLogic Kubernetes Operator version [3.3.0](https://github.com/oracle/weblogic-kubernetes-operator/releases/tag/v3.3.0). 
#### Recent changes and known issues @@ -58,6 +58,7 @@ please consult this table of contents: To view documentation for an earlier release, see: +* [Version 21.3.2](https://oracle.github.io/fmw-kubernetes/21.3.2/soa-domains/) * [Version 21.2.2](https://oracle.github.io/fmw-kubernetes/21.2.2/soa-domains/) * [Version 21.1.2](https://oracle.github.io/fmw-kubernetes/21.1.2/soa-domains/) * [Version 20.4.2](https://oracle.github.io/fmw-kubernetes/20.4.2/soa-domains/) diff --git a/docs-source/content/soa-domains/adminguide/configure-load-balancer/_index.md b/docs-source/content/soa-domains/adminguide/configure-load-balancer/_index.md index ab038eeff..d84bbd288 100644 --- a/docs-source/content/soa-domains/adminguide/configure-load-balancer/_index.md +++ b/docs-source/content/soa-domains/adminguide/configure-load-balancer/_index.md @@ -7,6 +7,6 @@ pre: " " description: "Configure different load balancers for Oracle SOA Suite domains." --- -The WebLogic Kubernetes Operator supports ingress-based load balancers such as Traefik, NGINX (kubernetes/ingress-nginx), and Voyager. It also supports Apache web tier load balancer. +The WebLogic Kubernetes Operator supports ingress-based load balancers such as Traefik and NGINX (kubernetes/ingress-nginx). It also supports Apache web tier load balancer. {{% children style="h4" description="true" %}} diff --git a/docs-source/content/soa-domains/adminguide/configure-load-balancer/apache.md b/docs-source/content/soa-domains/adminguide/configure-load-balancer/apache.md index 80dea30a9..81f299a7c 100644 --- a/docs-source/content/soa-domains/adminguide/configure-load-balancer/apache.md +++ b/docs-source/content/soa-domains/adminguide/configure-load-balancer/apache.md @@ -2,8 +2,8 @@ title: "Apache web tier" date: 2019-02-22T15:44:42-05:00 draft: false -weight: 4 -pre: "d. " +weight: 3 +pre: "c. " description: "Configure the Apache web tier load balancer for Oracle SOA Suite domains." 
--- @@ -133,7 +133,7 @@ Refer to the [sample](https://github.com/oracle/docker-images/tree/main/OracleWe {{% /expand %}} -1. Create a PV and PVC (pv-claim-name) that can be used to store the custom_mod_wl_apache.conf. Refer to the [Sample](https://github.com/oracle/weblogic-kubernetes-operator/blob/v3.2.1/kubernetes/samples/scripts/create-weblogic-domain-pv-pvc/README.md) for creating a PV or PVC. +1. Create a PV and PVC (pv-claim-name) that can be used to store the custom_mod_wl_apache.conf. Refer to the [Sample](https://github.com/oracle/weblogic-kubernetes-operator/blob/v3.3.0/kubernetes/samples/scripts/create-weblogic-domain-pv-pvc/README.md) for creating a PV or PVC. #### Prepare the certificate and private key diff --git a/docs-source/content/soa-domains/adminguide/configure-load-balancer/nginx.md b/docs-source/content/soa-domains/adminguide/configure-load-balancer/nginx.md index 2445cc780..26628743f 100644 --- a/docs-source/content/soa-domains/adminguide/configure-load-balancer/nginx.md +++ b/docs-source/content/soa-domains/adminguide/configure-load-balancer/nginx.md @@ -33,12 +33,23 @@ Follow these steps to set up NGINX as a load balancer for an Oracle SOA Suite do 1. Deploy the `ingress-nginx` controller by using Helm on the domain namespace: + For Kubernetes versions up to v1.18.x: ```bash $ helm install nginx-ingress -n soans \ + --version=3.34.0 \ --set controller.service.type=NodePort \ --set controller.admissionWebhooks.enabled=false \ ingress-nginx/ingress-nginx ``` + + For Kubernetes versions v1.19.x+ onwards (NGINX version 4.0.6+): + ```bash + $ helm install nginx-ingress -n soans \ + --set controller.service.type=NodePort \ + --set controller.admissionWebhooks.enabled=false \ + ingress-nginx/ingress-nginx + ``` + {{%expand "Click here to see the sample output." 
%}} NAME: nginx-ingress LAST DEPLOYED: Tue Sep 15 08:40:47 2020 @@ -106,6 +117,18 @@ Follow these steps to set up NGINX as a load balancer for an Oracle SOA Suite do #### Install NGINX load balancer for end-to-end SSL configuration 1. Deploy the ingress-nginx controller by using Helm on the domain namespace: + + For Kubernetes versions up to v1.18.x: + ```bash + $ helm install nginx-ingress -n soans \ + --version=3.34.0 \ + --set controller.extraArgs.default-ssl-certificate=soans/domain1-tls-cert \ + --set controller.service.type=NodePort \ + --set controller.admissionWebhooks.enabled=false \ + --set controller.extraArgs.enable-ssl-passthrough=true \ + ingress-nginx/ingress-nginx + ``` + For Kubernetes versions v1.19.x+ onwards (NGINX version 4.0.6+): ```bash $ helm install nginx-ingress -n soans \ --set controller.extraArgs.default-ssl-certificate=soans/domain1-tls-cert \ @@ -184,7 +207,7 @@ Follow these steps to set up NGINX as a load balancer for an Oracle SOA Suite do 1. Create an ingress for the domain in the domain namespace by using the sample Helm chart. Here path-based routing is used for ingress. Sample values for default configuration are shown in the file `${WORKDIR}/charts/ingress-per-domain/values.yaml`. By default, `type` is `TRAEFIK` , `sslType` is `NONSSL`, and `domainType` is `soa`. These values can be overridden by passing values through the command line or can be edited in the sample file `values.yaml`. If needed, you can update the ingress YAML file to define more path rules (in section `spec.rules.host.http.paths`) based on the domain application URLs that need to be accessed. Update the template YAML file for the NGINX load balancer located at `${WORKDIR}/charts/ingress-per-domain/templates/nginx-ingress.yaml`. - > Note: See [here](https://github.com/oracle/fmw-kubernetes/blob/v21.3.2/OracleSOASuite/kubernetes/ingress-per-domain/README.md#configuration) for all the configuration parameters. 
+ > Note: See [here](https://github.com/oracle/fmw-kubernetes/blob/v21.4.2/OracleSOASuite/kubernetes/ingress-per-domain/README.md#configuration) for all the configuration parameters. ```bash $ cd ${WORKDIR} diff --git a/docs-source/content/soa-domains/adminguide/configure-load-balancer/traefik.md b/docs-source/content/soa-domains/adminguide/configure-load-balancer/traefik.md index 8ab0dc07c..7dfb94412 100644 --- a/docs-source/content/soa-domains/adminguide/configure-load-balancer/traefik.md +++ b/docs-source/content/soa-domains/adminguide/configure-load-balancer/traefik.md @@ -157,7 +157,7 @@ Sample values for default configuration are shown in the file `${WORKDIR}/charts By default, `type` is `TRAEFIK`, `sslType` is `NONSSL`, and `domainType` is `soa`. These values can be overridden by passing values through the command line or can be edited in the sample file `values.yaml` based on the type of configuration (NONSSL, SSL, and E2ESSL). If needed, you can update the ingress YAML file to define more path rules (in section `spec.rules.host.http.paths`) based on the domain application URLs that need to be accessed. The template YAML file for the Traefik (ingress-based) load balancer is located at `${WORKDIR}/charts/ingress-per-domain/templates/traefik-ingress.yaml`. -> Note: See [here](https://github.com/oracle/fmw-kubernetes/blob/v21.3.2/OracleSOASuite/kubernetes/ingress-per-domain/README.md#configuration) for all the configuration parameters. +> Note: See [here](https://github.com/oracle/fmw-kubernetes/blob/v21.4.2/OracleSOASuite/kubernetes/ingress-per-domain/README.md#configuration) for all the configuration parameters. 1. 
Install `ingress-per-domain` using Helm for `NONSSL` configuration: diff --git a/docs-source/content/soa-domains/adminguide/configure-load-balancer/voyager.md b/docs-source/content/soa-domains/adminguide/configure-load-balancer/voyager.md deleted file mode 100644 index 7726b0fa4..000000000 --- a/docs-source/content/soa-domains/adminguide/configure-load-balancer/voyager.md +++ /dev/null @@ -1,492 +0,0 @@ ---- -title: "Voyager" -date: 2019-02-22T15:44:42-05:00 -draft: false -weight: 3 -pre: "c. " -description: "Configure the ingress-based Voyager load balancer for Oracle SOA Suite domains." ---- - -*Voyager/HAProxy* is a popular ingress-based load balancer for production environments. This section provides information about how to install and configure *Voyager/HAProxy* to load balance Oracle SOA Suite domain clusters. You can configure Voyager for non-SSL, SSL termination, and end-to-end SSL access of the application URL. - -Follow these steps to set up Voyager as a load balancer for an Oracle SOA Suite domain in a Kubernetes cluster: - - 1. [Install the Voyager load balancer](#install-the-voyager-load-balancer) - 2. [Configure Voyager to manage ingresses](#configure-voyager-to-manage-ingresses) - 3. [Verify domain application URL access](#verify-domain-application-url-access) - 4. [Uninstalling Voyager Ingress](#uninstalling-voyager-ingress) - 5. [Uninstall Voyager](#uninstall-voyager) - - -##### Install the Voyager load balancer - -1. Add the AppsCode chart repository: - - ```bash - $ helm repo add appscode https://charts.appscode.com/stable/ - $ helm repo update - ``` -1. Verify that the chart repository has been added: - - ```bash - $ helm search repo appscode/voyager - ``` - > **NOTE**: After updating the Helm repository, the Voyager version listed may be newer that the one appearing here. Check with the Voyager site for the latest supported versions. - -1. 
Install the Voyager operator: - - > **NOTE**: The Voyager version used for the install should match the version found with `helm search`. - - ```bash - $ kubectl create ns voyager - $ helm install voyager-operator appscode/voyager --version 12.0.0 \ - --namespace voyager \ - --set cloudProvider=baremetal \ - --set apiserver.enableValidatingWebhook=false - ``` - - Wait until the Voyager operator is running. - -1. Check the status of the Voyager operator: - ```bash - $ kubectl get all -n voyager - ``` - {{%expand "Click here to see the sample output." %}} - - NAME READY STATUS RESTARTS AGE - pod/voyager-operator-b84f95f8f-4szhl 1/1 Running 0 43h - - NAME TYPE CLUSTER-IP EXTERNAL-IP PORT(S) AGE - service/voyager-operator ClusterIP 10.107.201.155 443/TCP,56791/TCP 43h - - NAME READY UP-TO-DATE AVAILABLE AGE - deployment.apps/voyager-operator 1/1 1 1 43h - - NAME DESIRED CURRENT READY AGE - replicaset.apps/voyager-operator-b84f95f8f 1 1 1 43h - - {{% /expand %}} - - See the official [installation document](https://github.com/oracle/weblogic-kubernetes-operator/blob/main/kubernetes/samples/charts/voyager/README.md#a-step-by-step-guide-to-install-the-voyager-operator) for more details. - -1. Update the Voyager operator - - After the Voyager operator is installed and running, upgrade the Voyager operator using the `helm upgrade` command, where `voyager` is the Voyager namespace and `soans` is the namespace of the domain. - ```bash - $ helm upgrade voyager-operator appscode/voyager --namespace voyager - ``` - {{%expand "Click here to see the sample output." %}} - Release "voyager-operator" has been upgraded. Happy Helming! 
- NAME: voyager-operator - LAST DEPLOYED: Mon Sep 28 11:53:43 2020 - NAMESPACE: voyager - STATUS: deployed - REVISION: 2 - TEST SUITE: None - NOTES: - Set cloudProvider for installing Voyager - - To verify that Voyager has started, run: - - kubectl get deployment --namespace voyager -l "app.kubernetes.io/name=voyager,app.kubernetes.io/instance=voyager-operator" - - {{% /expand %}} - -##### Configure Voyager to manage ingresses - -1. Create an ingress for the domain in the domain namespace by using the sample Helm chart. Here path-based routing is used for ingress. Sample values for default configuration are shown in the file `${WORKDIR}/charts/ingress-per-domain/values.yaml`. By default, `type` is `TRAEFIK` , `sslType` is `NONSSL`, and `domainType` is `soa`. These values can be overridden by passing values through the command line or can be edited on the sample file `values.yaml`. - - > Note: See [here](https://github.com/oracle/fmw-kubernetes/blob/v21.3.2/OracleSOASuite/kubernetes/ingress-per-domain/README.md#configuration) for all the configuration parameters. - - If needed, you can update the ingress yaml file to define more path rules (in the `spec.rules.host.http.paths` section) based on the domain application URLs that need to be accessed. You need to update the template yaml file for the Voyager (ingress-based) load balancer located at `${WORKDIR}/charts/ingress-per-domain/templates/voyager-ingress.yaml` - - ```bash - $ cd ${WORKDIR} - $ helm install soa-voyager-ingress charts/ingress-per-domain \ - --namespace soans \ - --values charts/ingress-per-domain/values.yaml \ - --set type=VOYAGER - ``` - {{%expand "Click here to check the output of the ingress per domain " %}} - ```bash - NAME: soa-voyager-ingress - LAST DEPLOYED: Mon Jul 20 08:20:27 2020 - NAMESPACE: soans - STATUS: deployed - REVISION: 1 - TEST SUITE: None - ``` - {{% /expand %}} -1. 
To secure access (`SSL` and `E2ESSL`) to the Oracle SOA Suite application, create a certificate and generate secrets: - ```bash - $ openssl req -x509 -nodes -days 365 -newkey rsa:2048 -keyout /tmp/tls1.key -out /tmp/tls1.crt -subj "/CN=*" - $ kubectl -n soans create secret tls domain1-tls-cert --key /tmp/tls1.key --cert /tmp/tls1.crt - ``` - -1. Deploy `ingress-per-domain` using Helm for SSL termination configuration. - - ```bash - $ cd ${WORKDIR} - $ helm install soa-voyager-ingress charts/ingress-per-domain \ - --namespace soans \ - --values charts/ingress-per-domain/values.yaml \ - --set type=VOYAGER \ - --set sslType=SSL - ``` - {{%expand "Click here to see the sample output of the above Commnad." %}} - ```bash - NAME: soa-voyager-ingress - LAST DEPLOYED: Mon Jul 20 08:20:27 2020 - NAMESPACE: soans - STATUS: deployed - REVISION: 1 - TEST SUITE: None - ``` - {{% /expand %}} - -1. Deploy `ingress-per-domain` using Helm for `E2ESSL` configuration. - - ```bash - $ cd ${WORKDIR} - $ helm install soa-voyager-ingress charts/ingress-per-domain \ - --namespace soans \ - --values charts/ingress-per-domain/values.yaml \ - --set type=VOYAGER \ - --set sslType=E2ESSL - ``` - {{%expand "Click here to see the sample output of the above Commnad." %}} - ```bash - NAME: soa-voyager-ingress - LAST DEPLOYED: Mon Apr 20 08:20:27 2021 - NAMESPACE: soans - STATUS: deployed - REVISION: 1 - TEST SUITE: None - ``` - {{% /expand %}} - -1. For `NONSSL` access to the Oracle SOA Suite application, get the details of the services deployed by the above ingress: - - ```bash - $ kubectl describe ingress.voyager.appscode.com/soainfra-voyager -n soans - ``` - {{%expand "Click here to see the sample output of the services supported by the above deployed ingress." 
%}} - Sample output: - ```bash - Name: soainfra-voyager - Namespace: soans - Labels: - Annotations: ingress.appscode.com/affinity: cookie - ingress.appscode.com/default-timeout: {"connect": "1800s", "server": "1800s"} - ingress.appscode.com/stats: true - ingress.appscode.com/type: NodePort - API Version: voyager.appscode.com/v1beta1 - Kind: Ingress - Metadata: - Creation Timestamp: 2020-07-20T08:20:28Z - Generation: 1 - Managed Fields: - API Version: voyager.appscode.com/v1beta1 - Fields Type: FieldsV1 - fieldsV1: - f:metadata: - f:annotations: - .: - f:ingress.appscode.com/affinity: - f:ingress.appscode.com/default-timeout: - f:ingress.appscode.com/stats: - f:ingress.appscode.com/type: - f:spec: - .: - f:rules: - Manager: Go-http-client - Operation: Update - Time: 2020-07-20T08:20:28Z - Resource Version: 370484 - Self Link: /apis/voyager.appscode.com/v1beta1/namespaces/soans/ingresses/soainfra-voyager - UID: bb756966-cd7f-40a5-b08c-79f69e2b9440 - Spec: - Rules: - Host: * - Http: - Node Port: 30305 - Paths: - Backend: - Service Name: soainfra-adminserver - Service Port: 7001 - Path: /console - Backend: - Service Name: soainfra-adminserver - Service Port: 7001 - Path: /em - Backend: - Service Name: soainfra-adminserver - Service Port: 7001 - Path: /weblogic/ready - Backend: - Service Name: soainfra-cluster-soa-cluster - Service Port: 8001 - Path: / - Backend: - Service Name: soainfra-cluster-soa-cluster - Service Port: 8001 - Path: /soa-infra - Backend: - Service Name: soainfra-cluster-soa-cluster - Service Port: 8001 - Path: /soa/composer - Backend: - Service Name: soainfra-cluster-soa-cluster - Service Port: 8001 - Path: /integration/worklistapp - Events: - Type Reason Age From Message - ---- ------ ---- ---- ------- - Normal ServiceReconcileSuccessful 4m30s voyager-operator Successfully created NodePort Service voyager-soainfra-voyager - Normal ConfigMapReconcileSuccessful 4m30s voyager-operator Successfully created ConfigMap voyager-soainfra-voyager - Normal 
RBACSuccessful 4m30s voyager-operator Successfully created ServiceAccount voyager-soainfra-voyager - Normal RBACSuccessful 4m30s voyager-operator Successfully created Role voyager-soainfra-voyager - Normal RBACSuccessful 4m30s voyager-operator Successfully created RoleBinding voyager-soainfra-voyager - Normal DeploymentReconcileSuccessful 4m30s voyager-operator Successfully created HAProxy Deployment voyager-soainfra-voyager - Normal StatsServiceReconcileSuccessful 4m30s voyager-operator Successfully created stats Service voyager-soainfra-voyager-stats - ``` - {{% /expand %}} - -1. For `SSL` access to the Oracle SOA Suite application, get the details of the services by the above deployed ingress: - - ```bash - $ kubectl describe ingress.voyager.appscode.com/soainfra-voyager -n soans - ``` - {{%expand "Click here to see all the services configured by the above deployed ingress." %}} - - ```bash - Name: soainfra-voyager - Namespace: soans - Labels: - Annotations: ingress.appscode.com/affinity: cookie - ingress.appscode.com/default-timeout: {"connect": "1800s", "server": "1800s"} - ingress.appscode.com/stats: true - ingress.appscode.com/type: NodePort - API Version: voyager.appscode.com/v1beta1 - Kind: Ingress - Metadata: - Creation Timestamp: 2020-07-20T08:20:28Z - Generation: 1 - Managed Fields: - API Version: voyager.appscode.com/v1beta1 - Fields Type: FieldsV1 - fieldsV1: - f:metadata: - f:annotations: - .: - f:ingress.appscode.com/affinity: - f:ingress.appscode.com/default-timeout: - f:ingress.appscode.com/stats: - f:ingress.appscode.com/type: - f:spec: - .: - f:rules: - Manager: Go-http-client - Operation: Update - Time: 2020-07-20T08:20:28Z - Resource Version: 370484 - Self Link: /apis/voyager.appscode.com/v1beta1/namespaces/soans/ingresses/soainfra-voyager - UID: bb756966-cd7f-40a5-b08c-79f69e2b9440 - Spec: - Frontend Rules: - Port: 443 - Rules: - http-request set-header WL-Proxy-SSL true - Rules: - Host: * - Http: - Node Port: 30305 - Paths: - Backend: - 
Service Name: soainfra-adminserver - Service Port: 7001 - Path: /console - Backend: - Service Name: soainfra-adminserver - Service Port: 7001 - Path : /em - Backend: - Service Name: soainfra-adminserver - Service Port: 7001 - Path: /weblogic/ready - Backend: - Service Name: soainfra-cluster-soa-cluster - Service Port: 8001 - Path: / - Backend: - Service Name: soainfra-cluster-soa-cluster - Service Port: 8001 - Path: /soa-infra - Backend: - Service Name: soainfra-cluster-soa-cluster - Service Port: 8001 - Path: /soa/composer - Backend: - Service Name: soainfra-cluster-soa-cluster - Service Port: 8001 - Path: /integration/worklistapp - Tls: - Hosts: - * - Secret Name: domain1-tls-cert - Events: - Type Reason Age From Message - ---- ------ ---- ---- ------- - Normal ServiceReconcileSuccessful 22s voyager-operator Successfully created NodePort Service voyager-soainfra-voyager - Normal ConfigMapReconcileSuccessful 21s voyager-operator Successfully created ConfigMap voyager-soainfra-voyager - Normal RBACSuccessful 21s voyager-operator Successfully created ServiceAccount voyager-soainfra-voyager - Normal RBACSuccessful 21s voyager-operator Successfully created Role voyager-soainfra-voyager - Normal RBACSuccessful 21s voyager-operator Successfully created RoleBinding voyager-soainfra-voyager - Normal DeploymentReconcileSuccessful 21s voyager-operator Successfully created HAProxy Deployment voyager-soainfra-voyager - Normal StatsServiceReconcileSuccessful 21s voyager-operator Successfully created stats Service voyager-soainfra-voyager-stats - ``` - {{% /expand %}} -1. For `E2ESSL` access to the Oracle SOA Suite application, get the details of the services by the above deployed ingress: - - ```bash - $ kubectl describe ingress.voyager.appscode.com/soainfra-voyager-e2essl-admin -n soans - ``` - {{%expand "Click here to see all the services configured by the above deployed ingress." 
%}} - - ```bash - Name: soainfra-voyager-e2essl - Namespace: soans - Labels: app.kubernetes.io/managed-by=Helm - Annotations: ingress.appscode.com/affinity: cookie - ingress.appscode.com/ssl-passthrough: true - ingress.appscode.com/stats: true - ingress.appscode.com/type: NodePort - meta.helm.sh/release-name: soa-voyager-ingress - meta.helm.sh/release-namespace: soans - API Version: voyager.appscode.com/v1beta1 - Kind: Ingress - Metadata: - Creation Timestamp: 2021-04-09T07:04:07Z - Generation: 1 - Managed Fields: - API Version: voyager.appscode.com/v1beta1 - Fields Type: FieldsV1 - fieldsV1: - f:metadata: - f:annotations: - .: - f:ingress.appscode.com/affinity: - f:ingress.appscode.com/ssl-passthrough: - f:ingress.appscode.com/stats: - f:ingress.appscode.com/type: - f:meta.helm.sh/release-name: - f:meta.helm.sh/release-namespace: - f:labels: - .: - f:app.kubernetes.io/managed-by: - f:spec: - .: - f:rules: - f:tls: - Manager: Go-http-client - Operation: Update - Time: 2021-04-09T07:04:07Z - Resource Version: 526406 - Self Link: /apis/voyager.appscode.com/v1beta1/namespaces/soans/ingresses/soainfra-voyager-e2essl - UID: 0d315fa3-893e-4cde-b589-f87f5d5fd8ce - Spec: - Rules: - Host: * - Http: - Node Port: 30443 - Paths: - Backend: - Service Name: soainfra-adminserver - Service Port: 7002 - Path: / - Tls: - Hosts: - * - Secret Name: domain1-tls-cert - Events: - Type Reason Age From Message - ---- ------ ---- ---- ------- - Normal ServiceReconcileSuccessful 3m16s voyager-operator Successfully created NodePort Service voyager-soainfra-voyager-e2essl - Normal ConfigMapReconcileSuccessful 3m16s voyager-operator Successfully created ConfigMap voyager-soainfra-voyager-e2essl - Normal RBACSuccessful 3m16s voyager-operator Successfully created ServiceAccount voyager-soainfra-voyager-e2essl - Normal RBACSuccessful 3m16s voyager-operator Successfully created Role voyager-soainfra-voyager-e2essl - Normal RBACSuccessful 3m16s voyager-operator Successfully created RoleBinding 
voyager-soainfra-voyager-e2essl - Normal DeploymentReconcileSuccessful 3m16s voyager-operator Successfully created HAProxy Deployment voyager-soainfra-voyager-e2essl - Normal StatsServiceReconcileSuccessful 3m16s voyager-operator Successfully created stats Service voyager-soainfra-voyager-e2essl-stats - ``` - {{% /expand %}} - -1. To confirm that the load balancer noticed the new ingress and is successfully routing to the domain's server pods, you can send a request to the URL for the "WebLogic ReadyApp framework" which should return a HTTP 200 status code, as follows: - - ```bash - $ curl -v http://${LOADBALANCER-HOSTNAME}:${LOADBALANCER-PORT}/weblogic/ready - * About to connect() to localhost port 30305 (#0) - * Trying 127.0.0.1... - * Connected to localhost (127.0.0.1) port 30305 (#0) - > GET /weblogic/ready HTTP/1.1 - > User-Agent: curl/7.29.0 - > Accept: */* - > host: *****.com - > - < HTTP/1.1 200 OK - < Content-Length: 0 - < Date: Thu, 12 Mar 2020 10:16:43 GMT - < Vary: Accept-Encoding - < - * Connection #0 to host localhost left intact - ``` - -##### Verify domain application URL access - -After setting up the Voyager (ingress-based) load balancer, verify that the Oracle SOA Suite domain applications are accessible through the load balancer port 30305 for `NONSSL`, 30443 for `SSL` and on ports 30445(`admin`), 30447(`soa`) and 30449(`osb`) for `E2ESSL`. The application URLs for Oracle SOA Suite domain of type `soa` are: - -> Note: Port 30305 is the LOADBALANCER-Non-SSLPORT and 30443 is LOADBALANCER-SSLPORT. 
- -##### NONSSL configuration - - ```bash - http://${LOADBALANCER-HOSTNAME}:${LOADBALANCER-Non-SSLPORT}/weblogic/ready - http://${LOADBALANCER-HOSTNAME}:${LOADBALANCER-Non-SSLPORT}/console - http://${LOADBALANCER-HOSTNAME}:${LOADBALANCER-Non-SSLPORT}/em - http://${LOADBALANCER-HOSTNAME}:${LOADBALANCER-Non-SSLPORT}/soa-infra - http://${LOADBALANCER-HOSTNAME}:${LOADBALANCER-Non-SSLPORT}/soa/composer - http://${LOADBALANCER-HOSTNAME}:${LOADBALANCER-Non-SSLPORT}/integration/worklistapp - ``` -##### SSL configuration - - ```bash - https://${LOADBALANCER-HOSTNAME}:${LOADBALANCER-SSLPORT}/weblogic/ready - https://${LOADBALANCER-HOSTNAME}:${LOADBALANCER-SSLPORT}/console - https://${LOADBALANCER-HOSTNAME}:${LOADBALANCER-SSLPORT}/em - https://${LOADBALANCER-HOSTNAME}:${LOADBALANCER-SSLPORT}/soa-infra - https://${LOADBALANCER-HOSTNAME}:${LOADBALANCER-SSLPORT}/soa/composer - https://${LOADBALANCER-HOSTNAME}:${LOADBALANCER-SSLPORT}/integration/worklistapp - ``` -##### E2ESSL configuration - - ```bash - https://${LOADBALANCER-HOSTNAME}:30445/weblogic/ready - https://${LOADBALANCER-HOSTNAME}:30445/console - https://${LOADBALANCER-HOSTNAME}:30445/em - https://${LOADBALANCER-HOSTNAME}:30447/soa-infra - https://${LOADBALANCER-HOSTNAME}:30447/soa/composer - https://${LOADBALANCER-HOSTNAME}:30447/integration/worklistapp - ``` - -#### Uninstalling Voyager ingress - - To uninstall and delete the ingress deployment, enter the following command: - - ```bash - $ helm delete soa-voyager-ingress -n soans - ``` -#### Uninstall Voyager - - ```bash - $ helm delete voyager-operator -n voyager - ``` diff --git a/docs-source/content/soa-domains/adminguide/deploying-composites/deploy-artifacts.md b/docs-source/content/soa-domains/adminguide/deploying-composites/deploy-artifacts.md new file mode 100644 index 000000000..c76c4bd90 --- /dev/null +++ b/docs-source/content/soa-domains/adminguide/deploying-composites/deploy-artifacts.md @@ -0,0 +1,340 @@ +--- +title: "Deploy using composites in a 
persistent volume or image" +date: 2021-10-19T12:04:42-05:00 +draft: false +weight: 3 +pre : "c. " +description: "Deploy Oracle SOA Suite and Oracle Service Bus composite applications artifacts in a persistent volume or in an image." +--- + +Learn how to deploy Oracle SOA Suite and Oracle Service Bus composite applications artifacts in a Kubernetes persistent volume or in an image to an Oracle SOA Suite environment deployed using a WebLogic Kubernetes Operator. + +The deployment methods described in [Deploy using JDeveloper]({{< relref "/soa-domains/adminguide/deploying-composites/supportjdev.md" >}}) and [Deploy using Maven and Ant]({{< relref "/soa-domains/adminguide/deploying-composites/deploy-using-maven-ant.md" >}}) are manual processes. If you have the deployment artifacts (archives) already built, then you can package them either into a Kubernetes persistent volume or in an image and use this automated process to deploy the artifacts to an Oracle SOA Suite domain. + +#### Prepare to use the deploy artifacts script + +The sample scripts for deploying artifacts are available at `${WORKDIR}/create-soa-domain/domain-home-on-pv/` + +You must edit `deploy-artifacts-inputs.yaml` (or a copy of it) to provide the details of your domain and artifacts. +Refer to the configuration parameters below to understand the information that you must provide in this file. + +#### Configuration parameters +The following parameters can be provided in the inputs file. + +| Parameter | Definition | Default | +| --- | --- | --- | +| `adminPort` | Port number of the Administration Server inside the Kubernetes cluster. | `7001` | +| `adminServerName` | Name of the Administration Server. | `AdminServer` | +| `domainUID` | Unique ID that is used to identify the domain. This ID cannot contain any characters that are not valid in a Kubernetes service name. | `soainfra` | +| `domainType` | Type of the domain. Mandatory input for Oracle SOA Suite domains. 
You must provide one of the supported domain type values: `soa` (deploys artifacts into an Oracle SOA Suite domain), `osb` (deploys artifacts into an Oracle Service Bus domain), or `soaosb` (deploys artifacts into both Oracle SOA Suite and Oracle Service Bus domains). | `soa` +| `soaClusterName` | Name of the SOA WebLogic Server cluster instance in the domain. By default, the cluster name is `soa_cluster`. This configuration parameter is applicable only for `soa` and `soaosb` domain types.| `soa_cluster` | +| `image` | SOA Suite Docker image. The artifacts deployment process requires Oracle SOA Suite 12.2.1.4. Refer to [Obtain the Oracle SOA Suite Docker image]({{< relref "/soa-domains/installguide/prepare-your-environment#obtain-the-oracle-soa-suite-docker-image" >}}) for details on how to obtain or create the image. | `soasuite:12.2.1.4` | +| `imagePullPolicy` | Oracle SOA Suite Docker image pull policy. Valid values are `IfNotPresent`, `Always`, `Never`. | `IfNotPresent` | +| `imagePullSecretName` | Name of the Kubernetes secret to access the Docker Store to pull the Oracle SOA Suite Docker image. The presence of the secret will be validated when this parameter is specified. | | +| `weblogicCredentialsSecretName` | Name of the Kubernetes secret for the Administration Server's user name and password. If not specified, then the value is derived from the `domainUID` as `-weblogic-credentials`. | `soainfra-domain-credentials` | +| `namespace` | Kubernetes namespace in which the domain was created. | `soans` | +| `artifactsSourceType` | The deploy artifacts source type. Set to `PersistentVolume` for deploy artifacts available in a persistent volume and `Image` for deploy artifacts available as an image. | `Image` | +| `persistentVolumeClaimName` | Name of the persistent volume claim created that hosts the deployment artifacts. If not specified, the value is derived from the `domainUID` as `-deploy-artifacts-pvc`. 
| `soainfra-deploy-artifacts-pvc` | +| `artifactsImage` | Deploy artifacts image. Required if `artifactsSourceType` is `Image`. | `artifacts:12.2.1.4` | +| `artifactsImagePullPolicy` | Deploy artifacts image pull policy. Valid values are `IfNotPresent`, `Always`, `Never`. | `IfNotPresent` | +| `artifactsImagePullSecretName` | Name of the Kubernetes secret to access the deploy artifacts image. The presence of the secret will be validated when this parameter is specified. | | +| `deployScriptFilesDir` | Directory on the host machine to locate the required files to deploy artifacts to the Oracle SOA Suite domain, including the script that is specified in the `deployScriptName` parameter. By default, this directory is set to the relative path `deploy`. | `deploy` | +| `deployScriptsMountPath` | Mount path where the deploy artifacts scripts are located inside a pod. The `deploy-artifacts.sh` script creates a Kubernetes job to run the script (specified by the `deployScriptName` parameter) in a Kubernetes pod to deploy the artifacts. Files in the `deployScriptFilesDir` directory are mounted to this location in the pod, so that the Kubernetes pod can use the scripts and supporting files to deploy artifacts. | `/u01/weblogic` | +| `deployScriptName` | Script that the deploy artifacts script uses to deploy artifacts to the Oracle SOA Suite domain. For Oracle SOA Suite, the script placed in the `soa` directory is used. For Oracle Service Bus, the script placed in the `osb` directory is used. The `deploy-artifacts.sh` script creates a Kubernetes job to run this script to deploy artifacts. The script is located in the in-pod directory that is specified by the `deployScriptsMountPath` parameter. | `deploy.sh` | +| `soaArtifactsArchivePath` | Directory inside container where Oracle SOA Suite archives are placed. | `/u01/sarchives` | +| `osbArtifactsArchivePath` | Directory inside container where Oracle Service Bus archives are placed. 
| `/u01/sbarchives` | + + +The sample demonstrates how to deploy Oracle SOA Suite composites or Oracle Service Bus applications to an Oracle SOA Suite domain home. + +#### Run the deploy artifacts script + +Run the deploy artifacts script, specifying your inputs file and an output directory to store the +generated artifacts: + +``` +$ ./deploy-artifacts.sh \ + -i deploy-artifacts-inputs.yaml \ + -o <output-directory> +``` + +The script performs the following steps: + +* Creates a directory for the generated Kubernetes YAML files for the artifacts deployment process if it does not + already exist. The path name is `<output-directory>/weblogic-domains/<domainUID>/<timestamp>`. + If the directory already exists, its contents must be removed before running this script. +* Creates a Kubernetes job that starts a utility Oracle SOA Suite container and runs + scripts to deploy artifacts provided either in an image or in a persistent volume. + +##### Deploy artifacts from an image + +1. Create an image with artifacts + + a. A sample Dockerfile to create the artifacts in an image is available at `$WORKDIR/create-soa-domain/domain-home-on-pv/deploy-docker-file`. This expects the Oracle SOA Suite related archives to be available in the `soa` directory and Oracle Service Bus archives to be available in the `osb` directory. + + b. Create the `soa` directory and copy the Oracle SOA Suite archives to be deployed to the directory: + ``` + $ cd $WORKDIR/create-soa-domain/domain-home-on-pv/deploy-docker-file + $ mkdir soa + $ cp /path/sca_sampleBPEL.jar soa + ``` + c. Create the `osb` directory and copy the Oracle Service Bus archives to be deployed to the directory: + ``` + $ cd $WORKDIR/create-soa-domain/domain-home-on-pv/deploy-docker-file + $ mkdir osb + $ cp /path/simple_sbconfig.jar osb + ``` + d. Create the image using `build.sh`.
This script creates the image with default tag 12.2.1.4 (`artifacts:12.2.1.4`): + ``` + $ cd $WORKDIR/create-soa-domain/domain-home-on-pv/deploy-docker-file + $ ./build.sh -h + Usage: build.sh -t [tag] + Builds a Docker Image with Oracle SOA/OSB artifacts + Parameters: + -h: view usage + -t: tag for image, default is 12.2.1.4 + ``` + + {{%expand "Click here to see sample output of script with tag 12.2.1.4-v1" %}} + ``` + $ ./build.sh -t 12.2.1.4-v1 + Sending build context to Docker daemon 36.35kB + Step 1/13 : FROM busybox + ---> 16ea53ea7c65 + Step 2/13 : ARG SOA_ARTIFACTS_ARCHIVE_PATH=/u01/sarchives + ---> Using cache + ---> 411edf07f267 + Step 3/13 : ARG OSB_ARTIFACTS_ARCHIVE_PATH=/u01/sbarchives + ---> Using cache + ---> c4214b9cf0ae + Step 4/13 : ARG USER=oracle + ---> Using cache + ---> c8ebcd5ee546 + Step 5/13 : ARG USERID=1000 + ---> Using cache + ---> 5780beb0c3cf + Step 6/13 : ARG GROUP=root + ---> Using cache + ---> 048e67c71f92 + Step 7/13 : ENV SOA_ARTIFACTS_ARCHIVE_PATH=${SOA_ARTIFACTS_ARCHIVE_PATH} + ---> Using cache + ---> 31ae33cfd9bb + Step 8/13 : ENV OSB_ARTIFACTS_ARCHIVE_PATH=${OSB_ARTIFACTS_ARCHIVE_PATH} + ---> Using cache + ---> 79602bf64dc0 + Step 9/13 : RUN adduser -D -u ${USERID} -G $GROUP $USER + ---> Using cache + ---> 07c12cea52f9 + Step 10/13 : COPY soa/ ${SOA_ARTIFACTS_ARCHIVE_PATH}/ + ---> bfeb138516d8 + Step 11/13 : COPY osb/ ${OSB_ARTIFACTS_ARCHIVE_PATH}/ + ---> 0359a11f8f76 + Step 12/13 : RUN chown -R $USER:$GROUP ${SOA_ARTIFACTS_ARCHIVE_PATH}/ ${OSB_ARTIFACTS_ARCHIVE_PATH}/ + ---> Running in 285fb2bd8434 + Removing intermediate container 285fb2bd8434 + ---> 2e8d8c337de0 + Step 13/13 : USER $USER + ---> Running in c9db494e46ab + Removing intermediate container c9db494e46ab + ---> 40295aa15317 + Successfully built 40295aa15317 + Successfully tagged artifacts:12.2.1.4-v1 + INFO: Artifacts image for Oracle SOA suite + is ready to be extended. + --> artifacts:12.2.1.4-v1 + INFO: Build completed in 4 seconds. 
+ ``` + {{% /expand %}} + +1. Update the image details in `deploy-artifacts-inputs.yaml` for parameter `artifactsImage` and invoke `deploy-artifacts.sh` to perform deployment of artifacts. + + {{%expand "Click here to see sample output of deployment for domainType of soaosb" %}} + ``` + $ ./deploy-artifacts.sh -i deploy-artifacts-inputs.yaml -o out-deploy + Input parameters being used + export version="deploy-artifacts-inputs-v1" + export adminPort="7001" + export adminServerName="AdminServer" + export domainUID="soainfra" + export domainType="soaosb" + export soaClusterName="soa_cluster" + export soaManagedServerPort="8001" + export image="soasuite:12.2.1.4" + export imagePullPolicy="IfNotPresent" + export weblogicCredentialsSecretName="soainfra-domain-credentials" + export namespace="soans" + export artifactsSourceType="Image" + export artifactsImage="artifacts:12.2.1.4-v1" + export artifactsImagePullPolicy="IfNotPresent" + export deployScriptsMountPath="/u01/weblogic" + export deployScriptName="deploy.sh" + export deployScriptFilesDir="deploy" + export soaArtifactsArchivePath="/u01/sarchives" + export osbArtifactsArchivePath="/u01/sbarchives" + + Generating out-deploy/deploy-artifacts/soainfra/20211022-152335/deploy-artifacts-job.yaml + Checking to see if the secret soainfra-domain-credentials exists in namespace soans + configmap/soainfra-deploy-scripts-soa-job-cm created + Checking the configmap soainfra-deploy-scripts-soa-job-cm was created + configmap/soainfra-deploy-scripts-soa-job-cm labeled + configmap/soainfra-deploy-scripts-osb-job-cm created + Checking the configmap soainfra-deploy-scripts-osb-job-cm was created + configmap/soainfra-deploy-scripts-osb-job-cm labeled + Checking if object type job with name soainfra-deploy-artifacts-job-20211022-152335 exists + Deploying artifacts by creating the job out-deploy/deploy-artifacts/soainfra/20211022-152335/deploy-artifacts-job.yaml + job.batch/soainfra-deploy-artifacts-job-20211022-152335 created + Waiting 
for the job to complete... + status on iteration 1 of 20 for soainfra + pod soainfra-deploy-artifacts-job-20211022-152335-r7ffj status is NotReady + status on iteration 2 of 20 for soainfra + pod soainfra-deploy-artifacts-job-20211022-152335-r7ffj status is Completed + configmap "soainfra-deploy-scripts-soa-job-cm" deleted + configmap "soainfra-deploy-scripts-osb-job-cm" deleted + The following files were generated: + out-deploy/deploy-artifacts/soainfra/20211022-152335/deploy-artifacts-inputs.yaml + out-deploy/deploy-artifacts/soainfra/20211022-152335/deploy-artifacts-job.yaml + + + Completed + + $ kubectl get all -n soans|grep deploy + pod/soainfra-deploy-artifacts-job-20211022-152335-r7ffj 0/2 Completed 0 15m + job.batch/soainfra-deploy-artifacts-job-20211022-152335 1/1 43s 15m + $ + ``` + {{% /expand %}} + + > Note: When you are running the script for domainType `soaosb`, a deployment pod is created with two containers, one for Oracle SOA Suite artifacts deployments and another for Oracle Service Bus artifacts deployments. When the deployment completes for one container while other container is still running, the pod status will move from `Ready` to `NotReady`. Once both the deployments complete successfully, the status of the pod moves to `Completed`. + +##### Deploy artifacts from a persistent volume + +1. Copy the artifacts for Oracle SOA Suite to the `soa` directory and Oracle Service Bus to the `osb` directory at the share location. + For example, with location `/share`, artifacts for Oracle SOA Suite are in `/share/soa` and Oracle Service Bus are in `/share/osb`. + ``` + $ ls /share/soa + sca_sampleBPEL.jar + $ + $ ls /share/osb/ + simple_sbconfig.jar + $ + ``` + +1. 
Create a `PersistentVolume` with the sample provided (`artifacts-pv.yaml`): + ``` + apiVersion: v1 + kind: PersistentVolume + metadata: + name: soainfra-deploy-artifacts-pv + spec: + storageClassName: deploy-storage-class + capacity: + storage: 10Gi + accessModes: + - ReadOnlyMany + persistentVolumeReclaimPolicy: Retain + hostPath: + path: "/share" + ``` + + ``` + $ kubectl apply -f artifacts-pv.yaml + ``` + +1. Create a `PersistentVolumeClaim` with the sample provided (`artifacts-pvc.yaml`): + ``` + apiVersion: v1 + kind: PersistentVolumeClaim + metadata: + name: soainfra-deploy-artifacts-pvc + namespace: soans + spec: + storageClassName: deploy-storage-class + accessModes: + - ReadOnlyMany + resources: + requests: + storage: 10Gi + ``` + + ``` + $ kubectl apply -f artifacts-pvc.yaml + ``` + +1. Update the `artifactsSourceType` to `PersistentVolume` and provide the name for `persistentVolumeClaimName` in `deploy-artifacts-inputs.yaml`. + +1. Invoke `deploy-artifacts.sh` to deploy artifacts for artifacts present in `persistentVolumeClaimName`. 
+ + {{%expand "Click here to see sample output of deployment for domainType of soaosb" %}} + ``` + $ ./deploy-artifacts.sh -i deploy-artifacts-inputs.yaml -o out-deploy + Input parameters being used + export version="deploy-artifacts-inputs-v1" + export adminPort="7001" + export adminServerName="AdminServer" + export domainUID="soainfra" + export domainType="soaosb" + export soaClusterName="soa_cluster" + export soaManagedServerPort="8001" + export image="soasuite:12.2.1.4" + export imagePullPolicy="IfNotPresent" + export weblogicCredentialsSecretName="soainfra-domain-credentials" + export namespace="soans" + export artifactsSourceType="PersistentVolume" + export persistentVolumeClaimName="soainfra-deploy-artifacts-pvc" + export deployScriptsMountPath="/u01/weblogic" + export deployScriptName="deploy.sh" + export deployScriptFilesDir="deploy" + export soaArtifactsArchivePath="/u01/sarchives" + export osbArtifactsArchivePath="/u01/sbarchives" + + Generating out-deploy/deploy-artifacts/soainfra/20211022-164735/deploy-artifacts-job.yaml + Checking to see if the secret soainfra-domain-credentials exists in namespace soans + configmap/soainfra-deploy-scripts-soa-job-cm created + Checking the configmap soainfra-deploy-scripts-soa-job-cm was created + configmap/soainfra-deploy-scripts-soa-job-cm labeled + configmap/soainfra-deploy-scripts-osb-job-cm created + Checking the configmap soainfra-deploy-scripts-osb-job-cm was created + configmap/soainfra-deploy-scripts-osb-job-cm labeled + Checking if object type job with name soainfra-deploy-artifacts-job-20211022-164735 exists + Deploying artifacts by creating the job out-deploy/deploy-artifacts/soainfra/20211022-164735/deploy-artifacts-job.yaml + job.batch/soainfra-deploy-artifacts-job-20211022-164735 created + Waiting for the job to complete... 
+ status on iteration 1 of 20 for soainfra + pod soainfra-deploy-artifacts-job-20211022-164735-66fvn status is NotReady + status on iteration 2 of 20 for soainfra + pod soainfra-deploy-artifacts-job-20211022-164735-66fvn status is Completed + configmap "soainfra-deploy-scripts-soa-job-cm" deleted + configmap "soainfra-deploy-scripts-osb-job-cm" deleted + The following files were generated: + out-deploy/deploy-artifacts/soainfra/20211022-164735/deploy-artifacts-inputs.yaml + out-deploy/deploy-artifacts/soainfra/20211022-164735/deploy-artifacts-job.yaml + + + Completed + + $ kubectl get all -n soans |grep deploy + pod/soainfra-deploy-artifacts-job-20211022-164735-66fvn 0/2 Completed 0 3m1s + job.batch/soainfra-deploy-artifacts-job-20211022-164735 1/1 37s 3m1s + $ + ``` + {{% /expand %}} + > Note: When you are running the script for domainType of `soaosb`, a deployment pod is created with two containers, one for Oracle SOA Suite artifacts deployments and one for Oracle Service Bus artifacts deployments. When the deployment completes for one container while the other container is still running, the pod status moves from `Ready` to `NotReady`. Once both the deployments successfully complete, the status of the pod moves to `Completed`. + + +#### Verify the deployment logs + +To confirm the deployment of artifacts was successful, verify the output using the `kubectl logs` command: + +> Note: Replace `<domainUID>`, `<timestamp>`, and `<namespace>` with values for your environment. 
+ +For Oracle SOA Suite artifacts: + +``` +$ kubectl logs job.batch/<domainUID>-deploy-artifacts-job-<timestamp> -n <namespace> soa-deploy-artifacts-job +``` + +For Oracle Service Bus artifacts: + +``` +$ kubectl logs job.batch/<domainUID>-deploy-artifacts-job-<timestamp> -n <namespace> osb-deploy-artifacts-job +``` diff --git a/docs-source/content/soa-domains/adminguide/deploying-composites/supportJDEV.md b/docs-source/content/soa-domains/adminguide/deploying-composites/supportJDEV.md index 095c1a58a..dd17d486e 100644 --- a/docs-source/content/soa-domains/adminguide/deploying-composites/supportJDEV.md +++ b/docs-source/content/soa-domains/adminguide/deploying-composites/supportJDEV.md @@ -29,7 +29,7 @@ If you miss enabling `exposeAdminT3Channel` during domain creation, follow [Expo ``` For example: ``` - $ kubectl get service soainfra-adminserver-external -n soana-o jsonpath='{.spec.ports[0].nodePort}' + $ kubectl get service soainfra-adminserver-external -n soans -o jsonpath='{.spec.ports[0].nodePort}' ``` 1. Oracle SOA Suite in the WebLogic Kubernetes Operator environment is deployed in a *Reference Configuration domain*. If a SOA project is developed in Classic mode JDeveloper displays a Mismatch notification in the Deploy Composite Wizard. By default, JDeveloper is in Classic mode. To develop SOA projects in Reference Configuration mode, you must manually enable this feature in JDeveloper: diff --git a/docs-source/content/soa-domains/adminguide/enable-additional-url-access.md b/docs-source/content/soa-domains/adminguide/enable-additional-url-access.md index 866cf85a1..97b068908 100644 --- a/docs-source/content/soa-domains/adminguide/enable-additional-url-access.md +++ b/docs-source/content/soa-domains/adminguide/enable-additional-url-access.md @@ -15,7 +15,8 @@ To extend an existing ingress with additional application URL access: 1. Update the template YAML file at `${WORKDIR}/charts/ingress-per-domain/templates/` to define additional path rules. 
- For example, to extend an existing NGINX-based ingress with additional paths `/path1` and `/path2` of an Oracle SOA Suite cluster, update `nginx-ingress.yaml` with additional paths: + For example, to extend an existing NGINX-based ingress with additional paths `/path1` and `/path2` of an Oracle SOA Suite cluster, update `nginx-ingress.yaml` (for the supported Kubernetes versions up to 1.18.x) with additional paths: + > Note: For Kubernetes versions 1.19+, you need to update the `nginx-ingress-k8s1.19.yaml` file. ``` # Copyright (c) 2020, 2021, Oracle and/or its affiliates. diff --git a/docs-source/content/soa-domains/adminguide/enablingT3.md b/docs-source/content/soa-domains/adminguide/enablingT3.md index 3fe46e9ad..e19aff5bf 100644 --- a/docs-source/content/soa-domains/adminguide/enablingT3.md +++ b/docs-source/content/soa-domains/adminguide/enablingT3.md @@ -141,7 +141,7 @@ To create a custom T3/T3S channel for the Administration Server that has a liste $ /u01/oracle/oracle_common/common/bin/wlst.sh $ connect('weblogic','Welcome1','t3://soainfra-adminserver:7001') $ svc = getOpssService(name='KeyStoreService') - $ svc.exportKeyStoreCertificate(appStripe='system', name='demoidentity', password='DemoIdentityKeyStorePassPhrase', alias='DemoIdetityKeyStorePassPhrase', type='Certificate', filepath='/tmp/cert.txt/') + $ svc.exportKeyStoreCertificate(appStripe='system', name='demoidentity', password='DemoIdentityKeyStorePassPhrase', alias='DemoIdentity', type='Certificate', filepath='/tmp/cert.txt/') ``` These steps download the certificate at `/tmp/cert.txt`. 
@@ -287,7 +287,7 @@ To create a custom T3/T3S channel for all Managed Servers, with a listen port ** $ /u01/oracle/oracle_common/common/bin/wlst.sh $ connect('weblogic','Welcome1','t3://soainfra-adminserver:7001') $ svc = getOpssService(name='KeyStoreService') - $ svc.exportKeyStoreCertificate(appStripe='system', name='demoidentity', password='DemoIdentityKeyStorePassPhrase', alias='DemoIdetityKeyStorePassPhrase', type='Certificate', filepath='/tmp/cert.txt/') + $ svc.exportKeyStoreCertificate(appStripe='system', name='demoidentity', password='DemoIdentityKeyStorePassPhrase', alias='DemoIdentity', type='Certificate', filepath='/tmp/cert.txt/') ``` The above steps download the certificate at `/tmp/cert.txt`. diff --git a/docs-source/content/soa-domains/adminguide/monitoring-soa-domains.md b/docs-source/content/soa-domains/adminguide/monitoring-soa-domains.md index d36406ad2..95582b0e8 100644 --- a/docs-source/content/soa-domains/adminguide/monitoring-soa-domains.md +++ b/docs-source/content/soa-domains/adminguide/monitoring-soa-domains.md @@ -59,7 +59,7 @@ Refer to the compatibility matrix of [Kube Prometheus](https://github.com/coreos * `32102` is the external port for Alertmanager #### Set up monitoring -Follow these [steps](https://github.com/oracle/fmw-kubernetes/blob/v21.3.2/OracleSOASuite/kubernetes/monitoring-service/README.md) to set up monitoring for an Oracle SOA Suite instance. For more details on WebLogic Monitoring Exporter, see [here](https://github.com/oracle/weblogic-monitoring-exporter). +Follow these [steps](https://github.com/oracle/fmw-kubernetes/blob/v21.4.2/OracleSOASuite/kubernetes/monitoring-service/README.md) to set up monitoring for an Oracle SOA Suite instance. For more details on WebLogic Monitoring Exporter, see [here](https://github.com/oracle/weblogic-monitoring-exporter). 
### Publish WebLogic Server logs into Elasticsearch diff --git a/docs-source/content/soa-domains/adminguide/performing-wlst-operations.md b/docs-source/content/soa-domains/adminguide/performing-wlst-operations.md index 362d0e5d4..227d54b97 100644 --- a/docs-source/content/soa-domains/adminguide/performing-wlst-operations.md +++ b/docs-source/content/soa-domains/adminguide/performing-wlst-operations.md @@ -46,7 +46,7 @@ Before creating a Kubernetes helper pod, make sure that the Oracle SOA Suite Doc 1. Create a helper pod. - For Kubernetes 1.18.10+, and 1.19.7+: + For Kubernetes 1.18.10+, 1.19.7+, and 1.20.6+: ``` $ kubectl run helper \ --image \ diff --git a/docs-source/content/soa-domains/appendix/quickstart-deployment-on-prem.md b/docs-source/content/soa-domains/appendix/quickstart-deployment-on-prem.md index 57a3c8cb1..09d8f4215 100644 --- a/docs-source/content/soa-domains/appendix/quickstart-deployment-on-prem.md +++ b/docs-source/content/soa-domains/appendix/quickstart-deployment-on-prem.md @@ -15,7 +15,7 @@ refer to the [Install Guide]({{< relref "/soa-domains/installguide/_index.md" >} The Linux kernel supported for deploying and running Oracle SOA Suite domains with the operator is Oracle Linux 7 (UL6+) and Red Hat Enterprise Linux 7 (UL3+ only with standalone Kubernetes). Refer to the [prerequisites]({{< relref "/soa-domains/installguide/prerequisites/_index.md" >}}) for more details. 
-For this exercise, the minimum hardware requirements to create a single-node Kubernetes cluster and then deploy the `soaosb` (SOA, Oracle Service Bus, and Enterprise Scheduler (ESS)) domain type with one Managed Server for SOA and one for the Oracle Service Bus cluster, along with Oracle Database running as a container are: +For this exercise, the minimum hardware requirements to create a single-node Kubernetes cluster and then deploy the `soaosb` (Oracle SOA Suite, Oracle Service Bus, and Enterprise Scheduler (ESS)) domain type with one Managed Server for Oracle SOA Suite and one for the Oracle Service Bus cluster, along with Oracle Database running as a container are: Hardware|Size --|-- @@ -26,7 +26,7 @@ For this exercise, the minimum hardware requirements to create a single-node Kub See [here]({{< relref "/soa-domains/appendix/soa-cluster-sizing-info.md" >}}) for resource sizing information for Oracle SOA Suite domains set up on a Kubernetes cluster. ### Set up Oracle SOA Suite in an on-premise environment -Use the steps in this topic to create a single-instance on-premise Kubernetes cluster and then create an Oracle SOA Suite `soaosb` domain type, which deploys a domain with SOA, Oracle Service Bus, and Oracle Enterprise Scheduler (ESS). +Use the steps in this topic to create a single-instance on-premise Kubernetes cluster and then create an Oracle SOA Suite `soaosb` domain type, which deploys a domain with Oracle SOA Suite, Oracle Service Bus, and Oracle Enterprise Scheduler (ESS). * [Step 1 - Prepare a virtual machine for the Kubernetes cluster](#1-prepare-a-virtual-machine-for-the-kubernetes-cluster) * [Step 2 - Set up a single instance Kubernetes cluster](#2-set-up-a-single-instance-kubernetes-cluster) @@ -442,11 +442,11 @@ Follow [these steps]({{< relref "/soa-domains/installguide/prepare-your-environm #### 3.2 Get required Docker images and add them to your local registry -1. Pull the operator image: +1. 
Pull the WebLogic Kubernetes Operator image: ```shell - $ docker pull ghcr.io/oracle/weblogic-kubernetes-operator:3.2.1 - $ docker tag ghcr.io/oracle/weblogic-kubernetes-operator:3.2.1 oracle/weblogic-kubernetes-operator:3.2.1 + $ docker pull ghcr.io/oracle/weblogic-kubernetes-operator:3.3.0 + $ docker tag ghcr.io/oracle/weblogic-kubernetes-operator:3.3.0 oracle/weblogic-kubernetes-operator:3.3.0 ``` 1. Obtain the Oracle Database image and Oracle SOA Suite Docker image from the [Oracle Container Registry](https://container-registry.oracle.com): @@ -494,7 +494,7 @@ Use Helm to install and start the operator from the directory you just cloned: $ cd ${WORKDIR} $ helm install weblogic-kubernetes-operator charts/weblogic-operator \ --namespace opns \ - --set image=oracle/weblogic-kubernetes-operator:3.2.1 \ + --set image=oracle/weblogic-kubernetes-operator:3.3.0 \ --set serviceAccount=op-sa \ --set "domainNamespaces={}" \ --wait @@ -511,11 +511,11 @@ Use Helm to install and start the operator from the directory you just cloned: $ kubectl logs -n opns -c weblogic-operator deployments/weblogic-operator ``` -The WebLogic Kubernetes Operator v3.2.1 has been installed. Continue with the load balancer and Oracle SOA Suite domain setup. +The WebLogic Kubernetes Operator v3.3.0 has been installed. Continue with the load balancer and Oracle SOA Suite domain setup. ### 5. Install the Traefik (ingress-based) load balancer -The WebLogic Kubernetes Operator supports three load balancers: Traefik, Voyager, and Apache. Samples are provided in the documentation. +The WebLogic Kubernetes Operator supports these load balancers: Traefik, NGINX, and Apache. Samples are provided in the documentation. This Quick Start demonstrates how to install the Traefik ingress controller to provide load balancing for an Oracle SOA Suite domain. 
@@ -667,6 +667,7 @@ This Quick Start demonstrates how to install the Traefik ingress controller to p * `-q ` * `-r ` * `-c ` + * `-l ` For example: diff --git a/docs-source/content/soa-domains/cleanup-domain-setup.md b/docs-source/content/soa-domains/cleanup-domain-setup.md index 2f561cff2..4f5d99460 100644 --- a/docs-source/content/soa-domains/cleanup-domain-setup.md +++ b/docs-source/content/soa-domains/cleanup-domain-setup.md @@ -134,5 +134,5 @@ To remove the domain home that is generated using the `create-domain.sh` script, For example, for the domain's persistent volume of type `host_path`: ``` -$ rm -rf /scratch/k8s_dir/SOA +$ rm -rf /scratch/k8s_dir/SOA/* ``` diff --git a/docs-source/content/soa-domains/create-or-update-image/_index.md b/docs-source/content/soa-domains/create-or-update-image/_index.md index 51819c7d9..7980fe963 100644 --- a/docs-source/content/soa-domains/create-or-update-image/_index.md +++ b/docs-source/content/soa-domains/create-or-update-image/_index.md @@ -121,10 +121,10 @@ After [setting up the WebLogic Image Tool]({{< relref "/soa-domains/create-or-up You must download the required Oracle SOA Suite installation binaries and patches as listed below from the [Oracle Software Delivery Cloud](https://edelivery.oracle.com/) and save them in a directory of your choice. In these steps, this directory is `download location`. 
-The installation binaries and patches required for release 21.3.2 are: +The installation binaries and patches required for release 21.4.2 are: * JDK: - * jdk-8u301-linux-x64-24315225.tar.gz + * jdk-8u311-linux-x64.tar.gz * Fusion Middleware Infrastructure installer: * fmw_12.2.1.4.0_infrastructure.jar @@ -139,24 +139,23 @@ In this release, Oracle B2B is not supported to be configured, but the installer {{% /notice %}} * Fusion Middleware Infrastructure patches: - * p28186730_139426_Generic.zip (OPATCH 13.9.4.2.6 FOR FMW/WLS 12.2.1.3.0, 12.2.1.4.0 AND 14.1.1.0.0) - * p33059296_122140_Generic.zip (WLS PATCH SET UPDATE 12.2.1.4.210629) + * p28186730_139427_Generic.zip (OPATCH 13.9.4.2.7 FOR EM 13.4, 13.5 AND FMW/WLS 12.2.1.3.0, 12.2.1.4.0 AND 14.1.1.0.0) + * p33416868_122140_Generic.zip (WLS PATCH SET UPDATE 12.2.1.4.210930) * p32880070_122140_Generic.zip (FMW COMMON THIRDPARTY SPU 12.2.1.4.0 FOR APRIL2021CPU) * p32784652_122140_Generic.zip (OPSS BUNDLE PATCH 12.2.1.4.210418) * p32905339_122140_Generic.zip (OWSM BUNDLE PATCH 12.2.1.4.210520) - * p33084721_122140_Generic.zip (ADF BUNDLE PATCH 12.2.1.4.210706) - * p32973297_122140_Generic.zip (Coherence 12.2.1.4 Cumulative Patch 10 (12.2.1.4.10)) + * p33313802_122140_Generic.zip (ADF BUNDLE PATCH 12.2.1.4.210903) + * p33286160_122140_Generic.zip (Coherence 12.2.1.4 Cumulative Patch 11 (12.2.1.4.11)) * p33093748_122140_Generic.zip (FMW PLATFORM 12.2.1.4.0 SPU FOR APRCPU2021) - * p31544353_122140_Linux-x86-64-23673193.zip (ADR FOR WEBLOGIC SERVER 12.2.1.4.0 JULY CPU 2020) - * p31287540_122140_Generic.zip (WLS One-off) - * p31918617_122140_Generic.zip (WLS One-off SSL certificate) + * p31544353_122140_Linux-x86-64.zip (ADR FOR WEBLOGIC SERVER 12.2.1.4.0 JULY CPU 2020) * Oracle SOA Suite and Oracle Service Bus patches - * p32957445_122140_Generic.zip (SOA BUNDLE PATCH 12.2.1.4.210602) + * p33408307_122140_Generic.zip (SOA BUNDLE PATCH 12.2.1.4.210928) * p32121987_122140_Generic.zip (Oracle Service Bus BUNDLE PATCH 
12.2.1.4.201105) + * p33404495_122140_Generic.zip (SOA One-off) * p31857456_122140_Generic.zip (Oracle Service Bus One-off) * p30741105_122140_Generic.zip (Oracle Service Bus One-off) - * p31713053_122140_Linux-x86-64-23753213.zip (One-off patch) + * p31713053_122140_Linux-x86-64.zip (One-off patch) ##### Update required build files @@ -186,70 +185,69 @@ The following files in the code repository location `/ 1. Add a JDK package to the WebLogic Image Tool cache: ``` bash - $ imagetool cache addInstaller --type jdk --version 8u291 --path /jdk-8u291-linux-x64.tar.gz + $ imagetool cache addInstaller --type jdk --version 8u311 --path /jdk-8u311-linux-x64.tar.gz ``` 1. Add the downloaded installation binaries to the WebLogic Image Tool cache: ``` bash - $ imagetool cache addInstaller --type jdk --version 8u301 --path /jdk-8u301-linux-x64.tar.gz - $ imagetool cache addInstaller --type fmw --version 12.2.1.4.0 --path /fmw_12.2.1.4.0_infrastructure.jar - + $ imagetool cache addInstaller --type soa --version 12.2.1.4.0 --path /fmw_12.2.1.4.0_soa.jar - + $ imagetool cache addInstaller --type osb --version 12.2.1.4.0 --path /fmw_12.2.1.4.0_osb.jar - + $ imagetool cache addInstaller --type b2b --version 12.2.1.4.0 --path /fmw_12.2.1.4.0_b2bhealthcare.jar ``` 1. Add the downloaded OPatch patch to the WebLogic Image Tool cache: ``` bash - $ imagetool cache addEntry --key 28186730_13.9.4.2.6 --value /p28186730_139426_Generic.zip + $ imagetool cache addEntry --key 28186730_13.9.4.2.7 --value /p28186730_139427_Generic.zip ``` 1. Append the `--opatchBugNumber` flag and the OPatch patch key to the `create` command in the `buildArgs` file: ``` bash - --opatchBugNumber 28186730_13.9.4.2.6 + --opatchBugNumber 28186730_13.9.4.2.7 ``` 1. 
Add the downloaded product patches to the WebLogic Image Tool cache: ``` bash - $ imagetool cache addEntry --key 28186730_13.9.4.2.6 --value /p28186730_139426_Generic.zip - $ imagetool cache addEntry --key 30741105_12.2.1.4.0 --value /p30741105_122140_Generic.zip - - $ imagetool cache addEntry --key 31287540_12.2.1.4.0 --value /p31287540_122140_Generic.zip - - $ imagetool cache addEntry --key 31544353_12.2.1.4.0 --value /p31544353_122140_Linux-x86-64-23673193.zip - - $ imagetool cache addEntry --key 31713053_12.2.1.4.0 --value /p31713053_122140_Linux-x86-64-23753213.zip - + + $ imagetool cache addEntry --key 31544353_12.2.1.4.0 --value /p31544353_122140_Linux-x86-64.zip + + $ imagetool cache addEntry --key 31713053_12.2.1.4.0 --value /p31713053_122140_Linux-x86-64.zip + $ imagetool cache addEntry --key 31857456_12.2.1.4.0 --value /p31857456_122140_Generic.zip - - $ imagetool cache addEntry --key 31918617_12.2.1.4.0 --value /p31918617_122140_Generic.zip - + $ imagetool cache addEntry --key 32121987_12.2.1.4.0 --value /p32121987_122140_Generic.zip - + $ imagetool cache addEntry --key 32784652_12.2.1.4.0 --value /p32784652_122140_Generic.zip - + + $ imagetool cache addEntry --key 32808126_12.2.1.4.0 --value /p32808126_122140_Generic.zip + + $ imagetool cache addEntry --key 32827327_12.2.1.4.0 --value /p32827327_122140_Generic.zip + $ imagetool cache addEntry --key 32880070_12.2.1.4.0 --value /p32880070_122140_Generic.zip - + $ imagetool cache addEntry --key 32905339_12.2.1.4.0 --value /p32905339_122140_Generic.zip - - $ imagetool cache addEntry --key 32957445_12.2.1.4.0 --value /p32957445_122140_Generic.zip - - $ imagetool cache addEntry --key 32973297_12.2.1.4.0 --value /p32973297_122140_Generic.zip - - $ imagetool cache addEntry --key 33059296_12.2.1.4.0 --value /p33059296_122140_Generic.zip - - $ imagetool cache addEntry --key 33084721_12.2.1.4.0 --value /p33084721_122140_Generic.zip - + $ imagetool cache addEntry --key 33093748_12.2.1.4.0 --value 
/p33093748_122140_Generic.zip + $ imagetool cache addEntry --key 33286160_12.2.1.4.0 --value /p33286160_122140_Generic.zip + + $ imagetool cache addEntry --key 33313802_12.2.1.4.0 --value /p33313802_122140_Generic.zip + + $ imagetool cache addEntry --key 33404495_12.2.1.4.0 --value /p33404495_122140_Generic.zip + + $ imagetool cache addEntry --key 33408307_12.2.1.4.0 --value /p33408307_122140_Generic.zip + + $ imagetool cache addEntry --key 33416868_12.2.1.4.0 --value /p33416868_122140_Generic.zip + + ``` 1. Append the `--patches` flag and the product patch keys to the `create` command in the `buildArgs` file. The `--patches` list must be a comma-separated collection of patch `--key` values used in the `imagetool cache addEntry` commands above. @@ -257,14 +255,14 @@ The following files in the code repository location `/ Sample `--patches` list for the product patches added in to the cache: ``` - --patches 30741105_12.2.1.4.0,31287540_12.2.1.4.0,31544353_12.2.1.4.0,31713053_12.2.1.4.0,31857456_12.2.1.4.0,31918617_12.2.1.4.0,32121987_12.2.1.4.0,32784652_12.2.1.4.0,32880070_12.2.1.4.0,32905339_12.2.1.4.0,32957445_12.2.1.4.0,32973297_12.2.1.4.0,33059296_12.2.1.4.0,33084721_12.2.1.4.0,33093748_12.2.1.4.0 + --patches 30741105_12.2.1.4.0,31544353_12.2.1.4.0,31713053_12.2.1.4.0,31857456_12.2.1.4.0,32121987_12.2.1.4.0,32784652_12.2.1.4.0,32808126_12.2.1.4.0,32827327_12.2.1.4.0,32880070_12.2.1.4.0,32905339_12.2.1.4.0,33093748_12.2.1.4.0,33286160_12.2.1.4.0,33313802_12.2.1.4.0,33404495_12.2.1.4.0,33408307_12.2.1.4.0,33416868_12.2.1.4.0 ``` Example `buildArgs` file after appending the OPatch patch and product patches: ``` create - --jdkVersion 8u291 + --jdkVersion 8u311 --type soa_osb_b2b --version 12.2.1.4.0 --tag oracle/soasuite:12.2.1.4.0 @@ -273,8 +271,7 @@ The following files in the code repository location `/ --additionalBuildCommands /docker-images/OracleSOASuite/imagetool/12.2.1.4.0/additionalBuildCmds.txt --additionalBuildFiles 
/docker-images/OracleSOASuite/dockerfiles/12.2.1.4/container-scripts --installerResponseFile /docker-images/OracleFMWInfrastructure/dockerfiles/12.2.1.4/install.file,/docker-images/OracleSOASuite/dockerfiles/12.2.1.4/install/soasuite.response,/docker-images/OracleSOASuite/dockerfiles/12.2.1.4/install/osb.response,/docker-images/OracleSOASuite/dockerfiles/12.2.1.4/install/b2b.response - --patches 30741105_12.2.1.4.0,31287540_12.2.1.4.0,31544353_12.2.1.4.0,31713053_12.2.1.4.0,31857456_12.2.1.4.0,31918617_12.2.1.4.0,32121987_12.2.1.4.0,32784652_12.2.1.4.0,32880070_12.2.1.4.0,32905339_12.2.1.4.0,32957445_12.2.1.4.0,32973297_12.2.1.4.0,33059296_12.2.1.4.0,33084721_12.2.1.4.0,33093748_12.2.1.4.0 - --opatchBugNumber 28186730_13.9.4.2.6 + --patches 30741105_12.2.1.4.0,31544353_12.2.1.4.0,31713053_12.2.1.4.0,31857456_12.2.1.4.0,32121987_12.2.1.4.0,32784652_12.2.1.4.0,32808126_12.2.1.4.0,32827327_12.2.1.4.0,32880070_12.2.1.4.0,32905339_12.2.1.4.0,33093748_12.2.1.4.0,33286160_12.2.1.4.0,33313802_12.2.1.4.0,33404495_12.2.1.4.0,33408307_12.2.1.4.0,33416868_12.2.1.4.0 ``` >Note: In the `buildArgs` file: > * `--jdkVersion` value must match the `--version` value used in the `imagetool cache addInstaller` command for `--type jdk`. @@ -313,7 +310,7 @@ The following files in the code repository location `/ # Licensed under the Universal Permissive License v 1.0 as shown at https://oss.oracle.com/licenses/upl. 
# # Ensure necessary OS packages are installed - RUN yum -y --downloaddir=/tmp/imagetool install gzip tar unzip libaio jq findutils diffutils hostname ant \ + RUN yum -y --downloaddir=/tmp/imagetool install gzip tar unzip libaio jq findutils diffutils hostname \ && yum -y --downloaddir=/tmp/imagetool clean all \ && rm -rf /var/cache/yum/* \ && rm -rf /tmp/imagetool @@ -453,7 +450,7 @@ The following files in the code repository location `/ SCRIPT_FILE=/u01/oracle/container-scripts/* \ HEALTH_SCRIPT_FILE=/u01/oracle/container-scripts/get_healthcheck_url.sh \ JAVA_OPTIONS="-Doracle.jdbc.fanEnabled=false -Dweblogic.StdoutDebugEnabled=false" \ - PATH=$PATH:/usr/java/default/bin:/u01/oracle/oracle_common/common/bin:/u01/oracle/wlserver/common/bin:/u01/oracle/container-scripts + PATH=$PATH:/u01/oracle/container-scripts:/u01/oracle/oracle_common/modules/thirdparty/org.apache.ant/1.10.5.0.0/apache-ant-1.10.5/bin USER root RUN mkdir -p $VOLUME_DIR && chown oracle:root /u01 $VOLUME_DIR && \ @@ -490,7 +487,7 @@ After [setting up the WebLogic Image Tool]({{< relref "/soa-domains/create-or-up 1. Enter the following command to add the OPatch patch to the WebLogic Image Tool cache: ```bash - $ imagetool cache addEntry --key 28186730_13.9.4.2.5 --value /p28186730_139425_Generic.zip + $ imagetool cache addEntry --key 28186730_13.9.4.2.7 --value /p28186730_139427_Generic.zip ``` 1. Execute the `imagetool cache addEntry` command for each patch to add the required patch(es) to the WebLogic Image Tool cache. For example, to add patch `p30761841_122140_Generic.zip`: diff --git a/docs-source/content/soa-domains/installguide/create-soa-domains/_index.md b/docs-source/content/soa-domains/installguide/create-soa-domains/_index.md index d74168fb0..032d96f7a 100644 --- a/docs-source/content/soa-domains/installguide/create-soa-domains/_index.md +++ b/docs-source/content/soa-domains/installguide/create-soa-domains/_index.md @@ -15,7 +15,7 @@ Before you begin, complete the following steps: 1. 
Review the [Domain resource](https://oracle.github.io/weblogic-kubernetes-operator/userguide/managing-domains/domain-resource) documentation. 1. Review the [requirements and limitations]({{< relref "/soa-domains/installguide/prerequisites" >}}). 1. Ensure that you have executed all the preliminary steps in [Prepare your environment]({{< relref "/soa-domains/installguide/prepare-your-environment" >}}). -1. Ensure that the database and the WebLogic Server Kubernetes Operator are running. +1. Ensure that the database and the WebLogic Kubernetes Operator are running. #### Prepare to use the create domain script @@ -77,14 +77,13 @@ The following parameters can be provided in the inputs file. | `persistentStore` | The persistent store for 'JMS servers' and 'Transaction log store' in the domain. Valid values are `jdbc`, `file`. | `jdbc` | Note that the names of the Kubernetes resources in the generated YAML files may be formed with the -value of some of the properties specified in the `create-inputs.yaml` file. Those properties include -the `adminServerName`, `clusterName`, and `managedServerNameBase`. If those values contain any +value of some of the properties specified in the `create-domain-inputs.yaml` file. Those properties include +the `adminServerName`, `soaClusterName`, `soaManagedServerNameBase`, and so on. If those values contain any characters that are invalid in a Kubernetes service name, those characters are converted to valid values in the generated YAML files. For example, an uppercase letter is converted to a lowercase letter and an underscore `("_")` is converted to a hyphen `("-")`. -The sample demonstrates how to create an Oracle SOA Suite domain home and associated Kubernetes resources for a domain -that has one cluster only. In addition, the sample provides the capability for users to supply their own scripts +The sample demonstrates how to create an Oracle SOA Suite domain home and associated Kubernetes resources for the domain. 
In addition, the sample provides the capability for users to supply their own scripts to create the domain home for other use cases. The generated domain YAML file could also be modified to cover more use cases. #### Run the create domain script @@ -125,11 +124,15 @@ The default domain created by the script has the following characteristics: * A configured cluster named `soa_cluster` of size 5. * Two Managed Servers, named `soa_server1` and `soa_server2`, listening on port `8001`. * Log files that are located in `/shared/logs/`. -* SOA Infra, SOA composer and WorklistApp applications deployed. +* SOA Infra, SOA Composer, and WorklistApp applications deployed. + +{{% notice note %}} +Refer to the [troubleshooting]({{< relref "/soa-domains/troubleshooting/" >}}) page to troubleshoot issues during the domain creation. +{{% /notice %}} #### Verify the results -The create domain script will verify that the domain was created, and will report failure if there was any error. +The create domain script verifies that the domain was created, and reports failure if there is an error. However, it may be desirable to manually verify the domain, even if just to gain familiarity with the various Kubernetes objects that were created by the script. diff --git a/docs-source/content/soa-domains/installguide/prepare-your-environment/_index.md b/docs-source/content/soa-domains/installguide/prepare-your-environment/_index.md index 9b15532f7..7fc0d4555 100644 --- a/docs-source/content/soa-domains/installguide/prepare-your-environment/_index.md +++ b/docs-source/content/soa-domains/installguide/prepare-your-environment/_index.md @@ -6,7 +6,11 @@ pre = " " description = "Prepare for creating Oracle SOA Suite domains, including required secrets creation, persistent volume and volume claim creation, database creation, and database schema creation." 
+++ -To prepare your Oracle SOA Suite in Kubernetes environment, complete the following steps: +To prepare your Oracle SOA Suite in Kubernetes environment, complete the following steps. + +{{% notice note %}} +Refer to the [troubleshooting]({{< relref "/soa-domains/troubleshooting/" >}}) page to troubleshoot issues during the domain deployment process. +{{% /notice %}} 1. [Set up your Kubernetes cluster](#set-up-your-kubernetes-cluster) 1. [Install Helm](#install-helm) @@ -49,15 +53,15 @@ Obtain dependent images and add them to your local registry. Log in to the Oracle Container Registry (`container-registry.oracle.com`) from your Docker client: - ```bash + ```shell $ docker login container-registry.oracle.com ``` 1. Pull the operator image: ```bash - $ docker pull ghcr.io/oracle/weblogic-kubernetes-operator:3.2.1 - $ docker tag ghcr.io/oracle/weblogic-kubernetes-operator:3.2.1 oracle/weblogic-kubernetes-operator:3.2.1 + $ docker pull ghcr.io/oracle/weblogic-kubernetes-operator:3.3.0 + $ docker tag ghcr.io/oracle/weblogic-kubernetes-operator:3.3.0 oracle/weblogic-kubernetes-operator:3.3.0 ``` ### Set up the code repository to deploy Oracle SOA Suite domains @@ -66,36 +70,36 @@ Oracle SOA Suite domain deployment on Kubernetes leverages the WebLogic Kubernet 1. Create a working directory to set up the source code: ```bash - $ mkdir $HOME/soa_21.3.2 - $ cd $HOME/soa_21.3.2 + $ mkdir $HOME/soa_21.4.2 + $ cd $HOME/soa_21.4.2 ``` 1. Download the WebLogic Kubernetes Operator source code and Oracle SOA Suite Kubernetes deployment scripts from the SOA [repository](https://github.com/oracle/fmw-kubernetes.git). Required artifacts are available at `OracleSOASuite/kubernetes`. 
``` bash - $ git clone https://github.com/oracle/fmw-kubernetes.git --branch release/21.3.2 - $ export WORKDIR=$HOME/soa_21.3.2/OracleSOASuite/kubernetes + $ git clone https://github.com/oracle/fmw-kubernetes.git + $ export WORKDIR=$HOME/soa_21.4.2/OracleSOASuite/kubernetes ``` ### Obtain the Oracle SOA Suite Docker image The Oracle SOA Suite image with latest bundle patch and required interim patches can be obtained from My Oracle Support (MOS). This is the only image supported for production deployments. Follow the below steps to download the Oracle SOA Suite image from My Oracle Support. -1. Download patch [33125465](https://support.oracle.com/epmos/faces/ui/patch/PatchDetail.jspx?patchId=33125465) from My Oracle Support (MOS). +1. Download patch [33467899](https://support.oracle.com/epmos/faces/ui/patch/PatchDetail.jspx?patchId=33467899) from My Oracle Support (MOS). 1. Unzip the downloaded patch zip file. 1. Load the image archive using the `docker load` command. For example: ```bash - $ docker load < soasuite-12.2.1.4.0-8-ol7-210726.1017.tar - Loaded image: oracle/soasuite:12.2.1.4.0-8-ol7-210726.1017 + $ docker load < soasuite-12.2.1.4.0-8-ol7-211129.1734.tar + Loaded image: oracle/soasuite:12.2.1.4.0-8-ol7-211129.1734 $ ``` -1. Run the `docker inspect` command to verify that the downloaded image is the latest released image. The value of label `com.oracle.weblogic.imagetool.buildid` must match to `dae045ba-378d-4cdb-b010-85003db61cde`. +1. Run the `docker inspect` command to verify that the downloaded image is the latest released image. The value of label `com.oracle.weblogic.imagetool.buildid` must match to `2fd643ce-8ada-4841-9a0a-ed369cc08023`. 
For example: ```bash - $ docker inspect --format='{{ index .Config.Labels "com.oracle.weblogic.imagetool.buildid" }}' oracle/soasuite:12.2.1.4.0-8-ol7-210726.1017 - dae045ba-378d-4cdb-b010-85003db61cde + $ docker inspect --format='{{ index .Config.Labels "com.oracle.weblogic.imagetool.buildid" }}' oracle/soasuite:12.2.1.4.0-8-ol7-211129.1734 + 2fd643ce-8ada-4841-9a0a-ed369cc08023 $ ``` @@ -107,15 +111,15 @@ If you want to build and use an Oracle SOA Suite Docker image with any additiona ### Install the WebLogic Kubernetes Operator -The WebLogic Kubernetes Operator supports the deployment of Oracle SOA Suite domains in the Kubernetes environment. Follow the steps in [this document](https://github.com/oracle/weblogic-kubernetes-operator/blob/v3.2.1/documentation/3.2/content/quickstart/install.md#install-the-operator) to install the operator. -> Note: Optionally, you can execute these [steps](https://oracle.github.io/weblogic-kubernetes-operator/samples/simple/elastic-stack/operator/) to send the contents of the operator’s logs to Elasticsearch. +The WebLogic Kubernetes Operator supports the deployment of Oracle SOA Suite domains in the Kubernetes environment. Follow the steps in [this document](https://github.com/oracle/weblogic-kubernetes-operator/blob/v3.3.0/documentation/3.3/content/quickstart/install.md#install-the-operator) to install the operator. +> Note: Optionally, you can execute these [steps](https://oracle.github.io/weblogic-kubernetes-operator/samples/elastic-stack/operator/) to send the contents of the operator’s logs to Elasticsearch. 
-In the following example commands to install the WebLogic Kubernetes Operator, `opns` is the namespace and `op-sa` is the service account created for the operator: +In the following example commands to install the WebLogic Kubernetes Operator, `opns` is the namespace and `op-sa` is the service account created for the Operator: ``` $ kubectl create namespace opns $ kubectl create serviceaccount -n opns op-sa $ cd ${WORKDIR} - $ helm install weblogic-kubernetes-operator charts/weblogic-operator --namespace opns --set image=oracle/weblogic-kubernetes-operator:3.2.1 --set serviceAccount=op-sa --set "domainNamespaces={}" --set "javaLoggingLevel=FINE" --wait + $ helm install weblogic-kubernetes-operator charts/weblogic-operator --namespace opns --set image=oracle/weblogic-kubernetes-operator:3.3.0 --set serviceAccount=op-sa --set "domainNamespaces={}" --set "javaLoggingLevel=FINE" --wait ``` ### Prepare the environment for Oracle SOA Suite domains @@ -166,7 +170,7 @@ For details, see [Prepare to run a domain](https://oracle.github.io/weblogic-kub $ ./create-weblogic-credentials.sh -u weblogic -p Welcome1 -n soans -d soainfra -s soainfra-domain-credentials ``` - For more details, see [this document](https://github.com/oracle/weblogic-kubernetes-operator/blob/v3.2.1/kubernetes/samples/scripts/create-weblogic-domain-credentials/README.md). + For more details, see [this document](https://github.com/oracle/weblogic-kubernetes-operator/blob/v3.3.0/kubernetes/samples/scripts/create-weblogic-domain-credentials/README.md). You can check the secret with the `kubectl get secret` command. 
@@ -313,28 +317,30 @@ For example: $ cd ${WORKDIR}/create-rcu-schema $ ./create-rcu-schema.sh -h - usage: /create-rcu-schema.sh -s -t -d -i -u -p -n -q -r -o -c [-h] + usage: ./create-rcu-schema.sh -s -t -d -i -u -p -n -q -r -o -c [-l] [-h] -s RCU Schema Prefix (required) -t RCU Schema Type (optional) - (supported values: osb,soa,soaosb) + (supported values: osb,soa,soaosb) -d RCU Oracle Database URL (optional) - (default: oracle-db.default.svc.cluster.local:1521/devpdb.k8s) - -p FMW Infrastructure ImagePullSecret (optional) - (default: none) - -i FMW Infrastructure Image (optional) - (default: soasuite:12.2.1.4) - -u FMW Infrastructure ImagePullPolicy (optional) - (default: IfNotPresent) + (default: oracle-db.default.svc.cluster.local:1521/devpdb.k8s) + -p OracleSOASuite ImagePullSecret (optional) + (default: none) + -i OracleSOASuite Image (optional) + (default: soasuite:12.2.1.4) + -u OracleSOASuite ImagePullPolicy (optional) + (default: IfNotPresent) -n Namespace for RCU pod (optional) - (default: default) + (default: default) -q password for database SYSDBA user. (optional) - (default: Oradoc_db1) + (default: Oradoc_db1) -r password for all schema owner (regular user). (optional) - (default: Oradoc_db1) + (default: Oradoc_db1) -o Output directory for the generated YAML file. (optional) - (default: rcuoutput) + (default: rcuoutput) -c Comma-separated custom variables in the format variablename=value. (optional). - (default: none) + (default: none) + -l Timeout limit in seconds. (optional). 
+ (default: 300) -h Help $ ./create-rcu-schema.sh \ diff --git a/docs-source/content/soa-domains/installguide/prerequisites/_index.md b/docs-source/content/soa-domains/installguide/prerequisites/_index.md index 6a1a17a5b..a05d45388 100644 --- a/docs-source/content/soa-domains/installguide/prerequisites/_index.md +++ b/docs-source/content/soa-domains/installguide/prerequisites/_index.md @@ -10,23 +10,24 @@ This section provides information about the system requirements and limitations ### System requirements for Oracle SOA Suite domains -For the current production release 21.3.2: +For the current production release 21.4.2: * Operating systems supported: - * Oracle Linux 7 (UL6+) + * Oracle Linux 7 (UL6+) * Red Hat Enterprise Linux 7 (UL3+ only with standalone Kubernetes) - * Oracle Linux Cloud Native Environment (OLCNE) version 1.1.2 -* Kubernetes 1.16.15+, 1.17.13+, 1.18.10+, and 1.19.7+ (check with `kubectl version`). -* Docker 18.09.1ce, 19.03.1 (check with `docker version`) or CRI-O 1.17.0 (check with `crictl version | grep RuntimeVersion`). + * Oracle Linux Cloud Native Environment (OLCNE) version 1.3. +* Kubernetes 1.16.15+, 1.17.13+, 1.18.10+, 1.19.7+, and 1.20.6+ (check with `kubectl version`). +* Docker 18.9.1 or 19.03.1+ (check with `docker version`) or CRI-O 1.20.2+ (check with `crictl version | grep RuntimeVersion`). * Flannel networking v0.9.1-amd64 or later (check with `docker images | grep flannel`), Calico networking v3.16.1 or later. * Helm 3.3.4+ (check with `helm version --client --short`). -* WebLogic Kubernetes Operator 3.2.1 (see [operator releases](https://github.com/oracle/weblogic-kubernetes-operator/releases/tag/v3.2.1) page). -* Oracle SOA Suite 12.2.1.4 Docker image downloaded from My Oracle Support (MOS patch [33125465](https://support.oracle.com/epmos/faces/ui/patch/PatchDetail.jspx?patchId=33125465)). This image contains the latest bundle patch and one-off patches for Oracle SOA Suite. 
+* WebLogic Kubernetes Operator 3.3.0 (see the [operator releases](https://github.com/oracle/weblogic-kubernetes-operator/releases/tag/v3.3.0) page). +* Oracle SOA Suite 12.2.1.4 Docker image downloaded from My Oracle Support (MOS patch [33467899](https://support.oracle.com/epmos/faces/ui/patch/PatchDetail.jspx?patchId=33467899)). This image contains the latest bundle patch and one-off patches for Oracle SOA Suite. * You must have the `cluster-admin` role to install the operator. The operator does not need the `cluster-admin` role at runtime. + For more information, see the role-based access control (RBAC) [documentation](https://oracle.github.io/weblogic-kubernetes-operator/security/rbac/). * We do not currently support running SOA in non-Linux containers. * Additionally, see the Oracle SOA Suite [documentation](https://docs.oracle.com/en/middleware/fusion-middleware/12.2.1.4/insoa/preparing-install-and-configure-product.html#GUID-E2D4D481-BE80-4600-8078-FD9C03A30210) for other requirements such as database version. -See [here]({{< relref "/soa-domains/appendix/soa-cluster-sizing-info.md" >}}) for resourse sizing information for Oracle SOA Suite domains setup on Kubernetes cluster. +See [here]({{< relref "/soa-domains/appendix/soa-cluster-sizing-info.md" >}}) for resource sizing information for Oracle SOA Suite domains set up on a Kubernetes cluster. ### Limitations diff --git a/docs-source/content/soa-domains/patch_and_upgrade/patch-an-image/index.md b/docs-source/content/soa-domains/patch_and_upgrade/patch-an-image/index.md index e43eb9738..d90270b28 100644 --- a/docs-source/content/soa-domains/patch_and_upgrade/patch-an-image/index.md +++ b/docs-source/content/soa-domains/patch_and_upgrade/patch-an-image/index.md @@ -43,7 +43,7 @@ Before applying the patch, stop all servers in the domain: #### Update user permissions of the domain PV storage -The Oracle SOA Suite image for release 21.3.2 has an oracle user with UID 1000, with the default group set to `root`. 
Before applying the patched image, update the user permissions of the domain persistent volume (PV) to set the group to `root`: +The Oracle SOA Suite image for release 21.4.2 has an oracle user with UID 1000, with the default group set to `root`. Before applying the patched image, update the user permissions of the domain persistent volume (PV) to set the group to `root`: ``` $ sudo chown -R 1000:0 /scratch/k8s_dir/SOA diff --git a/docs-source/content/soa-domains/patch_and_upgrade/upgrade-k8s-cluster.md b/docs-source/content/soa-domains/patch_and_upgrade/upgrade-k8s-cluster.md index d9a247cf1..89421becf 100644 --- a/docs-source/content/soa-domains/patch_and_upgrade/upgrade-k8s-cluster.md +++ b/docs-source/content/soa-domains/patch_and_upgrade/upgrade-k8s-cluster.md @@ -23,8 +23,6 @@ It is expected that there will be a down time during the upgrade of the Kubernet For example, you can upgrade from 1.x to 1.x+1, but not from 1.x to 1.x+2. To upgrade a Kubernetes version, first all the master nodes of the Kubernetes cluster must be upgraded sequentially, followed by the sequential upgrade of each worker node. -* See [here](https://v1-15.docs.kubernetes.io/docs/tasks/administer-cluster/kubeadm/kubeadm-upgrade-1-15/) for Kubernetes official documentation to upgrade from v1.14.x to v1.15.x. -* See [here](https://v1-16.docs.kubernetes.io/docs/tasks/administer-cluster/kubeadm/kubeadm-upgrade/) for Kubernetes official documentation to upgrade from v1.15.x to v1.16.x. * See [here](https://v1-17.docs.kubernetes.io/docs/tasks/administer-cluster/kubeadm/kubeadm-upgrade/) for Kubernetes official documentation to upgrade from v1.16.x to v1.17.x. * See [here](https://v1-18.docs.kubernetes.io/docs/tasks/administer-cluster/kubeadm/kubeadm-upgrade/) for Kubernetes official documentation to upgrade from v1.17.x to v1.18.x. 
* See [here](https://v1-19.docs.kubernetes.io/docs/tasks/administer-cluster/kubeadm/kubeadm-upgrade/) for Kubernetes official documentation to upgrade from v1.18.x to v1.19.x. diff --git a/docs-source/content/soa-domains/patch_and_upgrade/upgrade-operator-release.md b/docs-source/content/soa-domains/patch_and_upgrade/upgrade-operator-release.md index 26b9f92dc..4b4085b47 100644 --- a/docs-source/content/soa-domains/patch_and_upgrade/upgrade-operator-release.md +++ b/docs-source/content/soa-domains/patch_and_upgrade/upgrade-operator-release.md @@ -7,24 +7,20 @@ pre: "b. " description: "Upgrade the WebLogic Kubernetes Operator release to a newer version." --- -These instructions apply to upgrading operators within the 3.x release family -as additional versions are released. - -To upgrade the Kubernetes operator, use the `helm upgrade` command. Make sure that the `weblogic-kubernetes-operator` repository on your local machine is at the operator release to which you are upgrading. See the steps [here]({{< relref "/soa-domains/installguide/prepare-your-environment/#get-dependent-images" >}}) to pull the image and set up the `weblogic-kubernetes-operator` repository. When upgrading the operator, -the `helm upgrade` command requires that you supply a new Helm chart and image. For example: +To upgrade the WebLogic Kubernetes operator, use the `helm upgrade` command with new Helm chart and operator image. See the steps [here]({{< relref "/soa-domains/installguide/prepare-your-environment/#get-dependent-images" >}}) to pull the operator image and set up the Oracle SOA Suite repository that contains the operator chart. 
To upgrade the operator run the following command: ``` $ cd ${WORKDIR} $ helm upgrade \ --reuse-values \ - --set image=oracle/weblogic-kubernetes-operator:3.2.1 \ + --set image=oracle/weblogic-kubernetes-operator:3.3.0 \ --namespace weblogic-operator-namespace \ --wait \ weblogic-kubernetes-operator \ charts/weblogic-operator ``` -> Note: When the WebLogic Kubernetes Operator is upgraded from release version 3.1.1 to 3.2.1 or later, it is expected that the Administration Server pod in the domain gets restarted. +> Note: When the WebLogic Kubernetes Operator is upgraded from release version 3.2.1 to 3.3.0 or later, it may be expected that the Administration Server pod in the domain gets restarted. #### Post upgrade steps diff --git a/docs-source/content/soa-domains/release-notes.md b/docs-source/content/soa-domains/release-notes.md index dacb9fab4..e22290e6e 100644 --- a/docs-source/content/soa-domains/release-notes.md +++ b/docs-source/content/soa-domains/release-notes.md @@ -12,6 +12,7 @@ Review the latest changes and known issues for Oracle SOA Suite on Kubernetes. | Date | Version | Change | | --- | --- | --- | +|November 30, 2021 | 21.4.2 | Supports Oracle SOA Suite 12.2.1.4 domains deployment using October 2021 PSU and known bug fixes. Oracle SOA Suite 12.2.1.4 Docker image for this release can be downloaded from My Oracle Support (MOS patch [33467899](https://support.oracle.com/epmos/faces/ui/patch/PatchDetail.jspx?patchId=33467899)). |August 6, 2021 | 21.3.2 | Supports Oracle SOA Suite 12.2.1.4 domains deployment using July 2021 PSU and known bug fixes. Oracle SOA Suite 12.2.1.4 Docker image for this release can be downloaded from My Oracle Support (MOS patch [33125465](https://support.oracle.com/epmos/faces/ui/patch/PatchDetail.jspx?patchId=33125465)). | May 31, 2021 | 21.2.2 | Supports Oracle SOA Suite 12.2.1.4 domains deployment using April 2021 PSU and known bug fixes. 
Oracle SOA Suite 12.2.1.4 Docker image for this release can be downloaded from My Oracle Support (MOS patch [32794257](https://support.oracle.com/epmos/faces/ui/patch/PatchDetail.jspx?patchId=32794257)). | February 28, 2021 | 21.1.2 | Supports Oracle SOA Suite 12.2.1.4 domains deployment using January 2021 PSU and known bug fixes. Oracle SOA Suite 12.2.1.4 Docker image for this release can be downloaded from My Oracle Support (MOS patch [32398542](https://support.oracle.com/epmos/faces/ui/patch/PatchDetail.jspx?patchId=32398542)). diff --git a/docs-source/content/soa-domains/troubleshooting/_index.md b/docs-source/content/soa-domains/troubleshooting/_index.md new file mode 100644 index 000000000..94afb3cac --- /dev/null +++ b/docs-source/content/soa-domains/troubleshooting/_index.md @@ -0,0 +1,185 @@ ++++ +title = "Troubleshooting" +weight = 9 +pre = "9. " +description = "Describes common issues that may occur during Oracle SOA Suite deployment on Kubernetes and the steps to troubleshoot them." ++++ + +This document describes common issues that may occur during the deployment of Oracle SOA Suite on Kubernetes and the steps to troubleshoot them. Also refer to the [FAQs](https://oracle.github.io/fmw-kubernetes/soa-domains/faq/) page for frequent issues and steps to resolve them. 
+ +* [WebLogic Kubernetes Operator installation failure](#weblogic-kubernetes-operator-installation-failure) +* [RCU schema creation failure](#rcu-schema-creation-failure) +* [Domain creation failure](#domain-creation-failure) +* [Common domain creation issues](#common-domain-creation-issues) +* [Server pods not started after applying domain configuration file](#server-pods-not-started-after-applying-domain-configuration-file) +* [Ingress controller not serving the domain urls](#ingress-controller-not-serving-the-domain-urls) + +#### WebLogic Kubernetes Operator installation failure +If the WebLogic Kubernetes Operator installation failed with timing out: + - Check the status of the operator Helm release using the command `helm ls -n `. + - Check if the operator pod is successfully created in the operator namespace. + - Describe the operator pod using `kubectl describe pod -n ` to identify any obvious errors. + +#### RCU schema creation failure +When creating the RCU schema using `create-rcu-schema.sh`, the possible causes for RCU schema creation failure are: + - Database is not up and running + - Incorrect database connection URL used + - Invalid database credentials used + - Schema prefix already exists + +Make sure that all the above causes are reviewed and corrected as needed. +Also [drop the existing schema]({{< relref "/soa-domains/cleanup-domain-setup#drop-the-rcu-schemas" >}}) with the same prefix before rerunning the `create-rcu-schema.sh` with correct values. + +#### Domain creation failure +If the Oracle SOA Suite domain creation fails when running `create-domain.sh`, perform the following steps to diagnose the issue: + +1. 
Run the following command to diagnose the create domain job: + + ```bash + $ kubectl logs jobs/ -n + ``` + + For example: + + ```bash + $ kubectl logs jobs/soainfra-create-soa-infra-domain-job -n soans + ``` + + Also run: + + ```bash + $ kubectl describe pod -n + ``` + + For example: + + ```bash + $ kubectl describe pod soainfra-create-soa-infra-domain-job-mcc6v -n soans + ``` + + Use the output to diagnose the problem and resolve the issue. + +1. Clean up the failed domain creation: + 1. Delete the failed domain creation job in the domain namespace using the command `kubectl delete job -n `. + 1. [Delete the contents of the domain home directory]({{< relref "/soa-domains/cleanup-domain-setup#delete-the-domain-home" >}}) + 1. [Drop the existing RCU schema]({{< relref "/soa-domains/cleanup-domain-setup#drop-the-rcu-schemas" >}}) + +1. Recreate the domain: + 1. [Recreate the RCU schema]({{< relref "/soa-domains/installguide/prepare-your-environment#run-the-repository-creation-utility-to-set-up-your-database-schemas" >}}) + 1. Make sure the Persistent Volume and Persistent Volume Claim used for the domain are created with correct permissions and bound together. + 1. [Rerun the create domain script]({{< relref "/soa-domains/installguide/create-soa-domains/#run-the-create-domain-script" >}}) + +#### Common domain creation issues +A common domain creation issue is error `Failed to build JDBC Connection object` in the create domain job logs. + + {{%expand "Click here to see the error stack trace:" %}} + ``` + Configuring the Service Table DataSource... + fmwDatabase jdbc:oracle:thin:@orclcdb.soainfra-domain-ns-293-10202010:1521/orclpdb1 + Getting Database Defaults... + Error: getDatabaseDefaults() failed. Do dumpStack() to see details. + Error: runCmd() failed. Do dumpStack() to see details. + Problem invoking WLST - Traceback (innermost last): + File "/u01/weblogic/..2021_10_20_20_29_37.256759996/createSOADomain.py", line 943, in ? 
+ File "/u01/weblogic/..2021_10_20_20_29_37.256759996/createSOADomain.py", line 75, in createSOADomain + File "/u01/weblogic/..2021_10_20_20_29_37.256759996/createSOADomain.py", line 695, in extendSoaB2BDomain + File "/u01/weblogic/..2021_10_20_20_29_37.256759996/createSOADomain.py", line 588, in configureJDBCTemplates + File "/tmp/WLSTOfflineIni956349269221112379.py", line 267, in getDatabaseDefaults + File "/tmp/WLSTOfflineIni956349269221112379.py", line 19, in command + Failed to build JDBC Connection object: + at com.oracle.cie.domain.script.jython.CommandExceptionHandler.handleException(CommandExceptionHandler.java:69) + at com.oracle.cie.domain.script.jython.WLScriptContext.handleException(WLScriptContext.java:3085) + at com.oracle.cie.domain.script.jython.WLScriptContext.runCmd(WLScriptContext.java:738) + at sun.reflect.GeneratedMethodAccessor152.invoke(Unknown Source) + at sun.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) + at java.lang.reflect.Method.invoke(Method.java:498) + com.oracle.cie.domain.script.jython.WLSTException: com.oracle.cie.domain.script.jython.WLSTException: Got exception when auto configuring the schema component(s) with data obtained from shadow table: + Failed to build JDBC Connection object: + ERROR: /u01/weblogic/create-domain-script.sh failed. + ``` + {{% /expand %}} + + This error is reported when there is an issue with database schema access during domain creation. The possible causes are: + * Incorrect schema name specified in `create-domain-inputs.yaml`. + * RCU schema credentials specified in the secret `soainfra-rcu-credentials` are different from the credentials specified while creating the RCU schema using `create-rcu-schema.sh`. + + To resolve these possible causes, check that the schema name and credentials used during the domain creation are the same as when the RCU schema was created. 
+ +#### Server pods not started after applying domain configuration file +This issue usually happens when the WebLogic Kubernetes Operator is not configured to manage the domain namespace. You can verify the configuration by running the command `helm get values -n ` and checking the values under the `domainNamespaces` section. + +For example: +``` +$ helm get values weblogic-kubernetes-operator -n opns +USER-SUPPLIED VALUES: +domainNamespaces: +- soans +image: ghcr.io/oracle/weblogic-kubernetes-operator:3.3.0 +javaLoggingLevel: FINE +serviceAccount: op-sa +$ +``` +If you don't see the domain namespace value under the `domainNamespaces` section, run the `helm upgrade` command in the operator namespace with appropriate values to configure the operator to manage the domain namespace. + +``` +$ helm upgrade --reuse-values --namespace opns --set "domainNamespaces={soans}" --wait weblogic-kubernetes-operator charts/weblogic-operator +``` + +#### Ingress controller not serving the domain URLs +To diagnose this issue: +1. Verify that the Ingress controller is installed successfully. + For example, to verify the `Traefik` Ingress controller status, run the following command: + ``` + $ helm list -n traefik + NAME NAMESPACE REVISION UPDATED STATUS CHART APP VERSION + traefik-operator traefik 2 2021-10-27 11:24:29.317003398 +0000 UTC deployed traefik-9.1.1 2.2.8 + $ + ``` +1. Verify that the Ingress controller is setup to monitor the domain namespace. + For example, to verify the `Traefik` Ingress controller manages the `soans` domain namespace, run the following command and check the values under `namespaces` section. + ``` + $ helm get values traefik-operator -n traefik + USER-SUPPLIED VALUES: + kubernetes: + namespaces: + - traefik + - soans + $ + ``` +1. Verify that the Ingress chart is installed correctly in domain namespace. 
For example, run the following command: + ``` + $ helm list -n soans + NAME NAMESPACE REVISION UPDATED STATUS CHART APP VERSION + soainfra-traefik soans 1 2021-10-27 11:24:31.7572791 +0000 UTC deployed ingress-per-domain-0.1.0 1.0 + $ + ``` +1. Verify that the Ingress URL paths and hostnames are configured correctly by running the following commands: + {{%expand "Click here to see the sample commands and output" %}} + ``` + $ kubectl get ingress soainfra-traefik -n soans + NAME CLASS HOSTS ADDRESS PORTS AGE + soainfra-traefik 80 20h + $ + $ kubectl describe ingress soainfra-traefik -n soans + Name: soainfra-traefik + Namespace: soans + Address: + Default backend: default-http-backend:80 () + Rules: + Host Path Backends + ---- ---- -------- + + /console soainfra-adminserver:7001 (10.244.0.123:7001) + /em soainfra-adminserver:7001 (10.244.0.123:7001) + /weblogic/ready soainfra-adminserver:7001 (10.244.0.123:7001) + /soa-infra soainfra-cluster-soa-cluster:8001 (10.244.0.126:8001,10.244.0.127:8001) + /soa/composer soainfra-cluster-soa-cluster:8001 (10.244.0.126:8001,10.244.0.127:8001) + /integration/worklistapp soainfra-cluster-soa-cluster:8001 (10.244.0.126:8001,10.244.0.127:8001) + /EssHealthCheck soainfra-cluster-soa-cluster:8001 (10.244.0.126:8001,10.244.0.127:8001) + Annotations: kubernetes.io/ingress.class: traefik + meta.helm.sh/release-name: soainfra-traefik + meta.helm.sh/release-namespace: soans + Events: + $ + ``` + {{% /expand %}} diff --git a/docs-source/content/wccontent-domains/_index.md b/docs-source/content/wccontent-domains/_index.md index 8c5d1cd01..69be0375a 100644 --- a/docs-source/content/wccontent-domains/_index.md +++ b/docs-source/content/wccontent-domains/_index.md @@ -1,7 +1,7 @@ --- title: "Oracle WebCenter Content" date: 2020-11-27T16:43:45-05:00 -weight: 6 +weight: 7 description: "The Oracle WebLogic Server Kubernetes Operator (the “operator”) supports deployment of Oracle WebCenter Content servers such as Oracle WebCenter Content(Content 
Server) and Oracle WebCenter Content(Inbound Refinery Server). Follow the instructions in this guide to set up Oracle WebCenter Content domain on Kubernetes." --- diff --git a/docs-source/content/wcportal-domains/_index.md b/docs-source/content/wcportal-domains/_index.md index 77372658f..af95a8ca1 100644 --- a/docs-source/content/wcportal-domains/_index.md +++ b/docs-source/content/wcportal-domains/_index.md @@ -1,7 +1,7 @@ --- title: "Oracle WebCenter Portal" date: 2021 -weight: 7 +weight: 8 description: "The WebLogic Kubernetes operator (the “operator”) supports deployment of Oracle WebCenter Portal. Follow the instructions in this guide to set up Oracle WebCenter Portal domain on Kubernetes." --- diff --git a/docs-source/content/wcsites-domains/_index.md b/docs-source/content/wcsites-domains/_index.md index 2a10bbc85..355b47e12 100644 --- a/docs-source/content/wcsites-domains/_index.md +++ b/docs-source/content/wcsites-domains/_index.md @@ -1,7 +1,7 @@ --- title: "Oracle WebCenter Sites" date: 2019-02-23T16:43:45-05:00 -weight: 8 +weight: 9 description: "The WebLogic Kubernetes Operator supports deployment of Oracle WebCenter Sites. Follow the instructions in this guide to set up Oracle WebCenter Sites domains on Kubernetes." --- diff --git a/docs/21.4.1/404.html b/docs/21.4.1/404.html index 6ad102301..d2ae6f17a 100644 --- a/docs/21.4.1/404.html +++ b/docs/21.4.1/404.html @@ -9,13 +9,13 @@ 404 Page not found - - - - - - - + + + + + + + + + + + + + + + + +
+
+
+ +
+
+ + + + +
+
+ +
+
+ + +
+
+ +
+ +
+ +
+ +

+ + Release Notes +

+ + + + + + +

Review the latest changes and known issues for Oracle Internet Directory on Kubernetes.

+

Recent changes

+ + + + + + + + + + + + + + + +
DateVersionChange
October, 202121.4.1Initial release of Oracle Internet Directory on Kubernetes.
+ + +
+ +
+ + + +
+ + +
+ + + +
+ +
+
+
+ + + + + + + + + + + + + + + + + + + diff --git a/docs/21.4.1/oid/troubleshooting/index.html b/docs/21.4.1/oid/troubleshooting/index.html index e88547960..92fabdae9 100644 --- a/docs/21.4.1/oid/troubleshooting/index.html +++ b/docs/21.4.1/oid/troubleshooting/index.html @@ -12,18 +12,18 @@ Troubleshooting :: Oracle Fusion Middleware on Kubernetes - - - - - - - - - + + + + + + + + + - + + + + + + +
+
+
+
+

Error

+

+

+

Woops. Looks like this page doesn't exist ¯\_(ツ)_/¯.

+

+

Go to homepage

+

Page not found!

+
+
+ +
+ + + diff --git a/docs/21.4.2/categories/index.html b/docs/21.4.2/categories/index.html new file mode 100644 index 000000000..45280a022 --- /dev/null +++ b/docs/21.4.2/categories/index.html @@ -0,0 +1,4725 @@ + + + + + + + + + + + + Categories :: Oracle Fusion Middleware on Kubernetes + + + + + + + + + + + + + + + + + + + + + + + + + +
+
+
+ +
+
+ + + + +
+
+ +
+
+ + +
+
+ +
+ +
+ +
+ +

+ + Categories +

+ + + + + + + + + +
    + +
+ + +
+ +
+ + +
+ + +
+ + + +
+ +
+
+
+ + + + + + + + + + + + + + + + + + diff --git a/docs/21.4.2/categories/index.xml b/docs/21.4.2/categories/index.xml new file mode 100644 index 000000000..26055144d --- /dev/null +++ b/docs/21.4.2/categories/index.xml @@ -0,0 +1,14 @@ + + + + Categories on Oracle Fusion Middleware on Kubernetes + /fmw-kubernetes/21.4.2/categories/ + Recent content in Categories on Oracle Fusion Middleware on Kubernetes + Hugo -- gohugo.io + en-us + + + + + + \ No newline at end of file diff --git a/docs/21.4.2/css/atom-one-dark-reasonable.css b/docs/21.4.2/css/atom-one-dark-reasonable.css new file mode 100644 index 000000000..fd41c996a --- /dev/null +++ b/docs/21.4.2/css/atom-one-dark-reasonable.css @@ -0,0 +1,77 @@ +/* + +Atom One Dark With support for ReasonML by Gidi Morris, based off work by Daniel Gamage + +Original One Dark Syntax theme from https://github.com/atom/one-dark-syntax + +*/ +.hljs { + display: block; + overflow-x: auto; + padding: 0.5em; + line-height: 1.3em; + color: #abb2bf; + background: #282c34; + border-radius: 5px; +} +.hljs-keyword, .hljs-operator { + color: #F92672; +} +.hljs-pattern-match { + color: #F92672; +} +.hljs-pattern-match .hljs-constructor { + color: #61aeee; +} +.hljs-function { + color: #61aeee; +} +.hljs-function .hljs-params { + color: #A6E22E; +} +.hljs-function .hljs-params .hljs-typing { + color: #FD971F; +} +.hljs-module-access .hljs-module { + color: #7e57c2; +} +.hljs-constructor { + color: #e2b93d; +} +.hljs-constructor .hljs-string { + color: #9CCC65; +} +.hljs-comment, .hljs-quote { + color: #b18eb1; + font-style: italic; +} +.hljs-doctag, .hljs-formula { + color: #c678dd; +} +.hljs-section, .hljs-name, .hljs-selector-tag, .hljs-deletion, .hljs-subst { + color: #e06c75; +} +.hljs-literal { + color: #56b6c2; +} +.hljs-string, .hljs-regexp, .hljs-addition, .hljs-attribute, .hljs-meta-string { + color: #98c379; +} +.hljs-built_in, .hljs-class .hljs-title { + color: #e6c07b; +} +.hljs-attr, .hljs-variable, .hljs-template-variable, 
.hljs-type, .hljs-selector-class, .hljs-selector-attr, .hljs-selector-pseudo, .hljs-number { + color: #d19a66; +} +.hljs-symbol, .hljs-bullet, .hljs-link, .hljs-meta, .hljs-selector-id, .hljs-title { + color: #61aeee; +} +.hljs-emphasis { + font-style: italic; +} +.hljs-strong { + font-weight: bold; +} +.hljs-link { + text-decoration: underline; +} diff --git a/docs/21.4.2/css/auto-complete.css b/docs/21.4.2/css/auto-complete.css new file mode 100644 index 000000000..ac6979ad3 --- /dev/null +++ b/docs/21.4.2/css/auto-complete.css @@ -0,0 +1,47 @@ +.autocomplete-suggestions { + text-align: left; + cursor: default; + border: 1px solid #ccc; + border-top: 0; + background: #fff; + box-shadow: -1px 1px 3px rgba(0,0,0,.1); + + /* core styles should not be changed */ + position: absolute; + display: none; + z-index: 9999; + max-height: 254px; + overflow: hidden; + overflow-y: auto; + box-sizing: border-box; + +} +.autocomplete-suggestion { + position: relative; + cursor: pointer; + padding: 7px; + line-height: 23px; + white-space: nowrap; + overflow: hidden; + text-overflow: ellipsis; + color: #333; +} + +.autocomplete-suggestion b { + font-weight: normal; + color: #1f8dd6; +} + +.autocomplete-suggestion.selected { + background: #333; + color: #fff; +} + +.autocomplete-suggestion:hover { + background: #444; + color: #fff; +} + +.autocomplete-suggestion > .context { + font-size: 12px; +} diff --git a/docs/21.4.2/css/featherlight.min.css b/docs/21.4.2/css/featherlight.min.css new file mode 100644 index 000000000..1b00c7861 --- /dev/null +++ b/docs/21.4.2/css/featherlight.min.css @@ -0,0 +1,8 @@ +/** + * Featherlight - ultra slim jQuery lightbox + * Version 1.7.13 - http://noelboss.github.io/featherlight/ + * + * Copyright (c) 2015, Noël Raoul Bossart (http://www.noelboss.com) + * MIT Licensed. 
+**/ +html.with-featherlight{overflow:hidden}.featherlight{display:none;position:fixed;top:0;right:0;bottom:0;left:0;z-index:2147483647;text-align:center;white-space:nowrap;cursor:pointer;background:#333;background:rgba(0,0,0,0)}.featherlight:last-of-type{background:rgba(0,0,0,.8)}.featherlight:before{content:'';display:inline-block;height:100%;vertical-align:middle}.featherlight .featherlight-content{position:relative;text-align:left;vertical-align:middle;display:inline-block;overflow:auto;padding:25px 25px 0;border-bottom:25px solid transparent;margin-left:5%;margin-right:5%;max-height:95%;background:#fff;cursor:auto;white-space:normal}.featherlight .featherlight-inner{display:block}.featherlight link.featherlight-inner,.featherlight script.featherlight-inner,.featherlight style.featherlight-inner{display:none}.featherlight .featherlight-close-icon{position:absolute;z-index:9999;top:0;right:0;line-height:25px;width:25px;cursor:pointer;text-align:center;font-family:Arial,sans-serif;background:#fff;background:rgba(255,255,255,.3);color:#000;border:0;padding:0}.featherlight .featherlight-close-icon::-moz-focus-inner{border:0;padding:0}.featherlight .featherlight-image{width:100%}.featherlight-iframe .featherlight-content{border-bottom:0;padding:0;-webkit-overflow-scrolling:touch}.featherlight iframe{border:0}.featherlight *{-webkit-box-sizing:border-box;-moz-box-sizing:border-box;box-sizing:border-box}@media only screen and (max-width:1024px){.featherlight .featherlight-content{margin-left:0;margin-right:0;max-height:98%;padding:10px 10px 0;border-bottom:10px solid transparent}}@media print{html.with-featherlight>*>:not(.featherlight){display:none}} \ No newline at end of file diff --git a/docs/21.4.2/css/fontawesome-all.min.css b/docs/21.4.2/css/fontawesome-all.min.css new file mode 100644 index 000000000..de5647372 --- /dev/null +++ b/docs/21.4.2/css/fontawesome-all.min.css @@ -0,0 +1 @@ 
+.fa,.fab,.fal,.far,.fas{-moz-osx-font-smoothing:grayscale;-webkit-font-smoothing:antialiased;display:inline-block;font-style:normal;font-variant:normal;text-rendering:auto;line-height:1}.fa-lg{font-size:1.33333em;line-height:.75em;vertical-align:-.0667em}.fa-xs{font-size:.75em}.fa-sm{font-size:.875em}.fa-1x{font-size:1em}.fa-2x{font-size:2em}.fa-3x{font-size:3em}.fa-4x{font-size:4em}.fa-5x{font-size:5em}.fa-6x{font-size:6em}.fa-7x{font-size:7em}.fa-8x{font-size:8em}.fa-9x{font-size:9em}.fa-10x{font-size:10em}.fa-fw{text-align:center;width:1.25em}.fa-ul{list-style-type:none;margin-left:2.5em;padding-left:0}.fa-ul>li{position:relative}.fa-li{left:-2em;position:absolute;text-align:center;width:2em;line-height:inherit}.fa-border{border:.08em solid #eee;border-radius:.1em;padding:.2em .25em .15em}.fa-pull-left{float:left}.fa-pull-right{float:right}.fa.fa-pull-left,.fab.fa-pull-left,.fal.fa-pull-left,.far.fa-pull-left,.fas.fa-pull-left{margin-right:.3em}.fa.fa-pull-right,.fab.fa-pull-right,.fal.fa-pull-right,.far.fa-pull-right,.fas.fa-pull-right{margin-left:.3em}.fa-spin{animation:fa-spin 2s infinite linear}.fa-pulse{animation:fa-spin 1s infinite steps(8)}@keyframes fa-spin{0%{transform:rotate(0deg)}to{transform:rotate(1turn)}}.fa-rotate-90{-ms-filter:"progid:DXImageTransform.Microsoft.BasicImage(rotation=1)";transform:rotate(90deg)}.fa-rotate-180{-ms-filter:"progid:DXImageTransform.Microsoft.BasicImage(rotation=2)";transform:rotate(180deg)}.fa-rotate-270{-ms-filter:"progid:DXImageTransform.Microsoft.BasicImage(rotation=3)";transform:rotate(270deg)}.fa-flip-horizontal{-ms-filter:"progid:DXImageTransform.Microsoft.BasicImage(rotation=0, mirror=1)";transform:scaleX(-1)}.fa-flip-vertical{transform:scaleY(-1)}.fa-flip-horizontal.fa-flip-vertical,.fa-flip-vertical{-ms-filter:"progid:DXImageTransform.Microsoft.BasicImage(rotation=2, mirror=1)"}.fa-flip-horizontal.fa-flip-vertical{transform:scale(-1)}:root .fa-flip-horizontal,:root .fa-flip-vertical,:root .fa-rotate-90,:root 
.fa-rotate-180,:root .fa-rotate-270{filter:none}.fa-stack{display:inline-block;height:2em;line-height:2em;position:relative;vertical-align:middle;width:2.5em}.fa-stack-1x,.fa-stack-2x{left:0;position:absolute;text-align:center;width:100%}.fa-stack-1x{line-height:inherit}.fa-stack-2x{font-size:2em}.fa-inverse{color:#fff}.fa-500px:before{content:"\f26e"}.fa-accessible-icon:before{content:"\f368"}.fa-accusoft:before{content:"\f369"}.fa-acquisitions-incorporated:before{content:"\f6af"}.fa-ad:before{content:"\f641"}.fa-address-book:before{content:"\f2b9"}.fa-address-card:before{content:"\f2bb"}.fa-adjust:before{content:"\f042"}.fa-adn:before{content:"\f170"}.fa-adobe:before{content:"\f778"}.fa-adversal:before{content:"\f36a"}.fa-affiliatetheme:before{content:"\f36b"}.fa-air-freshener:before{content:"\f5d0"}.fa-algolia:before{content:"\f36c"}.fa-align-center:before{content:"\f037"}.fa-align-justify:before{content:"\f039"}.fa-align-left:before{content:"\f036"}.fa-align-right:before{content:"\f038"}.fa-alipay:before{content:"\f642"}.fa-allergies:before{content:"\f461"}.fa-amazon:before{content:"\f270"}.fa-amazon-pay:before{content:"\f42c"}.fa-ambulance:before{content:"\f0f9"}.fa-american-sign-language-interpreting:before{content:"\f2a3"}.fa-amilia:before{content:"\f36d"}.fa-anchor:before{content:"\f13d"}.fa-android:before{content:"\f17b"}.fa-angellist:before{content:"\f209"}.fa-angle-double-down:before{content:"\f103"}.fa-angle-double-left:before{content:"\f100"}.fa-angle-double-right:before{content:"\f101"}.fa-angle-double-up:before{content:"\f102"}.fa-angle-down:before{content:"\f107"}.fa-angle-left:before{content:"\f104"}.fa-angle-right:before{content:"\f105"}.fa-angle-up:before{content:"\f106"}.fa-angry:before{content:"\f556"}.fa-angrycreative:before{content:"\f36e"}.fa-angular:before{content:"\f420"}.fa-ankh:before{content:"\f644"}.fa-app-store:before{content:"\f36f"}.fa-app-store-ios:before{content:"\f370"}.fa-apper:before{content:"\f371"}.fa-apple:before{content:"\f1
79"}.fa-apple-alt:before{content:"\f5d1"}.fa-apple-pay:before{content:"\f415"}.fa-archive:before{content:"\f187"}.fa-archway:before{content:"\f557"}.fa-arrow-alt-circle-down:before{content:"\f358"}.fa-arrow-alt-circle-left:before{content:"\f359"}.fa-arrow-alt-circle-right:before{content:"\f35a"}.fa-arrow-alt-circle-up:before{content:"\f35b"}.fa-arrow-circle-down:before{content:"\f0ab"}.fa-arrow-circle-left:before{content:"\f0a8"}.fa-arrow-circle-right:before{content:"\f0a9"}.fa-arrow-circle-up:before{content:"\f0aa"}.fa-arrow-down:before{content:"\f063"}.fa-arrow-left:before{content:"\f060"}.fa-arrow-right:before{content:"\f061"}.fa-arrow-up:before{content:"\f062"}.fa-arrows-alt:before{content:"\f0b2"}.fa-arrows-alt-h:before{content:"\f337"}.fa-arrows-alt-v:before{content:"\f338"}.fa-artstation:before{content:"\f77a"}.fa-assistive-listening-systems:before{content:"\f2a2"}.fa-asterisk:before{content:"\f069"}.fa-asymmetrik:before{content:"\f372"}.fa-at:before{content:"\f1fa"}.fa-atlas:before{content:"\f558"}.fa-atlassian:before{content:"\f77b"}.fa-atom:before{content:"\f5d2"}.fa-audible:before{content:"\f373"}.fa-audio-description:before{content:"\f29e"}.fa-autoprefixer:before{content:"\f41c"}.fa-avianex:before{content:"\f374"}.fa-aviato:before{content:"\f421"}.fa-award:before{content:"\f559"}.fa-aws:before{content:"\f375"}.fa-baby:before{content:"\f77c"}.fa-baby-carriage:before{content:"\f77d"}.fa-backspace:before{content:"\f55a"}.fa-backward:before{content:"\f04a"}.fa-balance-scale:before{content:"\f24e"}.fa-ban:before{content:"\f05e"}.fa-band-aid:before{content:"\f462"}.fa-bandcamp:before{content:"\f2d5"}.fa-barcode:before{content:"\f02a"}.fa-bars:before{content:"\f0c9"}.fa-baseball-ball:before{content:"\f433"}.fa-basketball-ball:before{content:"\f434"}.fa-bath:before{content:"\f2cd"}.fa-battery-empty:before{content:"\f244"}.fa-battery-full:before{content:"\f240"}.fa-battery-half:before{content:"\f242"}.fa-battery-quarter:before{content:"\f243"}.fa-battery-three-qu
arters:before{content:"\f241"}.fa-bed:before{content:"\f236"}.fa-beer:before{content:"\f0fc"}.fa-behance:before{content:"\f1b4"}.fa-behance-square:before{content:"\f1b5"}.fa-bell:before{content:"\f0f3"}.fa-bell-slash:before{content:"\f1f6"}.fa-bezier-curve:before{content:"\f55b"}.fa-bible:before{content:"\f647"}.fa-bicycle:before{content:"\f206"}.fa-bimobject:before{content:"\f378"}.fa-binoculars:before{content:"\f1e5"}.fa-biohazard:before{content:"\f780"}.fa-birthday-cake:before{content:"\f1fd"}.fa-bitbucket:before{content:"\f171"}.fa-bitcoin:before{content:"\f379"}.fa-bity:before{content:"\f37a"}.fa-black-tie:before{content:"\f27e"}.fa-blackberry:before{content:"\f37b"}.fa-blender:before{content:"\f517"}.fa-blender-phone:before{content:"\f6b6"}.fa-blind:before{content:"\f29d"}.fa-blog:before{content:"\f781"}.fa-blogger:before{content:"\f37c"}.fa-blogger-b:before{content:"\f37d"}.fa-bluetooth:before{content:"\f293"}.fa-bluetooth-b:before{content:"\f294"}.fa-bold:before{content:"\f032"}.fa-bolt:before{content:"\f0e7"}.fa-bomb:before{content:"\f1e2"}.fa-bone:before{content:"\f5d7"}.fa-bong:before{content:"\f55c"}.fa-book:before{content:"\f02d"}.fa-book-dead:before{content:"\f6b7"}.fa-book-open:before{content:"\f518"}.fa-book-reader:before{content:"\f5da"}.fa-bookmark:before{content:"\f02e"}.fa-bowling-ball:before{content:"\f436"}.fa-box:before{content:"\f466"}.fa-box-open:before{content:"\f49e"}.fa-boxes:before{content:"\f468"}.fa-braille:before{content:"\f2a1"}.fa-brain:before{content:"\f5dc"}.fa-briefcase:before{content:"\f0b1"}.fa-briefcase-medical:before{content:"\f469"}.fa-broadcast-tower:before{content:"\f519"}.fa-broom:before{content:"\f51a"}.fa-brush:before{content:"\f55d"}.fa-btc:before{content:"\f15a"}.fa-bug:before{content:"\f188"}.fa-building:before{content:"\f1ad"}.fa-bullhorn:before{content:"\f0a1"}.fa-bullseye:before{content:"\f140"}.fa-burn:before{content:"\f46a"}.fa-buromobelexperte:before{content:"\f37f"}.fa-bus:before{content:"\f207"}.fa-bus-alt:be
fore{content:"\f55e"}.fa-business-time:before{content:"\f64a"}.fa-buysellads:before{content:"\f20d"}.fa-calculator:before{content:"\f1ec"}.fa-calendar:before{content:"\f133"}.fa-calendar-alt:before{content:"\f073"}.fa-calendar-check:before{content:"\f274"}.fa-calendar-day:before{content:"\f783"}.fa-calendar-minus:before{content:"\f272"}.fa-calendar-plus:before{content:"\f271"}.fa-calendar-times:before{content:"\f273"}.fa-calendar-week:before{content:"\f784"}.fa-camera:before{content:"\f030"}.fa-camera-retro:before{content:"\f083"}.fa-campground:before{content:"\f6bb"}.fa-canadian-maple-leaf:before{content:"\f785"}.fa-candy-cane:before{content:"\f786"}.fa-cannabis:before{content:"\f55f"}.fa-capsules:before{content:"\f46b"}.fa-car:before{content:"\f1b9"}.fa-car-alt:before{content:"\f5de"}.fa-car-battery:before{content:"\f5df"}.fa-car-crash:before{content:"\f5e1"}.fa-car-side:before{content:"\f5e4"}.fa-caret-down:before{content:"\f0d7"}.fa-caret-left:before{content:"\f0d9"}.fa-caret-right:before{content:"\f0da"}.fa-caret-square-down:before{content:"\f150"}.fa-caret-square-left:before{content:"\f191"}.fa-caret-square-right:before{content:"\f152"}.fa-caret-square-up:before{content:"\f151"}.fa-caret-up:before{content:"\f0d8"}.fa-carrot:before{content:"\f787"}.fa-cart-arrow-down:before{content:"\f218"}.fa-cart-plus:before{content:"\f217"}.fa-cash-register:before{content:"\f788"}.fa-cat:before{content:"\f6be"}.fa-cc-amazon-pay:before{content:"\f42d"}.fa-cc-amex:before{content:"\f1f3"}.fa-cc-apple-pay:before{content:"\f416"}.fa-cc-diners-club:before{content:"\f24c"}.fa-cc-discover:before{content:"\f1f2"}.fa-cc-jcb:before{content:"\f24b"}.fa-cc-mastercard:before{content:"\f1f1"}.fa-cc-paypal:before{content:"\f1f4"}.fa-cc-stripe:before{content:"\f1f5"}.fa-cc-visa:before{content:"\f1f0"}.fa-centercode:before{content:"\f380"}.fa-centos:before{content:"\f789"}.fa-certificate:before{content:"\f0a3"}.fa-chair:before{content:"\f6c0"}.fa-chalkboard:before{content:"\f51b"}.fa-chalkboa
rd-teacher:before{content:"\f51c"}.fa-charging-station:before{content:"\f5e7"}.fa-chart-area:before{content:"\f1fe"}.fa-chart-bar:before{content:"\f080"}.fa-chart-line:before{content:"\f201"}.fa-chart-pie:before{content:"\f200"}.fa-check:before{content:"\f00c"}.fa-check-circle:before{content:"\f058"}.fa-check-double:before{content:"\f560"}.fa-check-square:before{content:"\f14a"}.fa-chess:before{content:"\f439"}.fa-chess-bishop:before{content:"\f43a"}.fa-chess-board:before{content:"\f43c"}.fa-chess-king:before{content:"\f43f"}.fa-chess-knight:before{content:"\f441"}.fa-chess-pawn:before{content:"\f443"}.fa-chess-queen:before{content:"\f445"}.fa-chess-rook:before{content:"\f447"}.fa-chevron-circle-down:before{content:"\f13a"}.fa-chevron-circle-left:before{content:"\f137"}.fa-chevron-circle-right:before{content:"\f138"}.fa-chevron-circle-up:before{content:"\f139"}.fa-chevron-down:before{content:"\f078"}.fa-chevron-left:before{content:"\f053"}.fa-chevron-right:before{content:"\f054"}.fa-chevron-up:before{content:"\f077"}.fa-child:before{content:"\f1ae"}.fa-chrome:before{content:"\f268"}.fa-church:before{content:"\f51d"}.fa-circle:before{content:"\f111"}.fa-circle-notch:before{content:"\f1ce"}.fa-city:before{content:"\f64f"}.fa-clipboard:before{content:"\f328"}.fa-clipboard-check:before{content:"\f46c"}.fa-clipboard-list:before{content:"\f46d"}.fa-clock:before{content:"\f017"}.fa-clone:before{content:"\f24d"}.fa-closed-captioning:before{content:"\f20a"}.fa-cloud:before{content:"\f0c2"}.fa-cloud-download-alt:before{content:"\f381"}.fa-cloud-meatball:before{content:"\f73b"}.fa-cloud-moon:before{content:"\f6c3"}.fa-cloud-moon-rain:before{content:"\f73c"}.fa-cloud-rain:before{content:"\f73d"}.fa-cloud-showers-heavy:before{content:"\f740"}.fa-cloud-sun:before{content:"\f6c4"}.fa-cloud-sun-rain:before{content:"\f743"}.fa-cloud-upload-alt:before{content:"\f382"}.fa-cloudscale:before{content:"\f383"}.fa-cloudsmith:before{content:"\f384"}.fa-cloudversify:before{content:"\f385"}.f
a-cocktail:before{content:"\f561"}.fa-code:before{content:"\f121"}.fa-code-branch:before{content:"\f126"}.fa-codepen:before{content:"\f1cb"}.fa-codiepie:before{content:"\f284"}.fa-coffee:before{content:"\f0f4"}.fa-cog:before{content:"\f013"}.fa-cogs:before{content:"\f085"}.fa-coins:before{content:"\f51e"}.fa-columns:before{content:"\f0db"}.fa-comment:before{content:"\f075"}.fa-comment-alt:before{content:"\f27a"}.fa-comment-dollar:before{content:"\f651"}.fa-comment-dots:before{content:"\f4ad"}.fa-comment-slash:before{content:"\f4b3"}.fa-comments:before{content:"\f086"}.fa-comments-dollar:before{content:"\f653"}.fa-compact-disc:before{content:"\f51f"}.fa-compass:before{content:"\f14e"}.fa-compress:before{content:"\f066"}.fa-compress-arrows-alt:before{content:"\f78c"}.fa-concierge-bell:before{content:"\f562"}.fa-confluence:before{content:"\f78d"}.fa-connectdevelop:before{content:"\f20e"}.fa-contao:before{content:"\f26d"}.fa-cookie:before{content:"\f563"}.fa-cookie-bite:before{content:"\f564"}.fa-copy:before{content:"\f0c5"}.fa-copyright:before{content:"\f1f9"}.fa-couch:before{content:"\f4b8"}.fa-cpanel:before{content:"\f388"}.fa-creative-commons:before{content:"\f25e"}.fa-creative-commons-by:before{content:"\f4e7"}.fa-creative-commons-nc:before{content:"\f4e8"}.fa-creative-commons-nc-eu:before{content:"\f4e9"}.fa-creative-commons-nc-jp:before{content:"\f4ea"}.fa-creative-commons-nd:before{content:"\f4eb"}.fa-creative-commons-pd:before{content:"\f4ec"}.fa-creative-commons-pd-alt:before{content:"\f4ed"}.fa-creative-commons-remix:before{content:"\f4ee"}.fa-creative-commons-sa:before{content:"\f4ef"}.fa-creative-commons-sampling:before{content:"\f4f0"}.fa-creative-commons-sampling-plus:before{content:"\f4f1"}.fa-creative-commons-share:before{content:"\f4f2"}.fa-creative-commons-zero:before{content:"\f4f3"}.fa-credit-card:before{content:"\f09d"}.fa-critical-role:before{content:"\f6c9"}.fa-crop:before{content:"\f125"}.fa-crop-alt:before{content:"\f565"}.fa-cross:before{conte
nt:"\f654"}.fa-crosshairs:before{content:"\f05b"}.fa-crow:before{content:"\f520"}.fa-crown:before{content:"\f521"}.fa-css3:before{content:"\f13c"}.fa-css3-alt:before{content:"\f38b"}.fa-cube:before{content:"\f1b2"}.fa-cubes:before{content:"\f1b3"}.fa-cut:before{content:"\f0c4"}.fa-cuttlefish:before{content:"\f38c"}.fa-d-and-d:before{content:"\f38d"}.fa-d-and-d-beyond:before{content:"\f6ca"}.fa-dashcube:before{content:"\f210"}.fa-database:before{content:"\f1c0"}.fa-deaf:before{content:"\f2a4"}.fa-delicious:before{content:"\f1a5"}.fa-democrat:before{content:"\f747"}.fa-deploydog:before{content:"\f38e"}.fa-deskpro:before{content:"\f38f"}.fa-desktop:before{content:"\f108"}.fa-dev:before{content:"\f6cc"}.fa-deviantart:before{content:"\f1bd"}.fa-dharmachakra:before{content:"\f655"}.fa-dhl:before{content:"\f790"}.fa-diagnoses:before{content:"\f470"}.fa-diaspora:before{content:"\f791"}.fa-dice:before{content:"\f522"}.fa-dice-d20:before{content:"\f6cf"}.fa-dice-d6:before{content:"\f6d1"}.fa-dice-five:before{content:"\f523"}.fa-dice-four:before{content:"\f524"}.fa-dice-one:before{content:"\f525"}.fa-dice-six:before{content:"\f526"}.fa-dice-three:before{content:"\f527"}.fa-dice-two:before{content:"\f528"}.fa-digg:before{content:"\f1a6"}.fa-digital-ocean:before{content:"\f391"}.fa-digital-tachograph:before{content:"\f566"}.fa-directions:before{content:"\f5eb"}.fa-discord:before{content:"\f392"}.fa-discourse:before{content:"\f393"}.fa-divide:before{content:"\f529"}.fa-dizzy:before{content:"\f567"}.fa-dna:before{content:"\f471"}.fa-dochub:before{content:"\f394"}.fa-docker:before{content:"\f395"}.fa-dog:before{content:"\f6d3"}.fa-dollar-sign:before{content:"\f155"}.fa-dolly:before{content:"\f472"}.fa-dolly-flatbed:before{content:"\f474"}.fa-donate:before{content:"\f4b9"}.fa-door-closed:before{content:"\f52a"}.fa-door-open:before{content:"\f52b"}.fa-dot-circle:before{content:"\f192"}.fa-dove:before{content:"\f4ba"}.fa-download:before{content:"\f019"}.fa-draft2digital:before{content
:"\f396"}.fa-drafting-compass:before{content:"\f568"}.fa-dragon:before{content:"\f6d5"}.fa-draw-polygon:before{content:"\f5ee"}.fa-dribbble:before{content:"\f17d"}.fa-dribbble-square:before{content:"\f397"}.fa-dropbox:before{content:"\f16b"}.fa-drum:before{content:"\f569"}.fa-drum-steelpan:before{content:"\f56a"}.fa-drumstick-bite:before{content:"\f6d7"}.fa-drupal:before{content:"\f1a9"}.fa-dumbbell:before{content:"\f44b"}.fa-dumpster:before{content:"\f793"}.fa-dumpster-fire:before{content:"\f794"}.fa-dungeon:before{content:"\f6d9"}.fa-dyalog:before{content:"\f399"}.fa-earlybirds:before{content:"\f39a"}.fa-ebay:before{content:"\f4f4"}.fa-edge:before{content:"\f282"}.fa-edit:before{content:"\f044"}.fa-eject:before{content:"\f052"}.fa-elementor:before{content:"\f430"}.fa-ellipsis-h:before{content:"\f141"}.fa-ellipsis-v:before{content:"\f142"}.fa-ello:before{content:"\f5f1"}.fa-ember:before{content:"\f423"}.fa-empire:before{content:"\f1d1"}.fa-envelope:before{content:"\f0e0"}.fa-envelope-open:before{content:"\f2b6"}.fa-envelope-open-text:before{content:"\f658"}.fa-envelope-square:before{content:"\f199"}.fa-envira:before{content:"\f299"}.fa-equals:before{content:"\f52c"}.fa-eraser:before{content:"\f12d"}.fa-erlang:before{content:"\f39d"}.fa-ethereum:before{content:"\f42e"}.fa-ethernet:before{content:"\f796"}.fa-etsy:before{content:"\f2d7"}.fa-euro-sign:before{content:"\f153"}.fa-exchange-alt:before{content:"\f362"}.fa-exclamation:before{content:"\f12a"}.fa-exclamation-circle:before{content:"\f06a"}.fa-exclamation-triangle:before{content:"\f071"}.fa-expand:before{content:"\f065"}.fa-expand-arrows-alt:before{content:"\f31e"}.fa-expeditedssl:before{content:"\f23e"}.fa-external-link-alt:before{content:"\f35d"}.fa-external-link-square-alt:before{content:"\f360"}.fa-eye:before{content:"\f06e"}.fa-eye-dropper:before{content:"\f1fb"}.fa-eye-slash:before{content:"\f070"}.fa-facebook:before{content:"\f09a"}.fa-facebook-f:before{content:"\f39e"}.fa-facebook-messenger:before{conten
t:"\f39f"}.fa-facebook-square:before{content:"\f082"}.fa-fantasy-flight-games:before{content:"\f6dc"}.fa-fast-backward:before{content:"\f049"}.fa-fast-forward:before{content:"\f050"}.fa-fax:before{content:"\f1ac"}.fa-feather:before{content:"\f52d"}.fa-feather-alt:before{content:"\f56b"}.fa-fedex:before{content:"\f797"}.fa-fedora:before{content:"\f798"}.fa-female:before{content:"\f182"}.fa-fighter-jet:before{content:"\f0fb"}.fa-figma:before{content:"\f799"}.fa-file:before{content:"\f15b"}.fa-file-alt:before{content:"\f15c"}.fa-file-archive:before{content:"\f1c6"}.fa-file-audio:before{content:"\f1c7"}.fa-file-code:before{content:"\f1c9"}.fa-file-contract:before{content:"\f56c"}.fa-file-csv:before{content:"\f6dd"}.fa-file-download:before{content:"\f56d"}.fa-file-excel:before{content:"\f1c3"}.fa-file-export:before{content:"\f56e"}.fa-file-image:before{content:"\f1c5"}.fa-file-import:before{content:"\f56f"}.fa-file-invoice:before{content:"\f570"}.fa-file-invoice-dollar:before{content:"\f571"}.fa-file-medical:before{content:"\f477"}.fa-file-medical-alt:before{content:"\f478"}.fa-file-pdf:before{content:"\f1c1"}.fa-file-powerpoint:before{content:"\f1c4"}.fa-file-prescription:before{content:"\f572"}.fa-file-signature:before{content:"\f573"}.fa-file-upload:before{content:"\f574"}.fa-file-video:before{content:"\f1c8"}.fa-file-word:before{content:"\f1c2"}.fa-fill:before{content:"\f575"}.fa-fill-drip:before{content:"\f576"}.fa-film:before{content:"\f008"}.fa-filter:before{content:"\f0b0"}.fa-fingerprint:before{content:"\f577"}.fa-fire:before{content:"\f06d"}.fa-fire-alt:before{content:"\f7e4"}.fa-fire-extinguisher:before{content:"\f134"}.fa-firefox:before{content:"\f269"}.fa-first-aid:before{content:"\f479"}.fa-first-order:before{content:"\f2b0"}.fa-first-order-alt:before{content:"\f50a"}.fa-firstdraft:before{content:"\f3a1"}.fa-fish:before{content:"\f578"}.fa-fist-raised:before{content:"\f6de"}.fa-flag:before{content:"\f024"}.fa-flag-checkered:before{content:"\f11e"}.fa-flag-u
sa:before{content:"\f74d"}.fa-flask:before{content:"\f0c3"}.fa-flickr:before{content:"\f16e"}.fa-flipboard:before{content:"\f44d"}.fa-flushed:before{content:"\f579"}.fa-fly:before{content:"\f417"}.fa-folder:before{content:"\f07b"}.fa-folder-minus:before{content:"\f65d"}.fa-folder-open:before{content:"\f07c"}.fa-folder-plus:before{content:"\f65e"}.fa-font:before{content:"\f031"}.fa-font-awesome:before{content:"\f2b4"}.fa-font-awesome-alt:before{content:"\f35c"}.fa-font-awesome-flag:before{content:"\f425"}.fa-font-awesome-logo-full:before{content:"\f4e6"}.fa-fonticons:before{content:"\f280"}.fa-fonticons-fi:before{content:"\f3a2"}.fa-football-ball:before{content:"\f44e"}.fa-fort-awesome:before{content:"\f286"}.fa-fort-awesome-alt:before{content:"\f3a3"}.fa-forumbee:before{content:"\f211"}.fa-forward:before{content:"\f04e"}.fa-foursquare:before{content:"\f180"}.fa-free-code-camp:before{content:"\f2c5"}.fa-freebsd:before{content:"\f3a4"}.fa-frog:before{content:"\f52e"}.fa-frown:before{content:"\f119"}.fa-frown-open:before{content:"\f57a"}.fa-fulcrum:before{content:"\f50b"}.fa-funnel-dollar:before{content:"\f662"}.fa-futbol:before{content:"\f1e3"}.fa-galactic-republic:before{content:"\f50c"}.fa-galactic-senate:before{content:"\f50d"}.fa-gamepad:before{content:"\f11b"}.fa-gas-pump:before{content:"\f52f"}.fa-gavel:before{content:"\f0e3"}.fa-gem:before{content:"\f3a5"}.fa-genderless:before{content:"\f22d"}.fa-get-pocket:before{content:"\f265"}.fa-gg:before{content:"\f260"}.fa-gg-circle:before{content:"\f261"}.fa-ghost:before{content:"\f6e2"}.fa-gift:before{content:"\f06b"}.fa-gifts:before{content:"\f79c"}.fa-git:before{content:"\f1d3"}.fa-git-square:before{content:"\f1d2"}.fa-github:before{content:"\f09b"}.fa-github-alt:before{content:"\f113"}.fa-github-square:before{content:"\f092"}.fa-gitkraken:before{content:"\f3a6"}.fa-gitlab:before{content:"\f296"}.fa-gitter:before{content:"\f426"}.fa-glass-cheers:before{content:"\f79f"}.fa-glass-martini:before{content:"\f000"}.fa-glas
s-martini-alt:before{content:"\f57b"}.fa-glass-whiskey:before{content:"\f7a0"}.fa-glasses:before{content:"\f530"}.fa-glide:before{content:"\f2a5"}.fa-glide-g:before{content:"\f2a6"}.fa-globe:before{content:"\f0ac"}.fa-globe-africa:before{content:"\f57c"}.fa-globe-americas:before{content:"\f57d"}.fa-globe-asia:before{content:"\f57e"}.fa-globe-europe:before{content:"\f7a2"}.fa-gofore:before{content:"\f3a7"}.fa-golf-ball:before{content:"\f450"}.fa-goodreads:before{content:"\f3a8"}.fa-goodreads-g:before{content:"\f3a9"}.fa-google:before{content:"\f1a0"}.fa-google-drive:before{content:"\f3aa"}.fa-google-play:before{content:"\f3ab"}.fa-google-plus:before{content:"\f2b3"}.fa-google-plus-g:before{content:"\f0d5"}.fa-google-plus-square:before{content:"\f0d4"}.fa-google-wallet:before{content:"\f1ee"}.fa-gopuram:before{content:"\f664"}.fa-graduation-cap:before{content:"\f19d"}.fa-gratipay:before{content:"\f184"}.fa-grav:before{content:"\f2d6"}.fa-greater-than:before{content:"\f531"}.fa-greater-than-equal:before{content:"\f532"}.fa-grimace:before{content:"\f57f"}.fa-grin:before{content:"\f580"}.fa-grin-alt:before{content:"\f581"}.fa-grin-beam:before{content:"\f582"}.fa-grin-beam-sweat:before{content:"\f583"}.fa-grin-hearts:before{content:"\f584"}.fa-grin-squint:before{content:"\f585"}.fa-grin-squint-tears:before{content:"\f586"}.fa-grin-stars:before{content:"\f587"}.fa-grin-tears:before{content:"\f588"}.fa-grin-tongue:before{content:"\f589"}.fa-grin-tongue-squint:before{content:"\f58a"}.fa-grin-tongue-wink:before{content:"\f58b"}.fa-grin-wink:before{content:"\f58c"}.fa-grip-horizontal:before{content:"\f58d"}.fa-grip-lines:before{content:"\f7a4"}.fa-grip-lines-vertical:before{content:"\f7a5"}.fa-grip-vertical:before{content:"\f58e"}.fa-gripfire:before{content:"\f3ac"}.fa-grunt:before{content:"\f3ad"}.fa-guitar:before{content:"\f7a6"}.fa-gulp:before{content:"\f3ae"}.fa-h-square:before{content:"\f0fd"}.fa-hacker-news:before{content:"\f1d4"}.fa-hacker-news-square:before{content:"\f
3af"}.fa-hackerrank:before{content:"\f5f7"}.fa-hammer:before{content:"\f6e3"}.fa-hamsa:before{content:"\f665"}.fa-hand-holding:before{content:"\f4bd"}.fa-hand-holding-heart:before{content:"\f4be"}.fa-hand-holding-usd:before{content:"\f4c0"}.fa-hand-lizard:before{content:"\f258"}.fa-hand-paper:before{content:"\f256"}.fa-hand-peace:before{content:"\f25b"}.fa-hand-point-down:before{content:"\f0a7"}.fa-hand-point-left:before{content:"\f0a5"}.fa-hand-point-right:before{content:"\f0a4"}.fa-hand-point-up:before{content:"\f0a6"}.fa-hand-pointer:before{content:"\f25a"}.fa-hand-rock:before{content:"\f255"}.fa-hand-scissors:before{content:"\f257"}.fa-hand-spock:before{content:"\f259"}.fa-hands:before{content:"\f4c2"}.fa-hands-helping:before{content:"\f4c4"}.fa-handshake:before{content:"\f2b5"}.fa-hanukiah:before{content:"\f6e6"}.fa-hashtag:before{content:"\f292"}.fa-hat-wizard:before{content:"\f6e8"}.fa-haykal:before{content:"\f666"}.fa-hdd:before{content:"\f0a0"}.fa-heading:before{content:"\f1dc"}.fa-headphones:before{content:"\f025"}.fa-headphones-alt:before{content:"\f58f"}.fa-headset:before{content:"\f590"}.fa-heart:before{content:"\f004"}.fa-heart-broken:before{content:"\f7a9"}.fa-heartbeat:before{content:"\f21e"}.fa-helicopter:before{content:"\f533"}.fa-highlighter:before{content:"\f591"}.fa-hiking:before{content:"\f6ec"}.fa-hippo:before{content:"\f6ed"}.fa-hips:before{content:"\f452"}.fa-hire-a-helper:before{content:"\f3b0"}.fa-history:before{content:"\f1da"}.fa-hockey-puck:before{content:"\f453"}.fa-holly-berry:before{content:"\f7aa"}.fa-home:before{content:"\f015"}.fa-hooli:before{content:"\f427"}.fa-hornbill:before{content:"\f592"}.fa-horse:before{content:"\f6f0"}.fa-horse-head:before{content:"\f7ab"}.fa-hospital:before{content:"\f0f8"}.fa-hospital-alt:before{content:"\f47d"}.fa-hospital-symbol:before{content:"\f47e"}.fa-hot-tub:before{content:"\f593"}.fa-hotel:before{content:"\f594"}.fa-hotjar:before{content:"\f3b1"}.fa-hourglass:before{content:"\f254"}.fa-hourglass
-end:before{content:"\f253"}.fa-hourglass-half:before{content:"\f252"}.fa-hourglass-start:before{content:"\f251"}.fa-house-damage:before{content:"\f6f1"}.fa-houzz:before{content:"\f27c"}.fa-hryvnia:before{content:"\f6f2"}.fa-html5:before{content:"\f13b"}.fa-hubspot:before{content:"\f3b2"}.fa-i-cursor:before{content:"\f246"}.fa-icicles:before{content:"\f7ad"}.fa-id-badge:before{content:"\f2c1"}.fa-id-card:before{content:"\f2c2"}.fa-id-card-alt:before{content:"\f47f"}.fa-igloo:before{content:"\f7ae"}.fa-image:before{content:"\f03e"}.fa-images:before{content:"\f302"}.fa-imdb:before{content:"\f2d8"}.fa-inbox:before{content:"\f01c"}.fa-indent:before{content:"\f03c"}.fa-industry:before{content:"\f275"}.fa-infinity:before{content:"\f534"}.fa-info:before{content:"\f129"}.fa-info-circle:before{content:"\f05a"}.fa-instagram:before{content:"\f16d"}.fa-intercom:before{content:"\f7af"}.fa-internet-explorer:before{content:"\f26b"}.fa-invision:before{content:"\f7b0"}.fa-ioxhost:before{content:"\f208"}.fa-italic:before{content:"\f033"}.fa-itunes:before{content:"\f3b4"}.fa-itunes-note:before{content:"\f3b5"}.fa-java:before{content:"\f4e4"}.fa-jedi:before{content:"\f669"}.fa-jedi-order:before{content:"\f50e"}.fa-jenkins:before{content:"\f3b6"}.fa-jira:before{content:"\f7b1"}.fa-joget:before{content:"\f3b7"}.fa-joint:before{content:"\f595"}.fa-joomla:before{content:"\f1aa"}.fa-journal-whills:before{content:"\f66a"}.fa-js:before{content:"\f3b8"}.fa-js-square:before{content:"\f3b9"}.fa-jsfiddle:before{content:"\f1cc"}.fa-kaaba:before{content:"\f66b"}.fa-kaggle:before{content:"\f5fa"}.fa-key:before{content:"\f084"}.fa-keybase:before{content:"\f4f5"}.fa-keyboard:before{content:"\f11c"}.fa-keycdn:before{content:"\f3ba"}.fa-khanda:before{content:"\f66d"}.fa-kickstarter:before{content:"\f3bb"}.fa-kickstarter-k:before{content:"\f3bc"}.fa-kiss:before{content:"\f596"}.fa-kiss-beam:before{content:"\f597"}.fa-kiss-wink-heart:before{content:"\f598"}.fa-kiwi-bird:before{content:"\f535"}.fa-korvue:b
efore{content:"\f42f"}.fa-landmark:before{content:"\f66f"}.fa-language:before{content:"\f1ab"}.fa-laptop:before{content:"\f109"}.fa-laptop-code:before{content:"\f5fc"}.fa-laravel:before{content:"\f3bd"}.fa-lastfm:before{content:"\f202"}.fa-lastfm-square:before{content:"\f203"}.fa-laugh:before{content:"\f599"}.fa-laugh-beam:before{content:"\f59a"}.fa-laugh-squint:before{content:"\f59b"}.fa-laugh-wink:before{content:"\f59c"}.fa-layer-group:before{content:"\f5fd"}.fa-leaf:before{content:"\f06c"}.fa-leanpub:before{content:"\f212"}.fa-lemon:before{content:"\f094"}.fa-less:before{content:"\f41d"}.fa-less-than:before{content:"\f536"}.fa-less-than-equal:before{content:"\f537"}.fa-level-down-alt:before{content:"\f3be"}.fa-level-up-alt:before{content:"\f3bf"}.fa-life-ring:before{content:"\f1cd"}.fa-lightbulb:before{content:"\f0eb"}.fa-line:before{content:"\f3c0"}.fa-link:before{content:"\f0c1"}.fa-linkedin:before{content:"\f08c"}.fa-linkedin-in:before{content:"\f0e1"}.fa-linode:before{content:"\f2b8"}.fa-linux:before{content:"\f17c"}.fa-lira-sign:before{content:"\f195"}.fa-list:before{content:"\f03a"}.fa-list-alt:before{content:"\f022"}.fa-list-ol:before{content:"\f0cb"}.fa-list-ul:before{content:"\f0ca"}.fa-location-arrow:before{content:"\f124"}.fa-lock:before{content:"\f023"}.fa-lock-open:before{content:"\f3c1"}.fa-long-arrow-alt-down:before{content:"\f309"}.fa-long-arrow-alt-left:before{content:"\f30a"}.fa-long-arrow-alt-right:before{content:"\f30b"}.fa-long-arrow-alt-up:before{content:"\f30c"}.fa-low-vision:before{content:"\f2a8"}.fa-luggage-cart:before{content:"\f59d"}.fa-lyft:before{content:"\f3c3"}.fa-magento:before{content:"\f3c4"}.fa-magic:before{content:"\f0d0"}.fa-magnet:before{content:"\f076"}.fa-mail-bulk:before{content:"\f674"}.fa-mailchimp:before{content:"\f59e"}.fa-male:before{content:"\f183"}.fa-mandalorian:before{content:"\f50f"}.fa-map:before{content:"\f279"}.fa-map-marked:before{content:"\f59f"}.fa-map-marked-alt:before{content:"\f5a0"}.fa-map-marker:befor
e{content:"\f041"}.fa-map-marker-alt:before{content:"\f3c5"}.fa-map-pin:before{content:"\f276"}.fa-map-signs:before{content:"\f277"}.fa-markdown:before{content:"\f60f"}.fa-marker:before{content:"\f5a1"}.fa-mars:before{content:"\f222"}.fa-mars-double:before{content:"\f227"}.fa-mars-stroke:before{content:"\f229"}.fa-mars-stroke-h:before{content:"\f22b"}.fa-mars-stroke-v:before{content:"\f22a"}.fa-mask:before{content:"\f6fa"}.fa-mastodon:before{content:"\f4f6"}.fa-maxcdn:before{content:"\f136"}.fa-medal:before{content:"\f5a2"}.fa-medapps:before{content:"\f3c6"}.fa-medium:before{content:"\f23a"}.fa-medium-m:before{content:"\f3c7"}.fa-medkit:before{content:"\f0fa"}.fa-medrt:before{content:"\f3c8"}.fa-meetup:before{content:"\f2e0"}.fa-megaport:before{content:"\f5a3"}.fa-meh:before{content:"\f11a"}.fa-meh-blank:before{content:"\f5a4"}.fa-meh-rolling-eyes:before{content:"\f5a5"}.fa-memory:before{content:"\f538"}.fa-mendeley:before{content:"\f7b3"}.fa-menorah:before{content:"\f676"}.fa-mercury:before{content:"\f223"}.fa-meteor:before{content:"\f753"}.fa-microchip:before{content:"\f2db"}.fa-microphone:before{content:"\f130"}.fa-microphone-alt:before{content:"\f3c9"}.fa-microphone-alt-slash:before{content:"\f539"}.fa-microphone-slash:before{content:"\f131"}.fa-microscope:before{content:"\f610"}.fa-microsoft:before{content:"\f3ca"}.fa-minus:before{content:"\f068"}.fa-minus-circle:before{content:"\f056"}.fa-minus-square:before{content:"\f146"}.fa-mitten:before{content:"\f7b5"}.fa-mix:before{content:"\f3cb"}.fa-mixcloud:before{content:"\f289"}.fa-mizuni:before{content:"\f3cc"}.fa-mobile:before{content:"\f10b"}.fa-mobile-alt:before{content:"\f3cd"}.fa-modx:before{content:"\f285"}.fa-monero:before{content:"\f3d0"}.fa-money-bill:before{content:"\f0d6"}.fa-money-bill-alt:before{content:"\f3d1"}.fa-money-bill-wave:before{content:"\f53a"}.fa-money-bill-wave-alt:before{content:"\f53b"}.fa-money-check:before{content:"\f53c"}.fa-money-check-alt:before{content:"\f53d"}.fa-monument:before{c
ontent:"\f5a6"}.fa-moon:before{content:"\f186"}.fa-mortar-pestle:before{content:"\f5a7"}.fa-mosque:before{content:"\f678"}.fa-motorcycle:before{content:"\f21c"}.fa-mountain:before{content:"\f6fc"}.fa-mouse-pointer:before{content:"\f245"}.fa-mug-hot:before{content:"\f7b6"}.fa-music:before{content:"\f001"}.fa-napster:before{content:"\f3d2"}.fa-neos:before{content:"\f612"}.fa-network-wired:before{content:"\f6ff"}.fa-neuter:before{content:"\f22c"}.fa-newspaper:before{content:"\f1ea"}.fa-nimblr:before{content:"\f5a8"}.fa-nintendo-switch:before{content:"\f418"}.fa-node:before{content:"\f419"}.fa-node-js:before{content:"\f3d3"}.fa-not-equal:before{content:"\f53e"}.fa-notes-medical:before{content:"\f481"}.fa-npm:before{content:"\f3d4"}.fa-ns8:before{content:"\f3d5"}.fa-nutritionix:before{content:"\f3d6"}.fa-object-group:before{content:"\f247"}.fa-object-ungroup:before{content:"\f248"}.fa-odnoklassniki:before{content:"\f263"}.fa-odnoklassniki-square:before{content:"\f264"}.fa-oil-can:before{content:"\f613"}.fa-old-republic:before{content:"\f510"}.fa-om:before{content:"\f679"}.fa-opencart:before{content:"\f23d"}.fa-openid:before{content:"\f19b"}.fa-opera:before{content:"\f26a"}.fa-optin-monster:before{content:"\f23c"}.fa-osi:before{content:"\f41a"}.fa-otter:before{content:"\f700"}.fa-outdent:before{content:"\f03b"}.fa-page4:before{content:"\f3d7"}.fa-pagelines:before{content:"\f18c"}.fa-paint-brush:before{content:"\f1fc"}.fa-paint-roller:before{content:"\f5aa"}.fa-palette:before{content:"\f53f"}.fa-palfed:before{content:"\f3d8"}.fa-pallet:before{content:"\f482"}.fa-paper-plane:before{content:"\f1d8"}.fa-paperclip:before{content:"\f0c6"}.fa-parachute-box:before{content:"\f4cd"}.fa-paragraph:before{content:"\f1dd"}.fa-parking:before{content:"\f540"}.fa-passport:before{content:"\f5ab"}.fa-pastafarianism:before{content:"\f67b"}.fa-paste:before{content:"\f0ea"}.fa-patreon:before{content:"\f3d9"}.fa-pause:before{content:"\f04c"}.fa-pause-circle:before{content:"\f28b"}.fa-paw:before
{content:"\f1b0"}.fa-paypal:before{content:"\f1ed"}.fa-peace:before{content:"\f67c"}.fa-pen:before{content:"\f304"}.fa-pen-alt:before{content:"\f305"}.fa-pen-fancy:before{content:"\f5ac"}.fa-pen-nib:before{content:"\f5ad"}.fa-pen-square:before{content:"\f14b"}.fa-pencil-alt:before{content:"\f303"}.fa-pencil-ruler:before{content:"\f5ae"}.fa-penny-arcade:before{content:"\f704"}.fa-people-carry:before{content:"\f4ce"}.fa-percent:before{content:"\f295"}.fa-percentage:before{content:"\f541"}.fa-periscope:before{content:"\f3da"}.fa-person-booth:before{content:"\f756"}.fa-phabricator:before{content:"\f3db"}.fa-phoenix-framework:before{content:"\f3dc"}.fa-phoenix-squadron:before{content:"\f511"}.fa-phone:before{content:"\f095"}.fa-phone-slash:before{content:"\f3dd"}.fa-phone-square:before{content:"\f098"}.fa-phone-volume:before{content:"\f2a0"}.fa-php:before{content:"\f457"}.fa-pied-piper:before{content:"\f2ae"}.fa-pied-piper-alt:before{content:"\f1a8"}.fa-pied-piper-hat:before{content:"\f4e5"}.fa-pied-piper-pp:before{content:"\f1a7"}.fa-piggy-bank:before{content:"\f4d3"}.fa-pills:before{content:"\f484"}.fa-pinterest:before{content:"\f0d2"}.fa-pinterest-p:before{content:"\f231"}.fa-pinterest-square:before{content:"\f0d3"}.fa-place-of-worship:before{content:"\f67f"}.fa-plane:before{content:"\f072"}.fa-plane-arrival:before{content:"\f5af"}.fa-plane-departure:before{content:"\f5b0"}.fa-play:before{content:"\f04b"}.fa-play-circle:before{content:"\f144"}.fa-playstation:before{content:"\f3df"}.fa-plug:before{content:"\f1e6"}.fa-plus:before{content:"\f067"}.fa-plus-circle:before{content:"\f055"}.fa-plus-square:before{content:"\f0fe"}.fa-podcast:before{content:"\f2ce"}.fa-poll:before{content:"\f681"}.fa-poll-h:before{content:"\f682"}.fa-poo:before{content:"\f2fe"}.fa-poo-storm:before{content:"\f75a"}.fa-poop:before{content:"\f619"}.fa-portrait:before{content:"\f3e0"}.fa-pound-sign:before{content:"\f154"}.fa-power-off:before{content:"\f011"}.fa-pray:before{content:"\f683"}.fa-prayin
g-hands:before{content:"\f684"}.fa-prescription:before{content:"\f5b1"}.fa-prescription-bottle:before{content:"\f485"}.fa-prescription-bottle-alt:before{content:"\f486"}.fa-print:before{content:"\f02f"}.fa-procedures:before{content:"\f487"}.fa-product-hunt:before{content:"\f288"}.fa-project-diagram:before{content:"\f542"}.fa-pushed:before{content:"\f3e1"}.fa-puzzle-piece:before{content:"\f12e"}.fa-python:before{content:"\f3e2"}.fa-qq:before{content:"\f1d6"}.fa-qrcode:before{content:"\f029"}.fa-question:before{content:"\f128"}.fa-question-circle:before{content:"\f059"}.fa-quidditch:before{content:"\f458"}.fa-quinscape:before{content:"\f459"}.fa-quora:before{content:"\f2c4"}.fa-quote-left:before{content:"\f10d"}.fa-quote-right:before{content:"\f10e"}.fa-quran:before{content:"\f687"}.fa-r-project:before{content:"\f4f7"}.fa-radiation:before{content:"\f7b9"}.fa-radiation-alt:before{content:"\f7ba"}.fa-rainbow:before{content:"\f75b"}.fa-random:before{content:"\f074"}.fa-raspberry-pi:before{content:"\f7bb"}.fa-ravelry:before{content:"\f2d9"}.fa-react:before{content:"\f41b"}.fa-reacteurope:before{content:"\f75d"}.fa-readme:before{content:"\f4d5"}.fa-rebel:before{content:"\f1d0"}.fa-receipt:before{content:"\f543"}.fa-recycle:before{content:"\f1b8"}.fa-red-river:before{content:"\f3e3"}.fa-reddit:before{content:"\f1a1"}.fa-reddit-alien:before{content:"\f281"}.fa-reddit-square:before{content:"\f1a2"}.fa-redhat:before{content:"\f7bc"}.fa-redo:before{content:"\f01e"}.fa-redo-alt:before{content:"\f2f9"}.fa-registered:before{content:"\f25d"}.fa-renren:before{content:"\f18b"}.fa-reply:before{content:"\f3e5"}.fa-reply-all:before{content:"\f122"}.fa-replyd:before{content:"\f3e6"}.fa-republican:before{content:"\f75e"}.fa-researchgate:before{content:"\f4f8"}.fa-resolving:before{content:"\f3e7"}.fa-restroom:before{content:"\f7bd"}.fa-retweet:before{content:"\f079"}.fa-rev:before{content:"\f5b2"}.fa-ribbon:before{content:"\f4d6"}.fa-ring:before{content:"\f70b"}.fa-road:before{content:"\f0
18"}.fa-robot:before{content:"\f544"}.fa-rocket:before{content:"\f135"}.fa-rocketchat:before{content:"\f3e8"}.fa-rockrms:before{content:"\f3e9"}.fa-route:before{content:"\f4d7"}.fa-rss:before{content:"\f09e"}.fa-rss-square:before{content:"\f143"}.fa-ruble-sign:before{content:"\f158"}.fa-ruler:before{content:"\f545"}.fa-ruler-combined:before{content:"\f546"}.fa-ruler-horizontal:before{content:"\f547"}.fa-ruler-vertical:before{content:"\f548"}.fa-running:before{content:"\f70c"}.fa-rupee-sign:before{content:"\f156"}.fa-sad-cry:before{content:"\f5b3"}.fa-sad-tear:before{content:"\f5b4"}.fa-safari:before{content:"\f267"}.fa-sass:before{content:"\f41e"}.fa-satellite:before{content:"\f7bf"}.fa-satellite-dish:before{content:"\f7c0"}.fa-save:before{content:"\f0c7"}.fa-schlix:before{content:"\f3ea"}.fa-school:before{content:"\f549"}.fa-screwdriver:before{content:"\f54a"}.fa-scribd:before{content:"\f28a"}.fa-scroll:before{content:"\f70e"}.fa-sd-card:before{content:"\f7c2"}.fa-search:before{content:"\f002"}.fa-search-dollar:before{content:"\f688"}.fa-search-location:before{content:"\f689"}.fa-search-minus:before{content:"\f010"}.fa-search-plus:before{content:"\f00e"}.fa-searchengin:before{content:"\f3eb"}.fa-seedling:before{content:"\f4d8"}.fa-sellcast:before{content:"\f2da"}.fa-sellsy:before{content:"\f213"}.fa-server:before{content:"\f233"}.fa-servicestack:before{content:"\f3ec"}.fa-shapes:before{content:"\f61f"}.fa-share:before{content:"\f064"}.fa-share-alt:before{content:"\f1e0"}.fa-share-alt-square:before{content:"\f1e1"}.fa-share-square:before{content:"\f14d"}.fa-shekel-sign:before{content:"\f20b"}.fa-shield-alt:before{content:"\f3ed"}.fa-ship:before{content:"\f21a"}.fa-shipping-fast:before{content:"\f48b"}.fa-shirtsinbulk:before{content:"\f214"}.fa-shoe-prints:before{content:"\f54b"}.fa-shopping-bag:before{content:"\f290"}.fa-shopping-basket:before{content:"\f291"}.fa-shopping-cart:before{content:"\f07a"}.fa-shopware:before{content:"\f5b5"}.fa-shower:before{content:"\f2c
c"}.fa-shuttle-van:before{content:"\f5b6"}.fa-sign:before{content:"\f4d9"}.fa-sign-in-alt:before{content:"\f2f6"}.fa-sign-language:before{content:"\f2a7"}.fa-sign-out-alt:before{content:"\f2f5"}.fa-signal:before{content:"\f012"}.fa-signature:before{content:"\f5b7"}.fa-sim-card:before{content:"\f7c4"}.fa-simplybuilt:before{content:"\f215"}.fa-sistrix:before{content:"\f3ee"}.fa-sitemap:before{content:"\f0e8"}.fa-sith:before{content:"\f512"}.fa-skating:before{content:"\f7c5"}.fa-sketch:before{content:"\f7c6"}.fa-skiing:before{content:"\f7c9"}.fa-skiing-nordic:before{content:"\f7ca"}.fa-skull:before{content:"\f54c"}.fa-skull-crossbones:before{content:"\f714"}.fa-skyatlas:before{content:"\f216"}.fa-skype:before{content:"\f17e"}.fa-slack:before{content:"\f198"}.fa-slack-hash:before{content:"\f3ef"}.fa-slash:before{content:"\f715"}.fa-sleigh:before{content:"\f7cc"}.fa-sliders-h:before{content:"\f1de"}.fa-slideshare:before{content:"\f1e7"}.fa-smile:before{content:"\f118"}.fa-smile-beam:before{content:"\f5b8"}.fa-smile-wink:before{content:"\f4da"}.fa-smog:before{content:"\f75f"}.fa-smoking:before{content:"\f48d"}.fa-smoking-ban:before{content:"\f54d"}.fa-sms:before{content:"\f7cd"}.fa-snapchat:before{content:"\f2ab"}.fa-snapchat-ghost:before{content:"\f2ac"}.fa-snapchat-square:before{content:"\f2ad"}.fa-snowboarding:before{content:"\f7ce"}.fa-snowflake:before{content:"\f2dc"}.fa-snowman:before{content:"\f7d0"}.fa-snowplow:before{content:"\f7d2"}.fa-socks:before{content:"\f696"}.fa-solar-panel:before{content:"\f5ba"}.fa-sort:before{content:"\f0dc"}.fa-sort-alpha-down:before{content:"\f15d"}.fa-sort-alpha-up:before{content:"\f15e"}.fa-sort-amount-down:before{content:"\f160"}.fa-sort-amount-up:before{content:"\f161"}.fa-sort-down:before{content:"\f0dd"}.fa-sort-numeric-down:before{content:"\f162"}.fa-sort-numeric-up:before{content:"\f163"}.fa-sort-up:before{content:"\f0de"}.fa-soundcloud:before{content:"\f1be"}.fa-sourcetree:before{content:"\f7d3"}.fa-spa:before{content:"\f5bb"
}.fa-space-shuttle:before{content:"\f197"}.fa-speakap:before{content:"\f3f3"}.fa-spider:before{content:"\f717"}.fa-spinner:before{content:"\f110"}.fa-splotch:before{content:"\f5bc"}.fa-spotify:before{content:"\f1bc"}.fa-spray-can:before{content:"\f5bd"}.fa-square:before{content:"\f0c8"}.fa-square-full:before{content:"\f45c"}.fa-square-root-alt:before{content:"\f698"}.fa-squarespace:before{content:"\f5be"}.fa-stack-exchange:before{content:"\f18d"}.fa-stack-overflow:before{content:"\f16c"}.fa-stamp:before{content:"\f5bf"}.fa-star:before{content:"\f005"}.fa-star-and-crescent:before{content:"\f699"}.fa-star-half:before{content:"\f089"}.fa-star-half-alt:before{content:"\f5c0"}.fa-star-of-david:before{content:"\f69a"}.fa-star-of-life:before{content:"\f621"}.fa-staylinked:before{content:"\f3f5"}.fa-steam:before{content:"\f1b6"}.fa-steam-square:before{content:"\f1b7"}.fa-steam-symbol:before{content:"\f3f6"}.fa-step-backward:before{content:"\f048"}.fa-step-forward:before{content:"\f051"}.fa-stethoscope:before{content:"\f0f1"}.fa-sticker-mule:before{content:"\f3f7"}.fa-sticky-note:before{content:"\f249"}.fa-stop:before{content:"\f04d"}.fa-stop-circle:before{content:"\f28d"}.fa-stopwatch:before{content:"\f2f2"}.fa-store:before{content:"\f54e"}.fa-store-alt:before{content:"\f54f"}.fa-strava:before{content:"\f428"}.fa-stream:before{content:"\f550"}.fa-street-view:before{content:"\f21d"}.fa-strikethrough:before{content:"\f0cc"}.fa-stripe:before{content:"\f429"}.fa-stripe-s:before{content:"\f42a"}.fa-stroopwafel:before{content:"\f551"}.fa-studiovinari:before{content:"\f3f8"}.fa-stumbleupon:before{content:"\f1a4"}.fa-stumbleupon-circle:before{content:"\f1a3"}.fa-subscript:before{content:"\f12c"}.fa-subway:before{content:"\f239"}.fa-suitcase:before{content:"\f0f2"}.fa-suitcase-rolling:before{content:"\f5c1"}.fa-sun:before{content:"\f185"}.fa-superpowers:before{content:"\f2dd"}.fa-superscript:before{content:"\f12b"}.fa-supple:before{content:"\f3f9"}.fa-surprise:before{content:"\f5c2"
}.fa-suse:before{content:"\f7d6"}.fa-swatchbook:before{content:"\f5c3"}.fa-swimmer:before{content:"\f5c4"}.fa-swimming-pool:before{content:"\f5c5"}.fa-synagogue:before{content:"\f69b"}.fa-sync:before{content:"\f021"}.fa-sync-alt:before{content:"\f2f1"}.fa-syringe:before{content:"\f48e"}.fa-table:before{content:"\f0ce"}.fa-table-tennis:before{content:"\f45d"}.fa-tablet:before{content:"\f10a"}.fa-tablet-alt:before{content:"\f3fa"}.fa-tablets:before{content:"\f490"}.fa-tachometer-alt:before{content:"\f3fd"}.fa-tag:before{content:"\f02b"}.fa-tags:before{content:"\f02c"}.fa-tape:before{content:"\f4db"}.fa-tasks:before{content:"\f0ae"}.fa-taxi:before{content:"\f1ba"}.fa-teamspeak:before{content:"\f4f9"}.fa-teeth:before{content:"\f62e"}.fa-teeth-open:before{content:"\f62f"}.fa-telegram:before{content:"\f2c6"}.fa-telegram-plane:before{content:"\f3fe"}.fa-temperature-high:before{content:"\f769"}.fa-temperature-low:before{content:"\f76b"}.fa-tencent-weibo:before{content:"\f1d5"}.fa-tenge:before{content:"\f7d7"}.fa-terminal:before{content:"\f120"}.fa-text-height:before{content:"\f034"}.fa-text-width:before{content:"\f035"}.fa-th:before{content:"\f00a"}.fa-th-large:before{content:"\f009"}.fa-th-list:before{content:"\f00b"}.fa-the-red-yeti:before{content:"\f69d"}.fa-theater-masks:before{content:"\f630"}.fa-themeco:before{content:"\f5c6"}.fa-themeisle:before{content:"\f2b2"}.fa-thermometer:before{content:"\f491"}.fa-thermometer-empty:before{content:"\f2cb"}.fa-thermometer-full:before{content:"\f2c7"}.fa-thermometer-half:before{content:"\f2c9"}.fa-thermometer-quarter:before{content:"\f2ca"}.fa-thermometer-three-quarters:before{content:"\f2c8"}.fa-think-peaks:before{content:"\f731"}.fa-thumbs-down:before{content:"\f165"}.fa-thumbs-up:before{content:"\f164"}.fa-thumbtack:before{content:"\f08d"}.fa-ticket-alt:before{content:"\f3ff"}.fa-times:before{content:"\f00d"}.fa-times-circle:before{content:"\f057"}.fa-tint:before{content:"\f043"}.fa-tint-slash:before{content:"\f5c7"}.fa-tired:b
efore{content:"\f5c8"}.fa-toggle-off:before{content:"\f204"}.fa-toggle-on:before{content:"\f205"}.fa-toilet:before{content:"\f7d8"}.fa-toilet-paper:before{content:"\f71e"}.fa-toolbox:before{content:"\f552"}.fa-tools:before{content:"\f7d9"}.fa-tooth:before{content:"\f5c9"}.fa-torah:before{content:"\f6a0"}.fa-torii-gate:before{content:"\f6a1"}.fa-tractor:before{content:"\f722"}.fa-trade-federation:before{content:"\f513"}.fa-trademark:before{content:"\f25c"}.fa-traffic-light:before{content:"\f637"}.fa-train:before{content:"\f238"}.fa-tram:before{content:"\f7da"}.fa-transgender:before{content:"\f224"}.fa-transgender-alt:before{content:"\f225"}.fa-trash:before{content:"\f1f8"}.fa-trash-alt:before{content:"\f2ed"}.fa-tree:before{content:"\f1bb"}.fa-trello:before{content:"\f181"}.fa-tripadvisor:before{content:"\f262"}.fa-trophy:before{content:"\f091"}.fa-truck:before{content:"\f0d1"}.fa-truck-loading:before{content:"\f4de"}.fa-truck-monster:before{content:"\f63b"}.fa-truck-moving:before{content:"\f4df"}.fa-truck-pickup:before{content:"\f63c"}.fa-tshirt:before{content:"\f553"}.fa-tty:before{content:"\f1e4"}.fa-tumblr:before{content:"\f173"}.fa-tumblr-square:before{content:"\f174"}.fa-tv:before{content:"\f26c"}.fa-twitch:before{content:"\f1e8"}.fa-twitter:before{content:"\f099"}.fa-twitter-square:before{content:"\f081"}.fa-typo3:before{content:"\f42b"}.fa-uber:before{content:"\f402"}.fa-ubuntu:before{content:"\f7df"}.fa-uikit:before{content:"\f403"}.fa-umbrella:before{content:"\f0e9"}.fa-umbrella-beach:before{content:"\f5ca"}.fa-underline:before{content:"\f0cd"}.fa-undo:before{content:"\f0e2"}.fa-undo-alt:before{content:"\f2ea"}.fa-uniregistry:before{content:"\f404"}.fa-universal-access:before{content:"\f29a"}.fa-university:before{content:"\f19c"}.fa-unlink:before{content:"\f127"}.fa-unlock:before{content:"\f09c"}.fa-unlock-alt:before{content:"\f13e"}.fa-untappd:before{content:"\f405"}.fa-upload:before{content:"\f093"}.fa-ups:before{content:"\f7e0"}.fa-usb:before{content:"\f
287"}.fa-user:before{content:"\f007"}.fa-user-alt:before{content:"\f406"}.fa-user-alt-slash:before{content:"\f4fa"}.fa-user-astronaut:before{content:"\f4fb"}.fa-user-check:before{content:"\f4fc"}.fa-user-circle:before{content:"\f2bd"}.fa-user-clock:before{content:"\f4fd"}.fa-user-cog:before{content:"\f4fe"}.fa-user-edit:before{content:"\f4ff"}.fa-user-friends:before{content:"\f500"}.fa-user-graduate:before{content:"\f501"}.fa-user-injured:before{content:"\f728"}.fa-user-lock:before{content:"\f502"}.fa-user-md:before{content:"\f0f0"}.fa-user-minus:before{content:"\f503"}.fa-user-ninja:before{content:"\f504"}.fa-user-plus:before{content:"\f234"}.fa-user-secret:before{content:"\f21b"}.fa-user-shield:before{content:"\f505"}.fa-user-slash:before{content:"\f506"}.fa-user-tag:before{content:"\f507"}.fa-user-tie:before{content:"\f508"}.fa-user-times:before{content:"\f235"}.fa-users:before{content:"\f0c0"}.fa-users-cog:before{content:"\f509"}.fa-usps:before{content:"\f7e1"}.fa-ussunnah:before{content:"\f407"}.fa-utensil-spoon:before{content:"\f2e5"}.fa-utensils:before{content:"\f2e7"}.fa-vaadin:before{content:"\f408"}.fa-vector-square:before{content:"\f5cb"}.fa-venus:before{content:"\f221"}.fa-venus-double:before{content:"\f226"}.fa-venus-mars:before{content:"\f228"}.fa-viacoin:before{content:"\f237"}.fa-viadeo:before{content:"\f2a9"}.fa-viadeo-square:before{content:"\f2aa"}.fa-vial:before{content:"\f492"}.fa-vials:before{content:"\f493"}.fa-viber:before{content:"\f409"}.fa-video:before{content:"\f03d"}.fa-video-slash:before{content:"\f4e2"}.fa-vihara:before{content:"\f6a7"}.fa-vimeo:before{content:"\f40a"}.fa-vimeo-square:before{content:"\f194"}.fa-vimeo-v:before{content:"\f27d"}.fa-vine:before{content:"\f1ca"}.fa-vk:before{content:"\f189"}.fa-vnv:before{content:"\f40b"}.fa-volleyball-ball:before{content:"\f45f"}.fa-volume-down:before{content:"\f027"}.fa-volume-mute:before{content:"\f6a9"}.fa-volume-off:before{content:"\f026"}.fa-volume-up:before{content:"\f028"}.fa-vote-ye
a:before{content:"\f772"}.fa-vr-cardboard:before{content:"\f729"}.fa-vuejs:before{content:"\f41f"}.fa-walking:before{content:"\f554"}.fa-wallet:before{content:"\f555"}.fa-warehouse:before{content:"\f494"}.fa-water:before{content:"\f773"}.fa-weebly:before{content:"\f5cc"}.fa-weibo:before{content:"\f18a"}.fa-weight:before{content:"\f496"}.fa-weight-hanging:before{content:"\f5cd"}.fa-weixin:before{content:"\f1d7"}.fa-whatsapp:before{content:"\f232"}.fa-whatsapp-square:before{content:"\f40c"}.fa-wheelchair:before{content:"\f193"}.fa-whmcs:before{content:"\f40d"}.fa-wifi:before{content:"\f1eb"}.fa-wikipedia-w:before{content:"\f266"}.fa-wind:before{content:"\f72e"}.fa-window-close:before{content:"\f410"}.fa-window-maximize:before{content:"\f2d0"}.fa-window-minimize:before{content:"\f2d1"}.fa-window-restore:before{content:"\f2d2"}.fa-windows:before{content:"\f17a"}.fa-wine-bottle:before{content:"\f72f"}.fa-wine-glass:before{content:"\f4e3"}.fa-wine-glass-alt:before{content:"\f5ce"}.fa-wix:before{content:"\f5cf"}.fa-wizards-of-the-coast:before{content:"\f730"}.fa-wolf-pack-battalion:before{content:"\f514"}.fa-won-sign:before{content:"\f159"}.fa-wordpress:before{content:"\f19a"}.fa-wordpress-simple:before{content:"\f411"}.fa-wpbeginner:before{content:"\f297"}.fa-wpexplorer:before{content:"\f2de"}.fa-wpforms:before{content:"\f298"}.fa-wpressr:before{content:"\f3e4"}.fa-wrench:before{content:"\f0ad"}.fa-x-ray:before{content:"\f497"}.fa-xbox:before{content:"\f412"}.fa-xing:before{content:"\f168"}.fa-xing-square:before{content:"\f169"}.fa-y-combinator:before{content:"\f23b"}.fa-yahoo:before{content:"\f19e"}.fa-yandex:before{content:"\f413"}.fa-yandex-international:before{content:"\f414"}.fa-yarn:before{content:"\f7e3"}.fa-yelp:before{content:"\f1e9"}.fa-yen-sign:before{content:"\f157"}.fa-yin-yang:before{content:"\f6ad"}.fa-yoast:before{content:"\f2b1"}.fa-youtube:before{content:"\f167"}.fa-youtube-square:before{content:"\f431"}.fa-zhihu:before{content:"\f63f"}.sr-only{border:0;
clip:rect(0,0,0,0);height:1px;margin:-1px;overflow:hidden;padding:0;position:absolute;width:1px}.sr-only-focusable:active,.sr-only-focusable:focus{clip:auto;height:auto;margin:0;overflow:visible;position:static;width:auto}@font-face{font-family:"Font Awesome 5 Brands";font-style:normal;font-weight:normal;src:url(../webfonts/fa-brands-400.eot);src:url(../webfonts/fa-brands-400.eot?#iefix) format("embedded-opentype"),url(../webfonts/fa-brands-400.woff2) format("woff2"),url(../webfonts/fa-brands-400.woff) format("woff"),url(../webfonts/fa-brands-400.ttf) format("truetype"),url(../webfonts/fa-brands-400.svg#fontawesome) format("svg")}.fab{font-family:"Font Awesome 5 Brands"}@font-face{font-family:"Font Awesome 5 Free";font-style:normal;font-weight:400;src:url(../webfonts/fa-regular-400.eot);src:url(../webfonts/fa-regular-400.eot?#iefix) format("embedded-opentype"),url(../webfonts/fa-regular-400.woff2) format("woff2"),url(../webfonts/fa-regular-400.woff) format("woff"),url(../webfonts/fa-regular-400.ttf) format("truetype"),url(../webfonts/fa-regular-400.svg#fontawesome) format("svg")}.far{font-weight:400}@font-face{font-family:"Font Awesome 5 Free";font-style:normal;font-weight:900;src:url(../webfonts/fa-solid-900.eot);src:url(../webfonts/fa-solid-900.eot?#iefix) format("embedded-opentype"),url(../webfonts/fa-solid-900.woff2) format("woff2"),url(../webfonts/fa-solid-900.woff) format("woff"),url(../webfonts/fa-solid-900.ttf) format("truetype"),url(../webfonts/fa-solid-900.svg#fontawesome) format("svg")}.fa,.far,.fas{font-family:"Font Awesome 5 Free"}.fa,.fas{font-weight:900} \ No newline at end of file diff --git a/docs/21.4.2/css/hugo-theme.css b/docs/21.4.2/css/hugo-theme.css new file mode 100644 index 000000000..741cab196 --- /dev/null +++ b/docs/21.4.2/css/hugo-theme.css @@ -0,0 +1,254 @@ +/* Insert here special css for hugo theme, on top of any other imported css */ + + +/* Table of contents */ + +.progress ul { + list-style: none; + margin: 0; + padding: 0 5px; +} 
+ +#TableOfContents { + font-size: 13px !important; + max-height: 85vh; + overflow: auto; + padding: 15px !important; +} + + +#TableOfContents > ul > li > ul > li > ul li { + margin-right: 8px; +} + +#TableOfContents > ul > li > a { + font-weight: bold; padding: 0 18px; margin: 0 2px; +} + +#TableOfContents > ul > li > ul > li > a { + font-weight: bold; +} + +#TableOfContents > ul > li > ul > li > ul > li > ul > li > ul > li { + display: none; +} + +body { + font-size: 16px !important; + color: #323232 !important; +} + +#body a.highlight, #body a.highlight:hover, #body a.highlight:focus { + text-decoration: none; + outline: none; + outline: 0; +} +#body a.highlight { + line-height: 1.1; + display: inline-block; +} +#body a.highlight:after { + display: block; + content: ""; + height: 1px; + width: 0%; + background-color: #0082a7; /*#CE3B2F*/ + -webkit-transition: width 0.5s ease; + -moz-transition: width 0.5s ease; + -ms-transition: width 0.5s ease; + transition: width 0.5s ease; +} +#body a.highlight:hover:after, #body a.highlight:focus:after { + width: 100%; +} +.progress { + position:absolute; + background-color: rgba(246, 246, 246, 0.97); + width: auto; + border: thin solid #ECECEC; + display:none; + z-index:200; +} + +#toc-menu { + border-right: thin solid #DAD8D8 !important; + padding-right: 1rem !important; + margin-right: 0.5rem !important; +} + +#sidebar-toggle-span { + border-right: thin solid #DAD8D8 !important; + padding-right: 0.5rem !important; + margin-right: 1rem !important; +} + +.btn { + display: inline-block !important; + padding: 6px 12px !important; + margin-bottom: 0 !important; + font-size: 14px !important; + font-weight: normal !important; + line-height: 1.42857143 !important; + text-align: center !important; + white-space: nowrap !important; + vertical-align: middle !important; + -ms-touch-action: manipulation !important; + touch-action: manipulation !important; + cursor: pointer !important; + -webkit-user-select: none !important; + 
-moz-user-select: none !important; + -ms-user-select: none !important; + user-select: none !important; + background-image: none !important; + border: 1px solid transparent !important; + border-radius: 4px !important; + -webkit-transition: all 0.15s !important; + -moz-transition: all 0.15s !important; + transition: all 0.15s !important; +} +.btn:focus { + /*outline: thin dotted; + outline: 5px auto -webkit-focus-ring-color; + outline-offset: -2px;*/ + outline: none !important; +} +.btn:hover, +.btn:focus { + color: #2b2b2b !important; + text-decoration: none !important; +} + +.btn-default { + color: #333 !important; + background-color: #fff !important; + border-color: #ccc !important; +} +.btn-default:hover, +.btn-default:focus, +.btn-default:active { + color: #fff !important; + background-color: #9e9e9e !important; + border-color: #9e9e9e !important; +} +.btn-default:active { + background-image: none !important; +} + +/* anchors */ +.anchor { + color: #00bdf3; + font-size: 0.5em; + cursor:pointer; + visibility:hidden; + margin-left: 0.5em; + position: absolute; + margin-top:0.1em; +} + +h2:hover .anchor, h3:hover .anchor, h4:hover .anchor, h5:hover .anchor, h6:hover .anchor { + visibility:visible; +} + +/* Redfines headers style */ + +h2, h3, h4, h5, h6 { + font-weight: 400; + line-height: 1.1; +} + +h1 a, h2 a, h3 a, h4 a, h5 a, h6 a { + font-weight: inherit; +} + +h2 { + font-size: 2.5rem; + line-height: 110% !important; + margin: 2.5rem 0 1.5rem 0; +} + +h3 { + font-size: 2rem; + line-height: 110% !important; + margin: 2rem 0 1rem 0; +} + +h4 { + font-size: 1.5rem; + line-height: 110% !important; + margin: 1.5rem 0 0.75rem 0; +} + +h5 { + font-size: 1rem; + line-height: 110% !important; + margin: 1rem 0 0.2rem 0; +} + +h6 { + font-size: 0.5rem; + line-height: 110% !important; + margin: 0.5rem 0 0.2rem 0; +} + +p { + margin: 1rem 0; +} + +figcaption h4 { + font-weight: 300 !important; + opacity: .85; + font-size: 1em; + text-align: center; + margin-top: -1.5em; 
+} + +.select-style { + border: 0; + width: 150px; + border-radius: 0px; + overflow: hidden; + display: inline-flex; +} + +.select-style svg { + fill: #ccc; + width: 14px; + height: 14px; + pointer-events: none; + margin: auto; +} + +.select-style svg:hover { + fill: #e6e6e6; +} + +.select-style select { + padding: 0; + width: 130%; + border: none; + box-shadow: none; + background: transparent; + background-image: none; + -webkit-appearance: none; + margin: auto; + margin-left: 0px; + margin-right: -20px; +} + +.select-style select:focus { + outline: none; +} + +.select-style :hover { + cursor: pointer; +} + +@media only all and (max-width: 47.938em) { + #breadcrumbs .links, #top-github-link-text { + display: none; + } +} + +.is-sticky #top-bar { + box-shadow: -1px 2px 5px 1px rgba(0, 0, 0, 0.1); +} \ No newline at end of file diff --git a/docs/21.4.2/css/hybrid.css b/docs/21.4.2/css/hybrid.css new file mode 100644 index 000000000..29735a189 --- /dev/null +++ b/docs/21.4.2/css/hybrid.css @@ -0,0 +1,102 @@ +/* + +vim-hybrid theme by w0ng (https://github.com/w0ng/vim-hybrid) + +*/ + +/*background color*/ +.hljs { + display: block; + overflow-x: auto; + padding: 0.5em; + background: #1d1f21; +} + +/*selection color*/ +.hljs::selection, +.hljs span::selection { + background: #373b41; +} + +.hljs::-moz-selection, +.hljs span::-moz-selection { + background: #373b41; +} + +/*foreground color*/ +.hljs { + color: #c5c8c6; +} + +/*color: fg_yellow*/ +.hljs-title, +.hljs-name { + color: #f0c674; +} + +/*color: fg_comment*/ +.hljs-comment, +.hljs-meta, +.hljs-meta .hljs-keyword { + color: #707880; +} + +/*color: fg_red*/ +.hljs-number, +.hljs-symbol, +.hljs-literal, +.hljs-deletion, +.hljs-link { + color: #cc6666 +} + +/*color: fg_green*/ +.hljs-string, +.hljs-doctag, +.hljs-addition, +.hljs-regexp, +.hljs-selector-attr, +.hljs-selector-pseudo { + color: #b5bd68; +} + +/*color: fg_purple*/ +.hljs-attribute, +.hljs-code, +.hljs-selector-id { + color: #b294bb; +} + +/*color: 
fg_blue*/ +.hljs-keyword, +.hljs-selector-tag, +.hljs-bullet, +.hljs-tag { + color: #81a2be; +} + +/*color: fg_aqua*/ +.hljs-subst, +.hljs-variable, +.hljs-template-tag, +.hljs-template-variable { + color: #8abeb7; +} + +/*color: fg_orange*/ +.hljs-type, +.hljs-built_in, +.hljs-builtin-name, +.hljs-quote, +.hljs-section, +.hljs-selector-class { + color: #de935f; +} + +.hljs-emphasis { + font-style: italic; +} + +.hljs-strong { + font-weight: bold; +} diff --git a/docs/21.4.2/css/nucleus.css b/docs/21.4.2/css/nucleus.css new file mode 100644 index 000000000..1897fc5d6 --- /dev/null +++ b/docs/21.4.2/css/nucleus.css @@ -0,0 +1,615 @@ +*, *::before, *::after { + -webkit-box-sizing: border-box; + -moz-box-sizing: border-box; + box-sizing: border-box; } + +@-webkit-viewport { + width: device-width; } +@-moz-viewport { + width: device-width; } +@-ms-viewport { + width: device-width; } +@-o-viewport { + width: device-width; } +@viewport { + width: device-width; } +html { + font-size: 100%; + -ms-text-size-adjust: 100%; + -webkit-text-size-adjust: 100%; } + +body { + margin: 0; } + +article, +aside, +details, +figcaption, +figure, +footer, +header, +hgroup, +main, +nav, +section, +summary { + display: block; } + +audio, +canvas, +progress, +video { + display: inline-block; + vertical-align: baseline; } + +audio:not([controls]) { + display: none; + height: 0; } + +[hidden], +template { + display: none; } + +a { + background: transparent; + text-decoration: none; } + +a:active, +a:hover { + outline: 0; } + +abbr[title] { + border-bottom: 1px dotted; } + +b, +strong { + font-weight: bold; } + +dfn { + font-style: italic; } + +mark { + background: #FFFF27; + color: #333; } + +sub, +sup { + font-size: 0.8rem; + line-height: 0; + position: relative; + vertical-align: baseline; } + +sup { + top: -0.5em; } + +sub { + bottom: -0.25em; } + +img { + border: 0; + max-width: 100%; } + +svg:not(:root) { + overflow: hidden; } + +figure { + margin: 1em 40px; } + +hr { + height: 0; } + 
+pre { + overflow: auto; } + +button, +input, +optgroup, +select, +textarea { + color: inherit; + font: inherit; + margin: 0; } + +button { + overflow: visible; } + +button, +select { + text-transform: none; } + +button, +html input[type="button"], +input[type="reset"], +input[type="submit"] { + -webkit-appearance: button; + cursor: pointer; } + +button[disabled], +html input[disabled] { + cursor: default; } + +button::-moz-focus-inner, +input::-moz-focus-inner { + border: 0; + padding: 0; } + +input { + line-height: normal; } + +input[type="checkbox"], +input[type="radio"] { + padding: 0; } + +input[type="number"]::-webkit-inner-spin-button, +input[type="number"]::-webkit-outer-spin-button { + height: auto; } + +input[type="search"] { + -webkit-appearance: textfield; } + +input[type="search"]::-webkit-search-cancel-button, +input[type="search"]::-webkit-search-decoration { + -webkit-appearance: none; } + +legend { + border: 0; + padding: 0; } + +textarea { + overflow: auto; } + +optgroup { + font-weight: bold; } + +table { + border-collapse: collapse; + border-spacing: 0; + table-layout: fixed; + width: 100%; } + +tr, td, th { + vertical-align: middle; } + +th, td { + padding: 0.425rem 0; } + +th { + text-align: left; } + +.container { + width: 75em; + margin: 0 auto; + padding: 0; } + @media only all and (min-width: 60em) and (max-width: 74.938em) { + .container { + width: 60em; } } + @media only all and (min-width: 48em) and (max-width: 59.938em) { + .container { + width: 48em; } } + @media only all and (min-width: 30.063em) and (max-width: 47.938em) { + .container { + width: 30em; } } + @media only all and (max-width: 30em) { + .container { + width: 100%; } } + +.grid { + display: -webkit-box; + display: -moz-box; + display: box; + display: -webkit-flex; + display: -moz-flex; + display: -ms-flexbox; + display: flex; + -webkit-flex-flow: row; + -moz-flex-flow: row; + flex-flow: row; + list-style: none; + margin: 0; + padding: 0; } + @media only all and 
(max-width: 47.938em) { + .grid { + -webkit-flex-flow: row wrap; + -moz-flex-flow: row wrap; + flex-flow: row wrap; } } + +.block { + -webkit-box-flex: 1; + -moz-box-flex: 1; + box-flex: 1; + -webkit-flex: 1; + -moz-flex: 1; + -ms-flex: 1; + flex: 1; + min-width: 0; + min-height: 0; } + @media only all and (max-width: 47.938em) { + .block { + -webkit-box-flex: 0; + -moz-box-flex: 0; + box-flex: 0; + -webkit-flex: 0 100%; + -moz-flex: 0 100%; + -ms-flex: 0 100%; + flex: 0 100%; } } + +.content { + margin: 0.625rem; + padding: 0.938rem; } + +@media only all and (max-width: 47.938em) { + body [class*="size-"] { + -webkit-box-flex: 0; + -moz-box-flex: 0; + box-flex: 0; + -webkit-flex: 0 100%; + -moz-flex: 0 100%; + -ms-flex: 0 100%; + flex: 0 100%; } } + +.size-1-2 { + -webkit-box-flex: 0; + -moz-box-flex: 0; + box-flex: 0; + -webkit-flex: 0 50%; + -moz-flex: 0 50%; + -ms-flex: 0 50%; + flex: 0 50%; } + +.size-1-3 { + -webkit-box-flex: 0; + -moz-box-flex: 0; + box-flex: 0; + -webkit-flex: 0 33.33333%; + -moz-flex: 0 33.33333%; + -ms-flex: 0 33.33333%; + flex: 0 33.33333%; } + +.size-1-4 { + -webkit-box-flex: 0; + -moz-box-flex: 0; + box-flex: 0; + -webkit-flex: 0 25%; + -moz-flex: 0 25%; + -ms-flex: 0 25%; + flex: 0 25%; } + +.size-1-5 { + -webkit-box-flex: 0; + -moz-box-flex: 0; + box-flex: 0; + -webkit-flex: 0 20%; + -moz-flex: 0 20%; + -ms-flex: 0 20%; + flex: 0 20%; } + +.size-1-6 { + -webkit-box-flex: 0; + -moz-box-flex: 0; + box-flex: 0; + -webkit-flex: 0 16.66667%; + -moz-flex: 0 16.66667%; + -ms-flex: 0 16.66667%; + flex: 0 16.66667%; } + +.size-1-7 { + -webkit-box-flex: 0; + -moz-box-flex: 0; + box-flex: 0; + -webkit-flex: 0 14.28571%; + -moz-flex: 0 14.28571%; + -ms-flex: 0 14.28571%; + flex: 0 14.28571%; } + +.size-1-8 { + -webkit-box-flex: 0; + -moz-box-flex: 0; + box-flex: 0; + -webkit-flex: 0 12.5%; + -moz-flex: 0 12.5%; + -ms-flex: 0 12.5%; + flex: 0 12.5%; } + +.size-1-9 { + -webkit-box-flex: 0; + -moz-box-flex: 0; + box-flex: 0; + -webkit-flex: 0 
11.11111%; + -moz-flex: 0 11.11111%; + -ms-flex: 0 11.11111%; + flex: 0 11.11111%; } + +.size-1-10 { + -webkit-box-flex: 0; + -moz-box-flex: 0; + box-flex: 0; + -webkit-flex: 0 10%; + -moz-flex: 0 10%; + -ms-flex: 0 10%; + flex: 0 10%; } + +.size-1-11 { + -webkit-box-flex: 0; + -moz-box-flex: 0; + box-flex: 0; + -webkit-flex: 0 9.09091%; + -moz-flex: 0 9.09091%; + -ms-flex: 0 9.09091%; + flex: 0 9.09091%; } + +.size-1-12 { + -webkit-box-flex: 0; + -moz-box-flex: 0; + box-flex: 0; + -webkit-flex: 0 8.33333%; + -moz-flex: 0 8.33333%; + -ms-flex: 0 8.33333%; + flex: 0 8.33333%; } + +@media only all and (min-width: 48em) and (max-width: 59.938em) { + .size-tablet-1-2 { + -webkit-box-flex: 0; + -moz-box-flex: 0; + box-flex: 0; + -webkit-flex: 0 50%; + -moz-flex: 0 50%; + -ms-flex: 0 50%; + flex: 0 50%; } + + .size-tablet-1-3 { + -webkit-box-flex: 0; + -moz-box-flex: 0; + box-flex: 0; + -webkit-flex: 0 33.33333%; + -moz-flex: 0 33.33333%; + -ms-flex: 0 33.33333%; + flex: 0 33.33333%; } + + .size-tablet-1-4 { + -webkit-box-flex: 0; + -moz-box-flex: 0; + box-flex: 0; + -webkit-flex: 0 25%; + -moz-flex: 0 25%; + -ms-flex: 0 25%; + flex: 0 25%; } + + .size-tablet-1-5 { + -webkit-box-flex: 0; + -moz-box-flex: 0; + box-flex: 0; + -webkit-flex: 0 20%; + -moz-flex: 0 20%; + -ms-flex: 0 20%; + flex: 0 20%; } + + .size-tablet-1-6 { + -webkit-box-flex: 0; + -moz-box-flex: 0; + box-flex: 0; + -webkit-flex: 0 16.66667%; + -moz-flex: 0 16.66667%; + -ms-flex: 0 16.66667%; + flex: 0 16.66667%; } + + .size-tablet-1-7 { + -webkit-box-flex: 0; + -moz-box-flex: 0; + box-flex: 0; + -webkit-flex: 0 14.28571%; + -moz-flex: 0 14.28571%; + -ms-flex: 0 14.28571%; + flex: 0 14.28571%; } + + .size-tablet-1-8 { + -webkit-box-flex: 0; + -moz-box-flex: 0; + box-flex: 0; + -webkit-flex: 0 12.5%; + -moz-flex: 0 12.5%; + -ms-flex: 0 12.5%; + flex: 0 12.5%; } + + .size-tablet-1-9 { + -webkit-box-flex: 0; + -moz-box-flex: 0; + box-flex: 0; + -webkit-flex: 0 11.11111%; + -moz-flex: 0 11.11111%; + -ms-flex: 
0 11.11111%; + flex: 0 11.11111%; } + + .size-tablet-1-10 { + -webkit-box-flex: 0; + -moz-box-flex: 0; + box-flex: 0; + -webkit-flex: 0 10%; + -moz-flex: 0 10%; + -ms-flex: 0 10%; + flex: 0 10%; } + + .size-tablet-1-11 { + -webkit-box-flex: 0; + -moz-box-flex: 0; + box-flex: 0; + -webkit-flex: 0 9.09091%; + -moz-flex: 0 9.09091%; + -ms-flex: 0 9.09091%; + flex: 0 9.09091%; } + + .size-tablet-1-12 { + -webkit-box-flex: 0; + -moz-box-flex: 0; + box-flex: 0; + -webkit-flex: 0 8.33333%; + -moz-flex: 0 8.33333%; + -ms-flex: 0 8.33333%; + flex: 0 8.33333%; } } +@media only all and (max-width: 47.938em) { + @supports not (flex-wrap: wrap) { + .grid { + display: block; + -webkit-box-lines: inherit; + -moz-box-lines: inherit; + box-lines: inherit; + -webkit-flex-wrap: inherit; + -moz-flex-wrap: inherit; + -ms-flex-wrap: inherit; + flex-wrap: inherit; } + + .block { + display: block; + -webkit-box-flex: inherit; + -moz-box-flex: inherit; + box-flex: inherit; + -webkit-flex: inherit; + -moz-flex: inherit; + -ms-flex: inherit; + flex: inherit; } } } +.first-block { + -webkit-box-ordinal-group: 0; + -webkit-order: -1; + -ms-flex-order: -1; + order: -1; } + +.last-block { + -webkit-box-ordinal-group: 2; + -webkit-order: 1; + -ms-flex-order: 1; + order: 1; } + +.fixed-blocks { + -webkit-flex-flow: row wrap; + -moz-flex-flow: row wrap; + flex-flow: row wrap; } + .fixed-blocks .block { + -webkit-box-flex: inherit; + -moz-box-flex: inherit; + box-flex: inherit; + -webkit-flex: inherit; + -moz-flex: inherit; + -ms-flex: inherit; + flex: inherit; + width: 25%; } + @media only all and (min-width: 60em) and (max-width: 74.938em) { + .fixed-blocks .block { + width: 33.33333%; } } + @media only all and (min-width: 48em) and (max-width: 59.938em) { + .fixed-blocks .block { + width: 50%; } } + @media only all and (max-width: 47.938em) { + .fixed-blocks .block { + width: 100%; } } + +body { + font-size: 1.05rem; + line-height: 1.7; } + +h1, h2, h3, h4, h5, h6 { + margin: 0.85rem 0 1.7rem 0; 
+ text-rendering: optimizeLegibility; } + +h1 { + font-size: 3.25rem; } + +h2 { + font-size: 2.55rem; } + +h3 { + font-size: 2.15rem; } + +h4 { + font-size: 1.8rem; } + +h5 { + font-size: 1.4rem; } + +h6 { + font-size: 0.9rem; } + +p { + margin: 1.7rem 0; } + +ul, ol { + margin-top: 1.7rem; + margin-bottom: 1.7rem; } + ul ul, ul ol, ol ul, ol ol { + margin-top: 0; + margin-bottom: 0; } + +blockquote { + margin: 1.7rem 0; + padding-left: 0.85rem; } + +cite { + display: block; + font-size: 0.925rem; } + cite:before { + content: "\2014 \0020"; } + +pre { + margin: 1.7rem 0; + padding: 0.938rem; } + +code { + vertical-align: bottom; } + +small { + font-size: 0.925rem; } + +hr { + border-left: none; + border-right: none; + border-top: none; + margin: 1.7rem 0; } + +fieldset { + border: 0; + padding: 0.938rem; + margin: 0 0 1.7rem 0; } + +input, +label, +select { + display: block; } + +label { + margin-bottom: 0.425rem; } + label.required:after { + content: "*"; } + label abbr { + display: none; } + +textarea, input[type="email"], input[type="number"], input[type="password"], input[type="search"], input[type="tel"], input[type="text"], input[type="url"], input[type="color"], input[type="date"], input[type="datetime"], input[type="datetime-local"], input[type="month"], input[type="time"], input[type="week"], select[multiple=multiple] { + -webkit-transition: border-color; + -moz-transition: border-color; + transition: border-color; + border-radius: 0.1875rem; + margin-bottom: 0.85rem; + padding: 0.425rem 0.425rem; + width: 100%; } + textarea:focus, input[type="email"]:focus, input[type="number"]:focus, input[type="password"]:focus, input[type="search"]:focus, input[type="tel"]:focus, input[type="text"]:focus, input[type="url"]:focus, input[type="color"]:focus, input[type="date"]:focus, input[type="datetime"]:focus, input[type="datetime-local"]:focus, input[type="month"]:focus, input[type="time"]:focus, input[type="week"]:focus, select[multiple=multiple]:focus { + outline: 
none; } + +textarea { + resize: vertical; } + +input[type="checkbox"], input[type="radio"] { + display: inline; + margin-right: 0.425rem; } + +input[type="file"] { + width: 100%; } + +select { + width: auto; + max-width: 100%; + margin-bottom: 1.7rem; } + +button, +input[type="submit"] { + cursor: pointer; + user-select: none; + vertical-align: middle; + white-space: nowrap; + border: inherit; } diff --git a/docs/21.4.2/css/perfect-scrollbar.min.css b/docs/21.4.2/css/perfect-scrollbar.min.css new file mode 100644 index 000000000..ebd2cb43b --- /dev/null +++ b/docs/21.4.2/css/perfect-scrollbar.min.css @@ -0,0 +1,2 @@ +/* perfect-scrollbar v0.6.13 */ +.ps-container{-ms-touch-action:auto;touch-action:auto;overflow:hidden !important;-ms-overflow-style:none}@supports (-ms-overflow-style: none){.ps-container{overflow:auto !important}}@media screen and (-ms-high-contrast: active), (-ms-high-contrast: none){.ps-container{overflow:auto !important}}.ps-container.ps-active-x>.ps-scrollbar-x-rail,.ps-container.ps-active-y>.ps-scrollbar-y-rail{display:block;background-color:transparent}.ps-container.ps-in-scrolling.ps-x>.ps-scrollbar-x-rail{background-color:#eee;opacity:.9}.ps-container.ps-in-scrolling.ps-x>.ps-scrollbar-x-rail>.ps-scrollbar-x{background-color:#999;height:11px}.ps-container.ps-in-scrolling.ps-y>.ps-scrollbar-y-rail{background-color:#eee;opacity:.9}.ps-container.ps-in-scrolling.ps-y>.ps-scrollbar-y-rail>.ps-scrollbar-y{background-color:#999;width:11px}.ps-container>.ps-scrollbar-x-rail{display:none;position:absolute;opacity:0;-webkit-transition:background-color .2s linear, opacity .2s linear;-o-transition:background-color .2s linear, opacity .2s linear;-moz-transition:background-color .2s linear, opacity .2s linear;transition:background-color .2s linear, opacity .2s 
linear;bottom:0px;height:15px}.ps-container>.ps-scrollbar-x-rail>.ps-scrollbar-x{position:absolute;background-color:#aaa;-webkit-border-radius:6px;-moz-border-radius:6px;border-radius:6px;-webkit-transition:background-color .2s linear, height .2s linear, width .2s ease-in-out, -webkit-border-radius .2s ease-in-out;transition:background-color .2s linear, height .2s linear, width .2s ease-in-out, -webkit-border-radius .2s ease-in-out;-o-transition:background-color .2s linear, height .2s linear, width .2s ease-in-out, border-radius .2s ease-in-out;-moz-transition:background-color .2s linear, height .2s linear, width .2s ease-in-out, border-radius .2s ease-in-out, -moz-border-radius .2s ease-in-out;transition:background-color .2s linear, height .2s linear, width .2s ease-in-out, border-radius .2s ease-in-out;transition:background-color .2s linear, height .2s linear, width .2s ease-in-out, border-radius .2s ease-in-out, -webkit-border-radius .2s ease-in-out, -moz-border-radius .2s ease-in-out;bottom:2px;height:6px}.ps-container>.ps-scrollbar-x-rail:hover>.ps-scrollbar-x,.ps-container>.ps-scrollbar-x-rail:active>.ps-scrollbar-x{height:11px}.ps-container>.ps-scrollbar-y-rail{display:none;position:absolute;opacity:0;-webkit-transition:background-color .2s linear, opacity .2s linear;-o-transition:background-color .2s linear, opacity .2s linear;-moz-transition:background-color .2s linear, opacity .2s linear;transition:background-color .2s linear, opacity .2s linear;right:0;width:15px}.ps-container>.ps-scrollbar-y-rail>.ps-scrollbar-y{position:absolute;background-color:#aaa;-webkit-border-radius:6px;-moz-border-radius:6px;border-radius:6px;-webkit-transition:background-color .2s linear, height .2s linear, width .2s ease-in-out, -webkit-border-radius .2s ease-in-out;transition:background-color .2s linear, height .2s linear, width .2s ease-in-out, -webkit-border-radius .2s ease-in-out;-o-transition:background-color .2s linear, height .2s linear, width .2s ease-in-out, 
border-radius .2s ease-in-out;-moz-transition:background-color .2s linear, height .2s linear, width .2s ease-in-out, border-radius .2s ease-in-out, -moz-border-radius .2s ease-in-out;transition:background-color .2s linear, height .2s linear, width .2s ease-in-out, border-radius .2s ease-in-out;transition:background-color .2s linear, height .2s linear, width .2s ease-in-out, border-radius .2s ease-in-out, -webkit-border-radius .2s ease-in-out, -moz-border-radius .2s ease-in-out;right:2px;width:6px}.ps-container>.ps-scrollbar-y-rail:hover>.ps-scrollbar-y,.ps-container>.ps-scrollbar-y-rail:active>.ps-scrollbar-y{width:11px}.ps-container:hover.ps-in-scrolling.ps-x>.ps-scrollbar-x-rail{background-color:#eee;opacity:.9}.ps-container:hover.ps-in-scrolling.ps-x>.ps-scrollbar-x-rail>.ps-scrollbar-x{background-color:#999;height:11px}.ps-container:hover.ps-in-scrolling.ps-y>.ps-scrollbar-y-rail{background-color:#eee;opacity:.9}.ps-container:hover.ps-in-scrolling.ps-y>.ps-scrollbar-y-rail>.ps-scrollbar-y{background-color:#999;width:11px}.ps-container:hover>.ps-scrollbar-x-rail,.ps-container:hover>.ps-scrollbar-y-rail{opacity:.6}.ps-container:hover>.ps-scrollbar-x-rail:hover{background-color:#eee;opacity:.9}.ps-container:hover>.ps-scrollbar-x-rail:hover>.ps-scrollbar-x{background-color:#999}.ps-container:hover>.ps-scrollbar-y-rail:hover{background-color:#eee;opacity:.9}.ps-container:hover>.ps-scrollbar-y-rail:hover>.ps-scrollbar-y{background-color:#999} diff --git a/docs/21.4.2/css/tags.css b/docs/21.4.2/css/tags.css new file mode 100644 index 000000000..495d2f9f7 --- /dev/null +++ b/docs/21.4.2/css/tags.css @@ -0,0 +1,49 @@ +/* Tags */ + +#head-tags{ + margin-left:1em; + margin-top:1em; +} + +#body .tags a.tag-link { + display: inline-block; + line-height: 2em; + font-size: 0.8em; + position: relative; + margin: 0 16px 8px 0; + padding: 0 10px 0 12px; + background: #8451a1; + + -webkit-border-bottom-right-radius: 3px; + border-bottom-right-radius: 3px; + 
-webkit-border-top-right-radius: 3px; + border-top-right-radius: 3px; + + -webkit-box-shadow: 0 1px 2px rgba(0,0,0,0.2); + box-shadow: 0 1px 2px rgba(0,0,0,0.2); + color: #fff; +} + +#body .tags a.tag-link:before { + content: ""; + position: absolute; + top:0; + left: -1em; + width: 0; + height: 0; + border-color: transparent #8451a1 transparent transparent; + border-style: solid; + border-width: 1em 1em 1em 0; +} + +#body .tags a.tag-link:after { + content: ""; + position: absolute; + top: 10px; + left: 1px; + width: 5px; + height: 5px; + -webkit-border-radius: 50%; + border-radius: 100%; + background: #fff; +} diff --git a/docs/21.4.2/css/theme-blue.css b/docs/21.4.2/css/theme-blue.css new file mode 100644 index 000000000..9771ae5e3 --- /dev/null +++ b/docs/21.4.2/css/theme-blue.css @@ -0,0 +1,111 @@ + +:root{ + + --MAIN-TEXT-color:#323232; /* Color of text by default */ + --MAIN-TITLES-TEXT-color: #5e5e5e; /* Color of titles h2-h3-h4-h5 */ + --MAIN-LINK-color:#1C90F3; /* Color of links */ + --MAIN-LINK-HOVER-color:#167ad0; /* Color of hovered links */ + --MAIN-ANCHOR-color: #1C90F3; /* color of anchors on titles */ + + --MENU-HEADER-BG-color:#1C90F3; /* Background color of menu header */ + --MENU-HEADER-BORDER-color:#33a1ff; /*Color of menu header border */ + + --MENU-SEARCH-BG-color:#167ad0; /* Search field background color (by default borders + icons) */ + --MENU-SEARCH-BOX-color: #33a1ff; /* Override search field border color */ + --MENU-SEARCH-BOX-ICONS-color: #a1d2fd; /* Override search field icons color */ + + --MENU-SECTIONS-ACTIVE-BG-color:#20272b; /* Background color of the active section and its childs */ + --MENU-SECTIONS-BG-color:#252c31; /* Background color of other sections */ + --MENU-SECTIONS-LINK-color: #ccc; /* Color of links in menu */ + --MENU-SECTIONS-LINK-HOVER-color: #e6e6e6; /* Color of links in menu, when hovered */ + --MENU-SECTION-ACTIVE-CATEGORY-color: #777; /* Color of active category text */ + 
--MENU-SECTION-ACTIVE-CATEGORY-BG-color: #fff; /* Color of background for the active category (only) */ + + --MENU-VISITED-color: #33a1ff; /* Color of 'page visited' icons in menu */ + --MENU-SECTION-HR-color: #20272b; /* Color of
separator in menu */ + +} + +body { + color: var(--MAIN-TEXT-color) !important; +} + +textarea:focus, input[type="email"]:focus, input[type="number"]:focus, input[type="password"]:focus, input[type="search"]:focus, input[type="tel"]:focus, input[type="text"]:focus, input[type="url"]:focus, input[type="color"]:focus, input[type="date"]:focus, input[type="datetime"]:focus, input[type="datetime-local"]:focus, input[type="month"]:focus, input[type="time"]:focus, input[type="week"]:focus, select[multiple=multiple]:focus { + border-color: none; + box-shadow: none; +} + +h2, h3, h4, h5 { + color: var(--MAIN-TITLES-TEXT-color) !important; +} + +a { + color: var(--MAIN-LINK-color); +} + +.anchor { + color: var(--MAIN-ANCHOR-color); +} + +a:hover { + color: var(--MAIN-LINK-HOVER-color); +} + +#sidebar ul li.visited > a .read-icon { + color: var(--MENU-VISITED-color); +} + +#body a.highlight:after { + display: block; + content: ""; + height: 1px; + width: 0%; + -webkit-transition: width 0.5s ease; + -moz-transition: width 0.5s ease; + -ms-transition: width 0.5s ease; + transition: width 0.5s ease; + background-color: var(--MAIN-LINK-HOVER-color); +} +#sidebar { + background-color: var(--MENU-SECTIONS-BG-color); +} +#sidebar #header-wrapper { + background: var(--MENU-HEADER-BG-color); + color: var(--MENU-SEARCH-BOX-color); + border-color: var(--MENU-HEADER-BORDER-color); +} +#sidebar .searchbox { + border-color: var(--MENU-SEARCH-BOX-color); + background: var(--MENU-SEARCH-BG-color); +} +#sidebar ul.topics > li.parent, #sidebar ul.topics > li.active { + background: var(--MENU-SECTIONS-ACTIVE-BG-color); +} +#sidebar .searchbox * { + color: var(--MENU-SEARCH-BOX-ICONS-color); +} + +#sidebar a { + color: var(--MENU-SECTIONS-LINK-color); +} + +#sidebar a:hover { + color: var(--MENU-SECTIONS-LINK-HOVER-color); +} + +#sidebar ul li.active > a { + background: var(--MENU-SECTION-ACTIVE-CATEGORY-BG-color); + color: var(--MENU-SECTION-ACTIVE-CATEGORY-color) !important; +} + +#sidebar hr 
{ + border-color: var(--MENU-SECTION-HR-color); +} + +#body .tags a.tag-link { + background-color: var(--MENU-HEADER-BG-color); +} + +#body .tags a.tag-link:before { + border-right-color: var(--MENU-HEADER-BG-color); +} \ No newline at end of file diff --git a/docs/21.4.2/css/theme-green.css b/docs/21.4.2/css/theme-green.css new file mode 100644 index 000000000..3b0b1f721 --- /dev/null +++ b/docs/21.4.2/css/theme-green.css @@ -0,0 +1,111 @@ + +:root{ + + --MAIN-TEXT-color:#323232; /* Color of text by default */ + --MAIN-TITLES-TEXT-color: #5e5e5e; /* Color of titles h2-h3-h4-h5 */ + --MAIN-LINK-color:#599a3e; /* Color of links */ + --MAIN-LINK-HOVER-color:#3f6d2c; /* Color of hovered links */ + --MAIN-ANCHOR-color: #599a3e; /* color of anchors on titles */ + + --MENU-HEADER-BG-color:#74b559; /* Background color of menu header */ + --MENU-HEADER-BORDER-color:#9cd484; /*Color of menu header border */ + + --MENU-SEARCH-BG-color:#599a3e; /* Search field background color (by default borders + icons) */ + --MENU-SEARCH-BOX-color: #84c767; /* Override search field border color */ + --MENU-SEARCH-BOX-ICONS-color: #c7f7c4; /* Override search field icons color */ + + --MENU-SECTIONS-ACTIVE-BG-color:#1b211c; /* Background color of the active section and its childs */ + --MENU-SECTIONS-BG-color:#222723; /* Background color of other sections */ + --MENU-SECTIONS-LINK-color: #ccc; /* Color of links in menu */ + --MENU-SECTIONS-LINK-HOVER-color: #e6e6e6; /* Color of links in menu, when hovered */ + --MENU-SECTION-ACTIVE-CATEGORY-color: #777; /* Color of active category text */ + --MENU-SECTION-ACTIVE-CATEGORY-BG-color: #fff; /* Color of background for the active category (only) */ + + --MENU-VISITED-color: #599a3e; /* Color of 'page visited' icons in menu */ + --MENU-SECTION-HR-color: #18211c; /* Color of
separator in menu */ + +} + +body { + color: var(--MAIN-TEXT-color) !important; +} + +textarea:focus, input[type="email"]:focus, input[type="number"]:focus, input[type="password"]:focus, input[type="search"]:focus, input[type="tel"]:focus, input[type="text"]:focus, input[type="url"]:focus, input[type="color"]:focus, input[type="date"]:focus, input[type="datetime"]:focus, input[type="datetime-local"]:focus, input[type="month"]:focus, input[type="time"]:focus, input[type="week"]:focus, select[multiple=multiple]:focus { + border-color: none; + box-shadow: none; +} + +h2, h3, h4, h5 { + color: var(--MAIN-TITLES-TEXT-color) !important; +} + +a { + color: var(--MAIN-LINK-color); +} + +.anchor { + color: var(--MAIN-ANCHOR-color); +} + +a:hover { + color: var(--MAIN-LINK-HOVER-color); +} + +#sidebar ul li.visited > a .read-icon { + color: var(--MENU-VISITED-color); +} + +#body a.highlight:after { + display: block; + content: ""; + height: 1px; + width: 0%; + -webkit-transition: width 0.5s ease; + -moz-transition: width 0.5s ease; + -ms-transition: width 0.5s ease; + transition: width 0.5s ease; + background-color: var(--MAIN-LINK-HOVER-color); +} +#sidebar { + background-color: var(--MENU-SECTIONS-BG-color); +} +#sidebar #header-wrapper { + background: var(--MENU-HEADER-BG-color); + color: var(--MENU-SEARCH-BOX-color); + border-color: var(--MENU-HEADER-BORDER-color); +} +#sidebar .searchbox { + border-color: var(--MENU-SEARCH-BOX-color); + background: var(--MENU-SEARCH-BG-color); +} +#sidebar ul.topics > li.parent, #sidebar ul.topics > li.active { + background: var(--MENU-SECTIONS-ACTIVE-BG-color); +} +#sidebar .searchbox * { + color: var(--MENU-SEARCH-BOX-ICONS-color); +} + +#sidebar a { + color: var(--MENU-SECTIONS-LINK-color); +} + +#sidebar a:hover { + color: var(--MENU-SECTIONS-LINK-HOVER-color); +} + +#sidebar ul li.active > a { + background: var(--MENU-SECTION-ACTIVE-CATEGORY-BG-color); + color: var(--MENU-SECTION-ACTIVE-CATEGORY-color) !important; +} + +#sidebar hr 
{ + border-color: var(--MENU-SECTION-HR-color); +} + +#body .tags a.tag-link { + background-color: var(--MENU-HEADER-BG-color); +} + +#body .tags a.tag-link:before { + border-right-color: var(--MENU-HEADER-BG-color); +} \ No newline at end of file diff --git a/docs/21.4.2/css/theme-red.css b/docs/21.4.2/css/theme-red.css new file mode 100644 index 000000000..36c9278e5 --- /dev/null +++ b/docs/21.4.2/css/theme-red.css @@ -0,0 +1,111 @@ + +:root{ + + --MAIN-TEXT-color:#323232; /* Color of text by default */ + --MAIN-TITLES-TEXT-color: #5e5e5e; /* Color of titles h2-h3-h4-h5 */ + --MAIN-LINK-color:#f31c1c; /* Color of links */ + --MAIN-LINK-HOVER-color:#d01616; /* Color of hovered links */ + --MAIN-ANCHOR-color: #f31c1c; /* color of anchors on titles */ + + --MENU-HEADER-BG-color:#dc1010; /* Background color of menu header */ + --MENU-HEADER-BORDER-color:#e23131; /*Color of menu header border */ + + --MENU-SEARCH-BG-color:#b90000; /* Search field background color (by default borders + icons) */ + --MENU-SEARCH-BOX-color: #ef2020; /* Override search field border color */ + --MENU-SEARCH-BOX-ICONS-color: #fda1a1; /* Override search field icons color */ + + --MENU-SECTIONS-ACTIVE-BG-color:#2b2020; /* Background color of the active section and its childs */ + --MENU-SECTIONS-BG-color:#312525; /* Background color of other sections */ + --MENU-SECTIONS-LINK-color: #ccc; /* Color of links in menu */ + --MENU-SECTIONS-LINK-HOVER-color: #e6e6e6; /* Color of links in menu, when hovered */ + --MENU-SECTION-ACTIVE-CATEGORY-color: #777; /* Color of active category text */ + --MENU-SECTION-ACTIVE-CATEGORY-BG-color: #fff; /* Color of background for the active category (only) */ + + --MENU-VISITED-color: #ff3333; /* Color of 'page visited' icons in menu */ + --MENU-SECTION-HR-color: #2b2020; /* Color of
separator in menu */ + +} + +body { + color: var(--MAIN-TEXT-color) !important; +} + +textarea:focus, input[type="email"]:focus, input[type="number"]:focus, input[type="password"]:focus, input[type="search"]:focus, input[type="tel"]:focus, input[type="text"]:focus, input[type="url"]:focus, input[type="color"]:focus, input[type="date"]:focus, input[type="datetime"]:focus, input[type="datetime-local"]:focus, input[type="month"]:focus, input[type="time"]:focus, input[type="week"]:focus, select[multiple=multiple]:focus { + border-color: none; + box-shadow: none; +} + +h2, h3, h4, h5 { + color: var(--MAIN-TITLES-TEXT-color) !important; +} + +a { + color: var(--MAIN-LINK-color); +} + +.anchor { + color: var(--MAIN-ANCHOR-color); +} + +a:hover { + color: var(--MAIN-LINK-HOVER-color); +} + +#sidebar ul li.visited > a .read-icon { + color: var(--MENU-VISITED-color); +} + +#body a.highlight:after { + display: block; + content: ""; + height: 1px; + width: 0%; + -webkit-transition: width 0.5s ease; + -moz-transition: width 0.5s ease; + -ms-transition: width 0.5s ease; + transition: width 0.5s ease; + background-color: var(--MAIN-LINK-HOVER-color); +} +#sidebar { + background-color: var(--MENU-SECTIONS-BG-color); +} +#sidebar #header-wrapper { + background: var(--MENU-HEADER-BG-color); + color: var(--MENU-SEARCH-BOX-color); + border-color: var(--MENU-HEADER-BORDER-color); +} +#sidebar .searchbox { + border-color: var(--MENU-SEARCH-BOX-color); + background: var(--MENU-SEARCH-BG-color); +} +#sidebar ul.topics > li.parent, #sidebar ul.topics > li.active { + background: var(--MENU-SECTIONS-ACTIVE-BG-color); +} +#sidebar .searchbox * { + color: var(--MENU-SEARCH-BOX-ICONS-color); +} + +#sidebar a { + color: var(--MENU-SECTIONS-LINK-color); +} + +#sidebar a:hover { + color: var(--MENU-SECTIONS-LINK-HOVER-color); +} + +#sidebar ul li.active > a { + background: var(--MENU-SECTION-ACTIVE-CATEGORY-BG-color); + color: var(--MENU-SECTION-ACTIVE-CATEGORY-color) !important; +} + +#sidebar hr 
{ + border-color: var(--MENU-SECTION-HR-color); +} + +#body .tags a.tag-link { + background-color: var(--MENU-HEADER-BG-color); +} + +#body .tags a.tag-link:before { + border-right-color: var(--MENU-HEADER-BG-color); +} \ No newline at end of file diff --git a/docs/21.4.2/css/theme.css b/docs/21.4.2/css/theme.css new file mode 100644 index 000000000..9b4550457 --- /dev/null +++ b/docs/21.4.2/css/theme.css @@ -0,0 +1,1141 @@ +@charset "UTF-8"; + +/* Tags */ +@import "tags.css"; + +#top-github-link, #body #breadcrumbs { + position: relative; + top: 50%; + -webkit-transform: translateY(-50%); + -moz-transform: translateY(-50%); + -o-transform: translateY(-50%); + -ms-transform: translateY(-50%); + transform: translateY(-50%); +} +.button, .button-secondary { + display: inline-block; + padding: 7px 12px; +} +.button:active, .button-secondary:active { + margin: 2px 0 -2px 0; +} +@font-face { + font-family: 'Novacento Sans Wide'; + src: url("../fonts/Novecentosanswide-UltraLight-webfont.eot"); + src: url("../fonts/Novecentosanswide-UltraLight-webfont.eot?#iefix") format("embedded-opentype"), url("../fonts/Novecentosanswide-UltraLight-webfont.woff2") format("woff2"), url("../fonts/Novecentosanswide-UltraLight-webfont.woff") format("woff"), url("../fonts/Novecentosanswide-UltraLight-webfont.ttf") format("truetype"), url("../fonts/Novecentosanswide-UltraLight-webfont.svg#novecento_sans_wideultralight") format("svg"); + font-style: normal; + font-weight: 200; +} +@font-face { + font-family: 'Work Sans'; + font-style: normal; + font-weight: 300; + src: url("../fonts/Work_Sans_300.eot?#iefix") format("embedded-opentype"), url("../fonts/Work_Sans_300.woff") format("woff"), url("../fonts/Work_Sans_300.woff2") format("woff2"), url("../fonts/Work_Sans_300.svg#WorkSans") format("svg"), url("../fonts/Work_Sans_300.ttf") format("truetype"); +} +@font-face { + font-family: 'Work Sans'; + font-style: normal; + font-weight: 500; + src: url("../fonts/Work_Sans_500.eot?#iefix") 
format("embedded-opentype"), url("../fonts/Work_Sans_500.woff") format("woff"), url("../fonts/Work_Sans_500.woff2") format("woff2"), url("../fonts/Work_Sans_500.svg#WorkSans") format("svg"), url("../fonts/Work_Sans_500.ttf") format("truetype"); +} +body { + background: #fff; + color: #777; +} +body #chapter h1 { + font-size: 3.5rem; +} +@media only all and (min-width: 48em) and (max-width: 59.938em) { + body #chapter h1 { + font-size: 3rem; + } +} +@media only all and (max-width: 47.938em) { + body #chapter h1 { + font-size: 2rem; + } +} +a { + color: #00bdf3; +} +a:hover { + color: #0082a7; +} +pre { + position: relative; + color: #ffffff; +} +.bg { + background: #fff; + border: 1px solid #eaeaea; +} +b, strong, label, th { + font-weight: 600; +} +.default-animation, #header #logo-svg, #header #logo-svg path, #sidebar, #sidebar ul, #body, #body .padding, #body .nav { + -webkit-transition: all 0.5s ease; + -moz-transition: all 0.5s ease; + transition: all 0.5s ease; +} +#grav-logo { + max-width: 60%; +} +#grav-logo path { + fill: #fff !important; +} +#sidebar { + font-weight: 300 !important; +} +fieldset { + border: 1px solid #ddd; +} +textarea, input[type="email"], input[type="number"], input[type="password"], input[type="search"], input[type="tel"], input[type="text"], input[type="url"], input[type="color"], input[type="date"], input[type="datetime"], input[type="datetime-local"], input[type="month"], input[type="time"], input[type="week"], select[multiple=multiple] { + background-color: white; + border: 1px solid #ddd; + box-shadow: inset 0 1px 3px rgba(0, 0, 0, 0.06); +} +textarea:hover, input[type="email"]:hover, input[type="number"]:hover, input[type="password"]:hover, input[type="search"]:hover, input[type="tel"]:hover, input[type="text"]:hover, input[type="url"]:hover, input[type="color"]:hover, input[type="date"]:hover, input[type="datetime"]:hover, input[type="datetime-local"]:hover, input[type="month"]:hover, input[type="time"]:hover, 
input[type="week"]:hover, select[multiple=multiple]:hover { + border-color: #c4c4c4; +} +textarea:focus, input[type="email"]:focus, input[type="number"]:focus, input[type="password"]:focus, input[type="search"]:focus, input[type="tel"]:focus, input[type="text"]:focus, input[type="url"]:focus, input[type="color"]:focus, input[type="date"]:focus, input[type="datetime"]:focus, input[type="datetime-local"]:focus, input[type="month"]:focus, input[type="time"]:focus, input[type="week"]:focus, select[multiple=multiple]:focus { + border-color: #00bdf3; + box-shadow: inset 0 1px 3px rgba(0,0,0,.06),0 0 5px rgba(0,169,218,.7) +} +#header-wrapper { + background: #8451a1; + color: #fff; + text-align: center; + border-bottom: 4px solid #9c6fb6; + padding: 1rem; +} +#header a { + display: inline-block; +} +#header #logo-svg { + width: 8rem; + height: 2rem; +} +#header #logo-svg path { + fill: #fff; +} +.searchbox { + margin-top: 1rem; + position: relative; + border: 1px solid #915eae; + background: #764890; + border-radius: 4px; +} +.searchbox label { + color: rgba(255, 255, 255, 0.8); + position: absolute; + left: 10px; + top: 3px; +} +.searchbox span { + color: rgba(255, 255, 255, 0.6); + position: absolute; + right: 10px; + top: 3px; + cursor: pointer; +} +.searchbox span:hover { + color: rgba(255, 255, 255, 0.9); +} +.searchbox input { + display: inline-block; + color: #fff; + width: 100%; + height: 30px; + background: transparent; + border: 0; + padding: 0 25px 0 30px; + margin: 0; + font-weight: 300; +} +.searchbox input::-webkit-input-placeholder { + color: rgba(255, 255, 255, 0.6); +} +.searchbox input::-moz-placeholder { + color: rgba(255, 255, 255, 0.6); +} +.searchbox input:-moz-placeholder { + color: rgba(255, 255, 255, 0.6); +} +.searchbox input:-ms-input-placeholder { + color: rgba(255, 255, 255, 0.6); +} +#sidebar-toggle-span { + display: none; +} +@media only all and (max-width: 47.938em) { + #sidebar-toggle-span { + display: inline; + } +} +#sidebar { + 
background-color: #322A38; + position: fixed; + top: 0; + width: 300px; + bottom: 0; + left: 0; + font-weight: 400; + font-size: 15px; +} +#sidebar a { + color: #ccc; +} +#sidebar a:hover { + color: #e6e6e6; +} +#sidebar a.subtitle { + color: rgba(204, 204, 204, 0.6); +} +#sidebar hr { + border-bottom: 1px solid #2a232f; +} +#sidebar a.padding { + padding: 0 1rem; +} +#sidebar h5 { + margin: 2rem 0 0; + position: relative; + line-height: 2; +} +#sidebar h5 a { + display: block; + margin-left: 0; + margin-right: 0; + padding-left: 1rem; + padding-right: 1rem; +} +#sidebar h5 i { + color: rgba(204, 204, 204, 0.6); + position: absolute; + right: 0.6rem; + top: 0.7rem; + font-size: 80%; +} +#sidebar h5.parent a { + background: #201b24; + color: #d9d9d9 !important; +} +#sidebar h5.active a { + background: #fff; + color: #777 !important; +} +#sidebar h5.active i { + color: #777 !important; +} +#sidebar h5 + ul.topics { + display: none; + margin-top: 0; +} +#sidebar h5.parent + ul.topics, #sidebar h5.active + ul.topics { + display: block; +} +#sidebar ul { + list-style: none; + padding: 0; + margin: 0; +} +#sidebar ul.searched a { + color: #999999; +} +#sidebar ul.searched .search-match a { + color: #e6e6e6; +} +#sidebar ul.searched .search-match a:hover { + color: white; +} +#sidebar ul.topics { + margin: 0 1rem; +} +#sidebar ul.topics.searched ul { + display: block; +} +#sidebar ul.topics ul { + display: none; + padding-bottom: 1rem; +} +#sidebar ul.topics ul ul { + padding-bottom: 0; +} +#sidebar ul.topics li.parent ul, #sidebar ul.topics > li.active ul { + display: block; +} +#sidebar ul.topics > li > a { + line-height: 2rem; + font-size: 1.1rem; +} +#sidebar ul.topics > li > a b { + opacity: 0.5; + font-weight: normal; +} +#sidebar ul.topics > li > a .fa { + margin-top: 9px; +} +#sidebar ul.topics > li.parent, #sidebar ul.topics > li.active { + background: #251f29; + margin-left: -1rem; + margin-right: -1rem; + padding-left: 1rem; + padding-right: 1rem; +} +#sidebar 
ul li.active > a { + background: #fff; + color: #777 !important; + margin-left: -1rem; + margin-right: -1rem; + padding-left: 1rem; + padding-right: 1rem; +} +#sidebar ul li { + padding: 0; +} +#sidebar ul li.visited + span { + margin-right: 16px; +} +#sidebar ul li a { + display: block; + padding: 2px 0; +} +#sidebar ul li a span { + text-overflow: ellipsis; + overflow: hidden; + white-space: nowrap; + display: block; +} +#sidebar ul li > a { + padding: 4px 0; +} +#sidebar ul li.visited > a .read-icon { + color: #9c6fb6; + display: inline; +} +#sidebar ul li li { + padding-left: 1rem; + text-indent: 0.2rem; +} +#main { + background: #f7f7f7; + margin: 0 0 1.563rem 0; +} +#body { + position: relative; + margin-left: 300px; + min-height: 100%; +} +#body img, #body .video-container { + margin: 3rem auto; + display: block; + text-align: center; +} +#body img.border, #body .video-container.border { + border: 2px solid #e6e6e6 !important; + padding: 2px; +} +#body img.shadow, #body .video-container.shadow { + box-shadow: 0 10px 30px rgba(0, 0, 0, 0.1); +} +#body img.inline { + display: inline !important; + margin: 0 !important; + vertical-align: bottom; +} +#body .bordered { + border: 1px solid #ccc; +} +#body .padding { + padding: 3rem 6rem; +} +@media only all and (max-width: 59.938em) { + #body .padding { + position: static; + padding: 15px 3rem; + } +} +@media only all and (max-width: 47.938em) { + #body .padding { + padding: 5px 1rem; + } +} +#body h1 + hr { + margin-top: -1.7rem; + margin-bottom: 3rem; +} +@media only all and (max-width: 59.938em) { + #body #navigation { + position: static; + margin-right: 0 !important; + width: 100%; + display: table; + } +} +#body .nav { + position: fixed; + top: 0; + bottom: 0; + width: 4rem; + font-size: 50px; + height: 100%; + cursor: pointer; + display: table; + text-align: center; +} +#body .nav > i { + display: table-cell; + vertical-align: middle; + text-align: center; +} +@media only all and (max-width: 59.938em) { + 
#body .nav { + display: table-cell; + position: static; + top: auto; + width: 50%; + text-align: center; + height: 100px; + line-height: 100px; + padding-top: 0; + } + #body .nav > i { + display: inline-block; + } +} +#body .nav:hover { + background: #F6F6F6; +} +#body .nav.nav-pref { + left: 0; +} +#body .nav.nav-next { + right: 0; +} +#body-inner { + margin-bottom: 5rem; +} +#chapter { + display: flex; + align-items: center; + justify-content: center; + height: 100%; + padding: 2rem 0; +} +#chapter #body-inner { + padding-bottom: 3rem; + max-width: 80%; +} +#chapter h3 { + font-family: "Work Sans", "Helvetica", "Tahoma", "Geneva", "Arial", sans-serif; + font-weight: 300; + text-align: center; +} +#chapter h1 { + font-size: 5rem; + border-bottom: 4px solid #F0F2F4; +} +#chapter p { + text-align: center; + font-size: 1.2rem; +} +#footer { + padding: 3rem 1rem; + color: #b3b3b3; + font-size: 13px; +} +#footer p { + margin: 0; +} +body { + font-family: "Work Sans", "Helvetica", "Tahoma", "Geneva", "Arial", sans-serif; + font-weight: 300; + line-height: 1.6; + font-size: 18px !important; +} +h2, h3, h4, h5, h6 { + font-family: "Work Sans", "Helvetica", "Tahoma", "Geneva", "Arial", sans-serif; + text-rendering: optimizeLegibility; + color: #5e5e5e; + font-weight: 400; + letter-spacing: -1px; +} +h1 { + font-family: "Novacento Sans Wide", "Helvetica", "Tahoma", "Geneva", "Arial", sans-serif; + text-align: center; + text-transform: uppercase; + color: #222; + font-weight: 200; +} +blockquote { + border-left: 10px solid #F0F2F4; +} +blockquote p { + font-size: 1.1rem; + color: #999; +} +blockquote cite { + display: block; + text-align: right; + color: #666; + font-size: 1.2rem; +} +div.notices { + margin: 2rem 0; + position: relative; +} +div.notices p { + padding: 15px; + display: block; + font-size: 1rem; + margin-top: 0rem; + margin-bottom: 0rem; + color: #666; +} +div.notices p:first-child:before { + position: absolute; + top: 2px; + color: #fff; + font-family: "Font 
Awesome 5 Free"; + font-weight: 900; + content: "\f06a"; + left: 10px; +} +div.notices p:first-child:after { + position: absolute; + top: 2px; + color: #fff; + left: 2rem; +} +div.notices.info p { + border-top: 30px solid #F0B37E; + background: #FFF2DB; +} +div.notices.info p:first-child:after { + content: 'Info'; +} +div.notices.warning p { + border-top: 30px solid rgba(217, 83, 79, 0.8); + background: #FAE2E2; +} +div.notices.warning p:first-child:after { + content: 'Warning'; +} +div.notices.note p { + border-top: 30px solid #6AB0DE; + background: #E7F2FA; +} +div.notices.note p:first-child:after { + content: 'Note'; +} +div.notices.tip p { + border-top: 30px solid rgba(92, 184, 92, 0.8); + background: #E6F9E6; +} +div.notices.tip p:first-child:after { + content: 'Tip'; +} + +/* attachments shortcode */ + +section.attachments { + margin: 2rem 0; + position: relative; +} + +section.attachments label { + font-weight: 400; + padding-left: 0.5em; + padding-top: 0.2em; + padding-bottom: 0.2em; + margin: 0; +} + +section.attachments .attachments-files { + padding: 15px; + display: block; + font-size: 1rem; + margin-top: 0rem; + margin-bottom: 0rem; + color: #666; +} + +section.attachments.orange label { + color: #fff; + background: #F0B37E; +} + +section.attachments.orange .attachments-files { + background: #FFF2DB; +} + +section.attachments.green label { + color: #fff; + background: rgba(92, 184, 92, 0.8); +} + +section.attachments.green .attachments-files { + background: #E6F9E6; +} + +section.attachments.blue label { + color: #fff; + background: #6AB0DE; +} + +section.attachments.blue .attachments-files { + background: #E7F2FA; +} + +section.attachments.grey label { + color: #fff; + background: #505d65; +} + +section.attachments.grey .attachments-files { + background: #f4f4f4; +} + +/* Children shortcode */ + +/* Children shortcode */ +.children p { + font-size: small; + margin-top: 0px; + padding-top: 0px; + margin-bottom: 0px; + padding-bottom: 0px; +} 
+.children-li p { + font-size: small; + font-style: italic; + +} +.children-h2 p, .children-h3 p { + font-size: small; + margin-top: 0px; + padding-top: 0px; + margin-bottom: 0px; + padding-bottom: 0px; +} +.children h3,.children h2 { + margin-bottom: 0px; + margin-top: 5px; +} + +code, kbd, pre, samp { + font-family: "Consolas", menlo, monospace; + font-size: 92%; +} +code { + border-radius: 2px; + white-space: nowrap; + color: #5e5e5e; + background: #FFF7DD; + border: 1px solid #fbf0cb; + padding: 0px 2px; +} +code + .copy-to-clipboard { + margin-left: -1px; + border-left: 0 !important; + font-size: inherit !important; + vertical-align: middle; + height: 21px; + top: 0; +} +pre { + padding: 1rem; + margin: 2rem 0; + background: #282c34; + border: 0; + border-radius: 2px; + line-height: 1.15; +} +pre code { + color: whitesmoke; + background: inherit; + white-space: inherit; + border: 0; + padding: 0; + margin: 0; + font-size: 15px; +} +hr { + border-bottom: 4px solid #F0F2F4; +} +.page-title { + margin-top: -25px; + padding: 25px; + float: left; + clear: both; + background: #9c6fb6; + color: #fff; +} +#body a.anchor-link { + color: #ccc; +} +#body a.anchor-link:hover { + color: #9c6fb6; +} +#body-inner .tabs-wrapper.ui-theme-badges { + background: #1d1f21; +} +#body-inner .tabs-wrapper.ui-theme-badges .tabs-nav li { + font-size: 0.9rem; + text-transform: uppercase; +} +#body-inner .tabs-wrapper.ui-theme-badges .tabs-nav li a { + background: #35393c; +} +#body-inner .tabs-wrapper.ui-theme-badges .tabs-nav li.current a { + background: #4d5257; +} +#body-inner pre { + white-space: pre-wrap; +} +.tabs-wrapper pre { + margin: 1rem 0; + border: 0; + padding: 0; + background: inherit; +} +table { + border: 1px solid #eaeaea; + table-layout: auto; +} +th { + background: #f7f7f7; + padding: 0.5rem; +} +td { + padding: 0.5rem; + border: 1px solid #eaeaea; +} +.button { + background: #9c6fb6; + color: #fff; + box-shadow: 0 3px 0 #00a5d4; +} +.button:hover { + background: 
#00a5d4; + box-shadow: 0 3px 0 #008db6; + color: #fff; +} +.button:active { + box-shadow: 0 1px 0 #008db6; +} +.button-secondary { + background: #F8B450; + color: #fff; + box-shadow: 0 3px 0 #f7a733; +} +.button-secondary:hover { + background: #f7a733; + box-shadow: 0 3px 0 #f69b15; + color: #fff; +} +.button-secondary:active { + box-shadow: 0 1px 0 #f69b15; +} +.bullets { + margin: 1.7rem 0; + margin-left: -0.85rem; + margin-right: -0.85rem; + overflow: auto; +} +.bullet { + float: left; + padding: 0 0.85rem; +} +.two-column-bullet { + width: 50%; +} +@media only all and (max-width: 47.938em) { + .two-column-bullet { + width: 100%; + } +} +.three-column-bullet { + width: 33.33333%; +} +@media only all and (max-width: 47.938em) { + .three-column-bullet { + width: 100%; + } +} +.four-column-bullet { + width: 25%; +} +@media only all and (max-width: 47.938em) { + .four-column-bullet { + width: 100%; + } +} +.bullet-icon { + float: left; + background: #9c6fb6; + padding: 0.875rem; + width: 3.5rem; + height: 3.5rem; + border-radius: 50%; + color: #fff; + font-size: 1.75rem; + text-align: center; +} +.bullet-icon-1 { + background: #9c6fb6; +} +.bullet-icon-2 { + background: #00f3d8; +} +.bullet-icon-3 { + background: #e6f300; +} +.bullet-content { + margin-left: 4.55rem; +} +.tooltipped { + position: relative; +} +.tooltipped:after { + position: absolute; + z-index: 1000000; + display: none; + padding: 5px 8px; + font: normal normal 11px/1.5 "Work Sans", "Helvetica", "Tahoma", "Geneva", "Arial", sans-serif; + color: #fff; + text-align: center; + text-decoration: none; + text-shadow: none; + text-transform: none; + letter-spacing: normal; + word-wrap: break-word; + white-space: pre; + pointer-events: none; + content: attr(aria-label); + background: rgba(0, 0, 0, 0.8); + border-radius: 3px; + -webkit-font-smoothing: subpixel-antialiased; +} +.tooltipped:before { + position: absolute; + z-index: 1000001; + display: none; + width: 0; + height: 0; + color: rgba(0, 0, 0, 
0.8); + pointer-events: none; + content: ""; + border: 5px solid transparent; +} +.tooltipped:hover:before, .tooltipped:hover:after, .tooltipped:active:before, .tooltipped:active:after, .tooltipped:focus:before, .tooltipped:focus:after { + display: inline-block; + text-decoration: none; +} +.tooltipped-s:after, .tooltipped-se:after, .tooltipped-sw:after { + top: 100%; + right: 50%; + margin-top: 5px; +} +.tooltipped-s:before, .tooltipped-se:before, .tooltipped-sw:before { + top: auto; + right: 50%; + bottom: -5px; + margin-right: -5px; + border-bottom-color: rgba(0, 0, 0, 0.8); +} +.tooltipped-se:after { + right: auto; + left: 50%; + margin-left: -15px; +} +.tooltipped-sw:after { + margin-right: -15px; +} +.tooltipped-n:after, .tooltipped-ne:after, .tooltipped-nw:after { + right: 50%; + bottom: 100%; + margin-bottom: 5px; +} +.tooltipped-n:before, .tooltipped-ne:before, .tooltipped-nw:before { + top: -5px; + right: 50%; + bottom: auto; + margin-right: -5px; + border-top-color: rgba(0, 0, 0, 0.8); +} +.tooltipped-ne:after { + right: auto; + left: 50%; + margin-left: -15px; +} +.tooltipped-nw:after { + margin-right: -15px; +} +.tooltipped-s:after, .tooltipped-n:after { + transform: translateX(50%); +} +.tooltipped-w:after { + right: 100%; + bottom: 50%; + margin-right: 5px; + transform: translateY(50%); +} +.tooltipped-w:before { + top: 50%; + bottom: 50%; + left: -5px; + margin-top: -5px; + border-left-color: rgba(0, 0, 0, 0.8); +} +.tooltipped-e:after { + bottom: 50%; + left: 100%; + margin-left: 5px; + transform: translateY(50%); +} +.tooltipped-e:before { + top: 50%; + right: -5px; + bottom: 50%; + margin-top: -5px; + border-right-color: rgba(0, 0, 0, 0.8); +} +.highlightable { + padding: 1rem 0 1rem; + overflow: auto; + position: relative; +} +.hljs::selection, .hljs span::selection { + background: #b7b7b7; +} +.lightbox-active #body { + overflow: visible; +} +.lightbox-active #body .padding { + overflow: visible; +} +#github-contrib i { + vertical-align: 
middle; +} +.featherlight img { + margin: 0 !important; +} +.lifecycle #body-inner ul { + list-style: none; + margin: 0; + padding: 2rem 0 0; + position: relative; +} +.lifecycle #body-inner ol { + margin: 1rem 0 1rem 0; + padding: 2rem; + position: relative; +} +.lifecycle #body-inner ol li { + margin-left: 1rem; +} +.lifecycle #body-inner ol strong, .lifecycle #body-inner ol label, .lifecycle #body-inner ol th { + text-decoration: underline; +} +.lifecycle #body-inner ol ol { + margin-left: -1rem; +} +.lifecycle #body-inner h3[class*='level'] { + font-size: 20px; + position: absolute; + margin: 0; + padding: 4px 10px; + right: 0; + z-index: 1000; + color: #fff; + background: #1ABC9C; +} +.lifecycle #body-inner ol h3 { + margin-top: 1rem !important; + right: 2rem !important; +} +.lifecycle #body-inner .level-1 + ol { + background: #f6fefc; + border: 4px solid #1ABC9C; + color: #16A085; +} +.lifecycle #body-inner .level-1 + ol h3 { + background: #2ECC71; +} +.lifecycle #body-inner .level-2 + ol { + background: #f7fdf9; + border: 4px solid #2ECC71; + color: #27AE60; +} +.lifecycle #body-inner .level-2 + ol h3 { + background: #3498DB; +} +.lifecycle #body-inner .level-3 + ol { + background: #f3f9fd; + border: 4px solid #3498DB; + color: #2980B9; +} +.lifecycle #body-inner .level-3 + ol h3 { + background: #34495E; +} +.lifecycle #body-inner .level-4 + ol { + background: #e4eaf0; + border: 4px solid #34495E; + color: #2C3E50; +} +.lifecycle #body-inner .level-4 + ol h3 { + background: #34495E; +} +#top-bar { + background: #F6F6F6; + border-radius: 2px; + padding: 0 1rem; + height: 0; + min-height: 3rem; +} +#top-github-link { + position: relative; + z-index: 1; + float: right; + display: block; +} +#body #breadcrumbs { + height: auto; + margin-bottom: 0; + padding-left: 0; + line-height: 1.4; + overflow: hidden; + white-space: nowrap; + text-overflow: ellipsis; + width: 70%; + display: inline-block; + float: left; +} +#body #breadcrumbs span { + padding: 0 0.1rem; +} 
+@media only all and (max-width: 59.938em) { + #sidebar { + width: 230px; + } + #body { + margin-left: 230px; + } +} +@media only all and (max-width: 47.938em) { + #sidebar { + width: 230px; + left: -230px; + } + #body { + margin-left: 0; + width: 100%; + } + .sidebar-hidden { + overflow: hidden; + } + .sidebar-hidden #sidebar { + left: 0; + } + .sidebar-hidden #body { + margin-left: 230px; + overflow: hidden; + } + .sidebar-hidden #overlay { + position: absolute; + left: 0; + right: 0; + top: 0; + bottom: 0; + z-index: 10; + background: rgba(255, 255, 255, 0.5); + cursor: pointer; + } +} +.copy-to-clipboard { + background-image: url(../images/clippy.svg); + background-position: 50% 50%; + background-size: 16px 16px; + background-repeat: no-repeat; + width: 27px; + height: 1.45rem; + top: -1px; + display: inline-block; + vertical-align: middle; + position: relative; + color: #5e5e5e; + background-color: #FFF7DD; + margin-left: -.2rem; + cursor: pointer; + border-radius: 0 2px 2px 0; + margin-bottom: 1px; +} +.copy-to-clipboard:hover { + background-color: #E8E2CD; +} +pre .copy-to-clipboard { + position: absolute; + right: 4px; + top: 4px; + background-color: #949bab; + color: #ccc; + border-radius: 2px; +} +pre .copy-to-clipboard:hover { + background-color: #656c72; + color: #fff; +} +.parent-element { + -webkit-transform-style: preserve-3d; + -moz-transform-style: preserve-3d; + transform-style: preserve-3d; +} + +#sidebar ul.topics > li > a .read-icon { + margin-top: 9px; +} + +#sidebar ul { + list-style: none; + padding: 0; + margin: 0; +} + +#sidebar #shortcuts li { + padding: 2px 0; + list-style: none; +} + +#sidebar ul li .read-icon { + display: none; + float: right; + font-size: 13px; + min-width: 16px; + margin: 4px 0 0 0; + text-align: right; +} +#sidebar ul li.visited > a .read-icon { + color: #00bdf3; + display: inline; +} + +#sidebar #shortcuts h3 { + font-family: "Novacento Sans Wide", "Helvetica", "Tahoma", "Geneva", "Arial", sans-serif; + color: 
white ; + margin-top:1rem; + padding-left: 1rem; +} +#homelinks { + background-color: #9c6fb6; + color: #fff; + padding: 7px 0; + border-bottom: 4px solid #9c6fb6; +} +#searchResults { + text-align: left; +} + +option { + color: initial; +} diff --git a/docs/21.4.2/fonts/Inconsolata.eot b/docs/21.4.2/fonts/Inconsolata.eot new file mode 100644 index 000000000..0a705d653 Binary files /dev/null and b/docs/21.4.2/fonts/Inconsolata.eot differ diff --git a/docs/21.4.2/fonts/Inconsolata.svg b/docs/21.4.2/fonts/Inconsolata.svg new file mode 100644 index 000000000..b7f97c875 --- /dev/null +++ b/docs/21.4.2/fonts/Inconsolata.svg @@ -0,0 +1,359 @@ + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + diff --git a/docs/21.4.2/fonts/Inconsolata.ttf b/docs/21.4.2/fonts/Inconsolata.ttf new file mode 100644 index 000000000..4b8a36d24 Binary files /dev/null and b/docs/21.4.2/fonts/Inconsolata.ttf differ diff --git a/docs/21.4.2/fonts/Inconsolata.woff b/docs/21.4.2/fonts/Inconsolata.woff new file mode 100644 index 000000000..6f39625e5 Binary files /dev/null and b/docs/21.4.2/fonts/Inconsolata.woff differ diff --git a/docs/21.4.2/fonts/Novecentosanswide-Normal-webfont.eot b/docs/21.4.2/fonts/Novecentosanswide-Normal-webfont.eot new file mode 100644 index 000000000..9984682fc Binary files /dev/null and b/docs/21.4.2/fonts/Novecentosanswide-Normal-webfont.eot differ diff --git a/docs/21.4.2/fonts/Novecentosanswide-Normal-webfont.svg b/docs/21.4.2/fonts/Novecentosanswide-Normal-webfont.svg new file mode 100644 index 000000000..c412ea8c1 --- /dev/null +++ b/docs/21.4.2/fonts/Novecentosanswide-Normal-webfont.svg @@ -0,0 +1,1019 @@ + 
+ + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + 
+ + + + + + + + + + + + + + + + + + \ No newline at end of file diff --git a/docs/21.4.2/fonts/Novecentosanswide-Normal-webfont.ttf b/docs/21.4.2/fonts/Novecentosanswide-Normal-webfont.ttf new file mode 100644 index 000000000..8cfb62dd5 Binary files /dev/null and b/docs/21.4.2/fonts/Novecentosanswide-Normal-webfont.ttf differ diff --git a/docs/21.4.2/fonts/Novecentosanswide-Normal-webfont.woff b/docs/21.4.2/fonts/Novecentosanswide-Normal-webfont.woff new file mode 100644 index 000000000..d5c429079 Binary files /dev/null and b/docs/21.4.2/fonts/Novecentosanswide-Normal-webfont.woff differ diff --git a/docs/21.4.2/fonts/Novecentosanswide-Normal-webfont.woff2 b/docs/21.4.2/fonts/Novecentosanswide-Normal-webfont.woff2 new file mode 100644 index 000000000..eefb4a318 Binary files /dev/null and b/docs/21.4.2/fonts/Novecentosanswide-Normal-webfont.woff2 differ diff --git a/docs/21.4.2/fonts/Novecentosanswide-UltraLight-webfont.eot b/docs/21.4.2/fonts/Novecentosanswide-UltraLight-webfont.eot new file mode 100644 index 000000000..2a26561f9 Binary files /dev/null and b/docs/21.4.2/fonts/Novecentosanswide-UltraLight-webfont.eot differ diff --git a/docs/21.4.2/fonts/Novecentosanswide-UltraLight-webfont.svg b/docs/21.4.2/fonts/Novecentosanswide-UltraLight-webfont.svg new file mode 100644 index 000000000..e642ab076 --- /dev/null +++ b/docs/21.4.2/fonts/Novecentosanswide-UltraLight-webfont.svg @@ -0,0 +1,918 @@ + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + 
+ + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + \ No newline at end of file diff --git a/docs/21.4.2/fonts/Novecentosanswide-UltraLight-webfont.ttf b/docs/21.4.2/fonts/Novecentosanswide-UltraLight-webfont.ttf new file mode 100644 index 000000000..9ce9c7f99 Binary files /dev/null and b/docs/21.4.2/fonts/Novecentosanswide-UltraLight-webfont.ttf differ diff --git a/docs/21.4.2/fonts/Novecentosanswide-UltraLight-webfont.woff b/docs/21.4.2/fonts/Novecentosanswide-UltraLight-webfont.woff new file mode 100644 index 000000000..381650c98 Binary files /dev/null and b/docs/21.4.2/fonts/Novecentosanswide-UltraLight-webfont.woff differ diff --git a/docs/21.4.2/fonts/Novecentosanswide-UltraLight-webfont.woff2 b/docs/21.4.2/fonts/Novecentosanswide-UltraLight-webfont.woff2 new file mode 100644 
index 000000000..7e659549b Binary files /dev/null and b/docs/21.4.2/fonts/Novecentosanswide-UltraLight-webfont.woff2 differ diff --git a/docs/21.4.2/fonts/Work_Sans_200.eot b/docs/21.4.2/fonts/Work_Sans_200.eot new file mode 100644 index 000000000..4052e4f94 Binary files /dev/null and b/docs/21.4.2/fonts/Work_Sans_200.eot differ diff --git a/docs/21.4.2/fonts/Work_Sans_200.svg b/docs/21.4.2/fonts/Work_Sans_200.svg new file mode 100644 index 000000000..58ab4ba22 --- /dev/null +++ b/docs/21.4.2/fonts/Work_Sans_200.svg @@ -0,0 +1,332 @@ + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + diff --git a/docs/21.4.2/fonts/Work_Sans_200.ttf b/docs/21.4.2/fonts/Work_Sans_200.ttf new file mode 100644 index 000000000..68019e1cc Binary files /dev/null and b/docs/21.4.2/fonts/Work_Sans_200.ttf differ diff --git a/docs/21.4.2/fonts/Work_Sans_200.woff b/docs/21.4.2/fonts/Work_Sans_200.woff new file mode 100644 index 000000000..a1bd9e469 Binary files /dev/null and b/docs/21.4.2/fonts/Work_Sans_200.woff differ diff --git a/docs/21.4.2/fonts/Work_Sans_200.woff2 b/docs/21.4.2/fonts/Work_Sans_200.woff2 new file mode 100644 index 000000000..20c68a75c Binary files /dev/null and b/docs/21.4.2/fonts/Work_Sans_200.woff2 differ diff --git a/docs/21.4.2/fonts/Work_Sans_300.eot b/docs/21.4.2/fonts/Work_Sans_300.eot new file mode 100644 index 000000000..ace799382 Binary files /dev/null and b/docs/21.4.2/fonts/Work_Sans_300.eot differ diff --git a/docs/21.4.2/fonts/Work_Sans_300.svg b/docs/21.4.2/fonts/Work_Sans_300.svg new file mode 100644 index 000000000..f29d0c8a1 --- /dev/null +++ b/docs/21.4.2/fonts/Work_Sans_300.svg @@ -0,0 
+1,331 @@ + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + diff --git a/docs/21.4.2/fonts/Work_Sans_300.ttf b/docs/21.4.2/fonts/Work_Sans_300.ttf new file mode 100644 index 000000000..35387c235 Binary files /dev/null and b/docs/21.4.2/fonts/Work_Sans_300.ttf differ diff --git a/docs/21.4.2/fonts/Work_Sans_300.woff b/docs/21.4.2/fonts/Work_Sans_300.woff new file mode 100644 index 000000000..8d789eae9 Binary files /dev/null and b/docs/21.4.2/fonts/Work_Sans_300.woff differ diff --git a/docs/21.4.2/fonts/Work_Sans_300.woff2 b/docs/21.4.2/fonts/Work_Sans_300.woff2 new file mode 100644 index 000000000..f6e216d64 Binary files /dev/null and b/docs/21.4.2/fonts/Work_Sans_300.woff2 differ diff --git a/docs/21.4.2/fonts/Work_Sans_500.eot b/docs/21.4.2/fonts/Work_Sans_500.eot new file mode 100644 index 000000000..9df692942 Binary files /dev/null and b/docs/21.4.2/fonts/Work_Sans_500.eot differ diff --git a/docs/21.4.2/fonts/Work_Sans_500.svg b/docs/21.4.2/fonts/Work_Sans_500.svg new file mode 100644 index 000000000..4b030b790 --- /dev/null +++ b/docs/21.4.2/fonts/Work_Sans_500.svg @@ -0,0 +1,333 @@ + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + diff --git a/docs/21.4.2/fonts/Work_Sans_500.ttf b/docs/21.4.2/fonts/Work_Sans_500.ttf new file mode 
100644 index 000000000..5b8cc5342 Binary files /dev/null and b/docs/21.4.2/fonts/Work_Sans_500.ttf differ diff --git a/docs/21.4.2/fonts/Work_Sans_500.woff b/docs/21.4.2/fonts/Work_Sans_500.woff new file mode 100644 index 000000000..df058514f Binary files /dev/null and b/docs/21.4.2/fonts/Work_Sans_500.woff differ diff --git a/docs/21.4.2/fonts/Work_Sans_500.woff2 b/docs/21.4.2/fonts/Work_Sans_500.woff2 new file mode 100644 index 000000000..b06c54df0 Binary files /dev/null and b/docs/21.4.2/fonts/Work_Sans_500.woff2 differ diff --git a/docs/21.4.2/images/.gitkeep b/docs/21.4.2/images/.gitkeep new file mode 100644 index 000000000..e69de29bb diff --git a/docs/21.4.2/images/clippy.svg b/docs/21.4.2/images/clippy.svg new file mode 100644 index 000000000..1c8abc2fd --- /dev/null +++ b/docs/21.4.2/images/clippy.svg @@ -0,0 +1 @@ + diff --git a/docs/21.4.2/images/favicon.png b/docs/21.4.2/images/favicon.png new file mode 100644 index 000000000..df06e35d6 Binary files /dev/null and b/docs/21.4.2/images/favicon.png differ diff --git a/docs/21.4.2/images/fmw_12c_12_2_1_4_0-logo.png b/docs/21.4.2/images/fmw_12c_12_2_1_4_0-logo.png new file mode 100644 index 000000000..6a2d34fff Binary files /dev/null and b/docs/21.4.2/images/fmw_12c_12_2_1_4_0-logo.png differ diff --git a/docs/21.4.2/images/gopher-404.jpg b/docs/21.4.2/images/gopher-404.jpg new file mode 100644 index 000000000..2a5054389 Binary files /dev/null and b/docs/21.4.2/images/gopher-404.jpg differ diff --git a/docs/21.4.2/images/logo.png b/docs/21.4.2/images/logo.png new file mode 100644 index 000000000..6bfe10627 Binary files /dev/null and b/docs/21.4.2/images/logo.png differ diff --git a/docs/21.4.2/images/soa-domains/CreateApplicationServerConnection.jpg b/docs/21.4.2/images/soa-domains/CreateApplicationServerConnection.jpg new file mode 100644 index 000000000..e69f13ac0 Binary files /dev/null and b/docs/21.4.2/images/soa-domains/CreateApplicationServerConnection.jpg differ diff --git 
a/docs/21.4.2/images/soa-domains/CreateApplicationServerConnectionTestConnection.jpg b/docs/21.4.2/images/soa-domains/CreateApplicationServerConnectionTestConnection.jpg new file mode 100644 index 000000000..84796fec9 Binary files /dev/null and b/docs/21.4.2/images/soa-domains/CreateApplicationServerConnectionTestConnection.jpg differ diff --git a/docs/21.4.2/images/soa-domains/ExposeSOAMST3.png b/docs/21.4.2/images/soa-domains/ExposeSOAMST3.png new file mode 100644 index 000000000..119d72c67 Binary files /dev/null and b/docs/21.4.2/images/soa-domains/ExposeSOAMST3.png differ diff --git a/docs/21.4.2/images/soa-domains/JDEV_OSB_Deploy_Success_Status.png b/docs/21.4.2/images/soa-domains/JDEV_OSB_Deploy_Success_Status.png new file mode 100644 index 000000000..26adedcf5 Binary files /dev/null and b/docs/21.4.2/images/soa-domains/JDEV_OSB_Deploy_Success_Status.png differ diff --git a/docs/21.4.2/images/soa-domains/JDEV_OSB_Deploying_Progress.png b/docs/21.4.2/images/soa-domains/JDEV_OSB_Deploying_Progress.png new file mode 100644 index 000000000..c834a7852 Binary files /dev/null and b/docs/21.4.2/images/soa-domains/JDEV_OSB_Deploying_Progress.png differ diff --git a/docs/21.4.2/images/soa-domains/JDEV_OSB_Deployment_Start.png b/docs/21.4.2/images/soa-domains/JDEV_OSB_Deployment_Start.png new file mode 100644 index 000000000..e15c9d7a4 Binary files /dev/null and b/docs/21.4.2/images/soa-domains/JDEV_OSB_Deployment_Start.png differ diff --git a/docs/21.4.2/images/soa-domains/JDEV_OSB_Deployment_Summary.jpg b/docs/21.4.2/images/soa-domains/JDEV_OSB_Deployment_Summary.jpg new file mode 100644 index 000000000..303ab5151 Binary files /dev/null and b/docs/21.4.2/images/soa-domains/JDEV_OSB_Deployment_Summary.jpg differ diff --git a/docs/21.4.2/images/soa-domains/JDEV_OSB_Deployment_Summary.png b/docs/21.4.2/images/soa-domains/JDEV_OSB_Deployment_Summary.png new file mode 100644 index 000000000..303ab5151 Binary files /dev/null and 
b/docs/21.4.2/images/soa-domains/JDEV_OSB_Deployment_Summary.png differ diff --git a/docs/21.4.2/images/soa-domains/JDEV_OSB_Select_Application_Server.png b/docs/21.4.2/images/soa-domains/JDEV_OSB_Select_Application_Server.png new file mode 100644 index 000000000..1ea311e9c Binary files /dev/null and b/docs/21.4.2/images/soa-domains/JDEV_OSB_Select_Application_Server.png differ diff --git a/docs/21.4.2/images/soa-domains/JDEV_OSB_Select_Deployment_Action.png b/docs/21.4.2/images/soa-domains/JDEV_OSB_Select_Deployment_Action.png new file mode 100644 index 000000000..ae3fecbf8 Binary files /dev/null and b/docs/21.4.2/images/soa-domains/JDEV_OSB_Select_Deployment_Action.png differ diff --git a/docs/21.4.2/images/soa-domains/JDEV_OSB_servicebus_launch_test_console.png b/docs/21.4.2/images/soa-domains/JDEV_OSB_servicebus_launch_test_console.png new file mode 100644 index 000000000..a01fe9f55 Binary files /dev/null and b/docs/21.4.2/images/soa-domains/JDEV_OSB_servicebus_launch_test_console.png differ diff --git a/docs/21.4.2/images/soa-domains/JDEV_Reference_Config_Settings.png b/docs/21.4.2/images/soa-domains/JDEV_Reference_Config_Settings.png new file mode 100644 index 000000000..549fa9390 Binary files /dev/null and b/docs/21.4.2/images/soa-domains/JDEV_Reference_Config_Settings.png differ diff --git a/docs/21.4.2/images/soa-domains/JDEV_SOA_Deploy_Configuration.png b/docs/21.4.2/images/soa-domains/JDEV_SOA_Deploy_Configuration.png new file mode 100644 index 000000000..8d4320237 Binary files /dev/null and b/docs/21.4.2/images/soa-domains/JDEV_SOA_Deploy_Configuration.png differ diff --git a/docs/21.4.2/images/soa-domains/JDEV_SOA_Deploy_Success_Status.png b/docs/21.4.2/images/soa-domains/JDEV_SOA_Deploy_Success_Status.png new file mode 100644 index 000000000..84a132f0b Binary files /dev/null and b/docs/21.4.2/images/soa-domains/JDEV_SOA_Deploy_Success_Status.png differ diff --git a/docs/21.4.2/images/soa-domains/JDEV_SOA_Deploying_Progress.png 
b/docs/21.4.2/images/soa-domains/JDEV_SOA_Deploying_Progress.png new file mode 100644 index 000000000..ef49c887c Binary files /dev/null and b/docs/21.4.2/images/soa-domains/JDEV_SOA_Deploying_Progress.png differ diff --git a/docs/21.4.2/images/soa-domains/JDEV_SOA_Deployment_Start.png b/docs/21.4.2/images/soa-domains/JDEV_SOA_Deployment_Start.png new file mode 100644 index 000000000..45cb532c7 Binary files /dev/null and b/docs/21.4.2/images/soa-domains/JDEV_SOA_Deployment_Start.png differ diff --git a/docs/21.4.2/images/soa-domains/JDEV_SOA_Deployment_Summary.png b/docs/21.4.2/images/soa-domains/JDEV_SOA_Deployment_Summary.png new file mode 100644 index 000000000..02ac26fdc Binary files /dev/null and b/docs/21.4.2/images/soa-domains/JDEV_SOA_Deployment_Summary.png differ diff --git a/docs/21.4.2/images/soa-domains/JDEV_SOA_Select_Application_Server.png b/docs/21.4.2/images/soa-domains/JDEV_SOA_Select_Application_Server.png new file mode 100644 index 000000000..f751e0c56 Binary files /dev/null and b/docs/21.4.2/images/soa-domains/JDEV_SOA_Select_Application_Server.png differ diff --git a/docs/21.4.2/images/soa-domains/JDEV_SOA_Select_Deployment_Action.png b/docs/21.4.2/images/soa-domains/JDEV_SOA_Select_Deployment_Action.png new file mode 100644 index 000000000..a4eaf58fb Binary files /dev/null and b/docs/21.4.2/images/soa-domains/JDEV_SOA_Select_Deployment_Action.png differ diff --git a/docs/21.4.2/images/soa-domains/JDEV_SOA_Server_Lookup.png b/docs/21.4.2/images/soa-domains/JDEV_SOA_Server_Lookup.png new file mode 100644 index 000000000..fae2f2378 Binary files /dev/null and b/docs/21.4.2/images/soa-domains/JDEV_SOA_Server_Lookup.png differ diff --git a/docs/21.4.2/images/soa-domains/JDEV_SOA_Target_soa_servers.png b/docs/21.4.2/images/soa-domains/JDEV_SOA_Target_soa_servers.png new file mode 100644 index 000000000..5b8cd53f6 Binary files /dev/null and b/docs/21.4.2/images/soa-domains/JDEV_SOA_Target_soa_servers.png differ diff --git 
a/docs/21.4.2/images/soa-domains/JDEV_SOA_soainfra_server1.png b/docs/21.4.2/images/soa-domains/JDEV_SOA_soainfra_server1.png new file mode 100644 index 000000000..9f225df7c Binary files /dev/null and b/docs/21.4.2/images/soa-domains/JDEV_SOA_soainfra_server1.png differ diff --git a/docs/21.4.2/images/soa-domains/JDEV_SOA_soainfra_server2.png b/docs/21.4.2/images/soa-domains/JDEV_SOA_soainfra_server2.png new file mode 100644 index 000000000..4e5e1f768 Binary files /dev/null and b/docs/21.4.2/images/soa-domains/JDEV_SOA_soainfra_server2.png differ diff --git a/docs/21.4.2/images/soa-domains/SOA_Composites_Deploy_using_Jdev.png b/docs/21.4.2/images/soa-domains/SOA_Composites_Deploy_using_Jdev.png new file mode 100644 index 000000000..baaf4f82b Binary files /dev/null and b/docs/21.4.2/images/soa-domains/SOA_Composites_Deploy_using_Jdev.png differ diff --git a/docs/21.4.2/images/soa-domains/custIdentity-custTrust-keystores.png b/docs/21.4.2/images/soa-domains/custIdentity-custTrust-keystores.png new file mode 100644 index 000000000..290492743 Binary files /dev/null and b/docs/21.4.2/images/soa-domains/custIdentity-custTrust-keystores.png differ diff --git a/docs/21.4.2/images/soasuite-logo.png b/docs/21.4.2/images/soasuite-logo.png new file mode 100644 index 000000000..347663f5b Binary files /dev/null and b/docs/21.4.2/images/soasuite-logo.png differ diff --git a/docs/21.4.2/index.html b/docs/21.4.2/index.html new file mode 100644 index 000000000..e393357bb --- /dev/null +++ b/docs/21.4.2/index.html @@ -0,0 +1,4829 @@ + + + + + + + + + + + + Oracle Fusion Middleware on Kubernetes :: Oracle Fusion Middleware on Kubernetes + + + + + + + + + + + + + + + + + + + + + + + + + +
+
+
+ +
+ +
+ +
+ + + + + + + navigation + + + +

Oracle Fusion Middleware on Kubernetes

+

Oracle supports the deployment of the following Oracle Fusion Middleware products on Kubernetes. Click on the appropriate document link below to get started on setting up the product.

+ + + + + + + +
    + + + + + + + + + + + + + + + + + + + +

    +Oracle Access Management +

    + + + + + +

    The WebLogic Kubernetes Operator supports deployment of Oracle Access Management (OAM). Follow the instructions in this guide to set up these Oracle Access Management domains on Kubernetes.

    + + + + + + + + + + + + +

    +Oracle Internet Directory +

    + + + + + +

    Oracle Internet Directory provides a comprehensive Directory Solution for robust Identity Management

    + + + + + + + + + + + + +

    +Oracle Identity Governance +

    + + + + + +

    The WebLogic Kubernetes Operator supports deployment of Oracle Identity Governance. Follow the instructions in this guide to set up Oracle Identity Governance domains on Kubernetes.

    + + + + + + + + + + + + +

    +Oracle SOA Suite +

    + + + + + +

    The Oracle WebLogic Kubernetes Operator (the “operator”) supports deployment of Oracle SOA Suite components such as Oracle Service-Oriented Architecture (SOA), Oracle Service Bus, and Oracle Enterprise Scheduler (ESS). Follow the instructions in this guide to set up these Oracle SOA Suite domains on Kubernetes.

    + + + + + + + + + + + + +

    +Oracle Unified Directory +

    + + + + + +

    Oracle Unified Directory provides a comprehensive Directory Solution for robust Identity Management

    + + + + + + + + + + + + +

    +Oracle Unified Directory Services Manager +

    + + + + + +

    Oracle Unified Directory Services Manager provides an interface for managing instances of Oracle Unified Directory

    + + + + + + + + +
+ + + + + + + +
+ + +
+ + + +
+ +
+
+
+ + + + + + + + + + + + + + + + + + + diff --git a/docs/21.4.2/index.json b/docs/21.4.2/index.json new file mode 100644 index 000000000..26feca2ab --- /dev/null +++ b/docs/21.4.2/index.json @@ -0,0 +1,799 @@ +[ +{ + "uri": "/fmw-kubernetes/21.4.2/soa-domains/installguide/prerequisites/", + "title": "Requirements and limitations", + "tags": [], + "description": "Understand the system requirements and limitations for deploying and running Oracle SOA Suite domains with the WebLogic Kubernetes Operator, including the SOA cluster sizing recommendations.", + "content": "This section provides information about the system requirements and limitations for deploying and running Oracle SOA Suite domains with the WebLogic Kubernetes Operator.\nSystem requirements for Oracle SOA Suite domains For the current production release 21.4.2:\n Operating systems supported: Oracle Linux 7 (UL6+) Red Hat Enterprise Linux 7 (UL3+ only with standalone Kubernetes) Oracle Linux Cloud Native Environment (OLCNE) version 1.3. Kubernetes 1.16.15+, 1.17.13+, 1.18.10+, 1.19.7+, and 1.20.6+ (check with kubectl version). Docker 18.9.1 or 19.03.1+ (check with docker version) or CRI-O 1.20.2+ (check with crictl version | grep RuntimeVersion). Flannel networking v0.9.1-amd64 or later (check with docker images | grep flannel), Calico networking v3.16.1 or later. Helm 3.3.4+ (check with helm version --client --short). WebLogic Kubernetes Operator 3.3.0 (see the operator releases page). Oracle SOA Suite 12.2.1.4 Docker image downloaded from My Oracle Support (MOS patch 33467899). This image contains the latest bundle patch and one-off patches for Oracle SOA Suite. You must have the cluster-admin role to install the operator. The operator does not need the cluster-admin role at runtime. For more information, see the role-based access control (RBAC) documentation. We do not currently support running SOA in non-Linux containers. 
Additionally, see the Oracle SOA Suite documentation for other requirements such as database version. See here for resource sizing information for Oracle SOA Suite domains set up on a Kubernetes cluster.\nLimitations Compared to running a WebLogic Server domain in Kubernetes using the operator, the following limitations currently exist for Oracle SOA Suite domains:\n In this release, Oracle SOA Suite domains are supported using the domain on a persistent volume model only, where the domain home is located in a persistent volume (PV). The \u0026ldquo;domain in image\u0026rdquo; and \u0026ldquo;model in image\u0026rdquo; models are not supported. Also, \u0026ldquo;WebLogic Deploy Tooling (WDT)\u0026rdquo; based deployments are currently not supported. Only configured clusters are supported. Dynamic clusters are not supported for Oracle SOA Suite domains. Note that you can still use all of the scaling features, but you need to define the maximum size of your cluster at domain creation time. Mixed clusters (configured servers targeted to a dynamic cluster) are not supported. The WebLogic Logging Exporter currently supports WebLogic Server logs only. Other logs will not be sent to Elasticsearch. Note, however, that you can use a sidecar with a log handling tool like Logstash or Fluentd to get logs. The WebLogic Monitoring Exporter currently supports WebLogic MBean trees only. Support for JRF and Oracle SOA Suite MBeans is not available. Also, a metrics dashboard specific to Oracle SOA Suite is not available. Instead, use the WebLogic Server dashboard to monitor the Oracle SOA Suite server metrics in Grafana. Some features such as multicast, multitenancy, production redeployment, and Node Manager (although it is used internally for the liveness probe and to start WebLogic Server instances) are not supported in this release. 
Features such as Java Messaging Service whole server migration, consensus leasing, and maximum availability architecture (Oracle SOA Suite EDG setup) are not supported in this release. Enabling or disabling the memory resiliency for Oracle Service Bus using the Enterprise Manager Console is not supported in this release. Zero downtime upgrade (ZDT) of the domain is not supported. For up-to-date information about the features of WebLogic Server that are supported in Kubernetes environments, see My Oracle Support Doc ID 2349228.1.\n" +}, +{ + "uri": "/fmw-kubernetes/21.4.2/", + "title": "Oracle Fusion Middleware on Kubernetes", + "tags": [], + "description": "This document lists all the Oracle Fusion Middleware products deployment supported on Kubernetes.", + "content": "Oracle Fusion Middleware on Kubernetes Oracle supports the deployment of the following Oracle Fusion Middleware products on Kubernetes. Click on the appropriate document link below to get started on setting up the product.\n Oracle Access Management The WebLogic Kubernetes Operator supports deployment of Oracle Access Management (OAM). Follow the instructions in this guide to set up these Oracle Access Management domains on Kubernetes.\n Oracle Internet Directory Oracle Internet Directory provides a comprehensive Directory Solution for robust Identity Management\n Oracle Identity Governance The WebLogic Kubernetes Operator supports deployment of Oracle Identity Governance. Follow the instructions in this guide to set up Oracle Identity Governance domains on Kubernetes.\n Oracle SOA Suite The Oracle WebLogic Kubernetes Operator (the “operator”) supports deployment of Oracle SOA Suite components such as Oracle Service-Oriented Architecture (SOA), Oracle Service Bus, and Oracle Enterprise Scheduler (ESS). 
Follow the instructions in this guide to set up these Oracle SOA Suite domains on Kubernetes.\n Oracle Unified Directory Oracle Unified Directory provides a comprehensive Directory Solution for robust Identity Management\n Oracle Unified Directory Services Manager Oracle Unified Directory Services Manager provides an interface for managing instances of Oracle Unified Directory\n " +}, +{ + "uri": "/fmw-kubernetes/21.4.2/oam/", + "title": "Oracle Access Management", + "tags": [], + "description": "The WebLogic Kubernetes Operator supports deployment of Oracle Access Management (OAM). Follow the instructions in this guide to set up these Oracle Access Management domains on Kubernetes.", + "content": "The WebLogic Kubernetes Operator supports deployment of Oracle Access Management (OAM).\nIn this release, OAM domains are supported using the “domain on a persistent volume” model only, where the domain home is located in a persistent volume (PV).\nThe WebLogic Kubernetes Operator has several key features to assist you with deploying and managing Oracle Access Management domains in a Kubernetes environment. You can:\n Create OAM instances in a Kubernetes persistent volume. This persistent volume can reside in an NFS file system or other Kubernetes volume types. Start servers based on declarative startup parameters and desired states. Expose the OAM Services through external access. Scale OAM domains by starting and stopping Managed Servers on demand. Publish operator and WebLogic Server logs into Elasticsearch and interact with them in Kibana. Monitor the OAM instance using Prometheus and Grafana. Current production release The current production release for the Oracle Access Management domain deployment on Kubernetes is 21.4.2. This release uses the WebLogic Kubernetes Operator version 3.3.0.\nThis release of the documentation can also be used for 3.1.X and 3.2.0 WebLogic Kubernetes Operator. 
For 3.0.X WebLogic Kubernetes Operator refer to Version 21.4.1\nRecent changes and known issues See the Release Notes for recent changes and known issues for Oracle Access Management domain deployment on Kubernetes.\nLimitations See here for limitations in this release.\nGetting started For detailed information about deploying Oracle Access Management domains, start at Prerequisites and follow this documentation sequentially.\nIf performing an Enterprise Deployment, refer to the Enterprise Deployment Guide for Oracle Identity and Access Management in a Kubernetes Cluster instead.\nDocumentation for earlier releases To view documentation for an earlier release, see:\n Version 21.4.1 " +}, +{ + "uri": "/fmw-kubernetes/21.4.2/oam/release-notes/", + "title": "Release Notes", + "tags": [], + "description": "", + "content": "Review the latest changes and known issues for Oracle Access Management on Kubernetes.\nRecent changes Date Version Change November, 2021 21.4.2 Supports Oracle Access Management domain deployment using WebLogic Kubernetes Operator 3.3.0. Voyager ingress removed as no longer supported. October 2021 21.4.1 A) References to supported Kubernetes, Helm and Docker versions removed and replaced with Support note reference. B) Namespace and domain names changed to be consistent with Enterprise Deployment Guide for Oracle Identity and Access Management in a Kubernetes Cluster. C) Additional post configuration tasks added. D) Upgrading a Kubernetes Cluster and Security Hardening removed as vendor specific. November 2020 20.4.1 Initial release of Oracle Access Management on Kubernetes. " +}, +{ + "uri": "/fmw-kubernetes/21.4.2/oid/release-notes/", + "title": "Release Notes", + "tags": [], + "description": "", + "content": "Review the latest changes and known issues for Oracle Internet Directory on Kubernetes.\nRecent changes Date Version Change October, 2021 21.4.1 Initial release of Oracle Internet Directory on Kubernetes. 
" +}, +{ + "uri": "/fmw-kubernetes/21.4.2/oig/release-notes/", + "title": "Release Notes", + "tags": [], + "description": "", + "content": "Review the latest changes and known issues for Oracle Identity Governance on Kubernetes.\nRecent changes Date Version Change November, 2021 21.4.2 Supports Oracle Identity Governance domain deployment using WebLogic Kubernetes Operator 3.3.0. Voyager ingress removed as no longer supported. October 2021 21.4.1 A) References to supported Kubernetes, Helm and Docker versions removed and replaced with Support note reference. B) Namespace and domain names changed to be consistent with Enterprise Deployment Guide for Oracle Identity and Access Management in a Kubernetes Cluster. C) Addtional post configuration tasks added. D) New section on how to start Design Console in a container. E) Upgrading a Kubernetes Cluster and Security Hardening removed as vendor specific. November 2020 20.4.1 Initial release of Identity Governance on Kubernetes. " +}, +{ + "uri": "/fmw-kubernetes/21.4.2/oud/release-notes/", + "title": "Release Notes", + "tags": [], + "description": "", + "content": "Review the latest changes and known issues for Oracle Unified Directory on Kubernetes.\nRecent changes Date Version Change November 2021 21.4.2 Voyager ingress removed as no longer supported. October 2021 21.4.1 A) References to supported Kubernetes, Helm and Docker versions removed and replaced with Support note reference. B) Namespace and domain names changed to be consistent with Enterprise Deployment Guide for Oracle Identity and Access Management in a Kubernetes Cluster. C) Upgrading a Kubernetes Cluster and Security Hardening removed as vendor specific. November 2020 20.4.1 Initial release of Oracle Unified Directory on Kubernetes. 
" +}, +{ + "uri": "/fmw-kubernetes/21.4.2/oudsm/release-notes/", + "title": "Release Notes", + "tags": [], + "description": "", + "content": "Review the latest changes and known issues for Oracle Unified Directory Services Manager on Kubernetes.\nRecent changes Date Version Change November 2021 21.4.2 Voyager ingress removed as no longer supported. October 2021 21.4.1 A) References to supported Kubernetes, Helm and Docker versions removed and replaced with Support note reference. B) Namespace and domain names changed to be consistent with Enterprise Deployment Guide for Oracle Identity and Access Management in a Kubernetes Cluster. C) Upgrading a Kubernetes Cluster and Security Hardening removed as vendor specific. November 2020 20.4.1 Initial release of Oracle Unified Directory Services Manager on Kubernetes. " +}, +{ + "uri": "/fmw-kubernetes/21.4.2/soa-domains/release-notes/", + "title": "Release Notes", + "tags": [], + "description": "", + "content": "Review the latest changes and known issues for Oracle SOA Suite on Kubernetes.\nRecent changes Date Version Change November 30, 2021 21.4.2 Supports Oracle SOA Suite 12.2.1.4 domains deployment using October 2021 PSU and known bug fixes. Oracle SOA Suite 12.2.1.4 Docker image for this release can be downloaded from My Oracle Support (MOS patch 33467899). August 6, 2021 21.3.2 Supports Oracle SOA Suite 12.2.1.4 domains deployment using July 2021 PSU and known bug fixes. Oracle SOA Suite 12.2.1.4 Docker image for this release can be downloaded from My Oracle Support (MOS patch 33125465). May 31, 2021 21.2.2 Supports Oracle SOA Suite 12.2.1.4 domains deployment using April 2021 PSU and known bug fixes. Oracle SOA Suite 12.2.1.4 Docker image for this release can be downloaded from My Oracle Support (MOS patch 32794257). February 28, 2021 21.1.2 Supports Oracle SOA Suite 12.2.1.4 domains deployment using January 2021 PSU and known bug fixes. 
Oracle SOA Suite 12.2.1.4 Docker image for this release can be downloaded from My Oracle Support (MOS patch 32398542). November 30, 2020 20.4.2 Supports Oracle SOA Suite 12.2.1.4 domains deployment using October 2020 PSU and known bug fixes. Added HEALTHCHECK support for Oracle SOA Suite docker image. Oracle SOA Suite 12.2.1.4 Docker image for this release can be downloaded from My Oracle Support (MOS patch 32215749). October 3, 2020 20.3.3 Certified Oracle WebLogic Kubernetes Operator version 3.0.1. Kubernetes 1.14.8+, 1.15.7+, 1.16.0+, 1.17.0+, and 1.18.0+ support. Flannel is the only supported CNI in this release. SSL enabling for the Administration Server and Managed Servers is supported. Only Oracle SOA Suite 12.2.1.4 is supported. Known issues Overriding tuning parameters is not supported using configuration overrides Deployments in WebLogic administration console display unexpected error Enterprise Manager console may display ADF_FACES-30200 error Configure the external URL access for Oracle SOA Suite composite applications Configure the external access for the Oracle Enterprise Scheduler WebServices WSDL URLs Missing gif images in Oracle Service Bus console pipeline configuration page " +}, +{ + "uri": "/fmw-kubernetes/21.4.2/soa-domains/adminguide/deploying-composites/supportjdev/", + "title": "Deploy using JDeveloper", + "tags": [], + "description": "Deploy Oracle SOA Suite and Oracle Service Bus composite applications from Oracle JDeveloper to Oracle SOA Suite in the WebLogic Kubernetes Operator environment.", + "content": "Learn how to deploy Oracle SOA Suite and Oracle Service Bus composite applications from Oracle JDeveloper (running outside the Kubernetes network) to an Oracle SOA Suite instance in the WebLogic Kubernetes Operator environment.\nUse JDeveloper for development and test environments only. 
For a production environment, you should deploy using Application Control and WLST methods.\n Deploy Oracle SOA Suite and Oracle Service Bus composite applications to Oracle SOA Suite from JDeveloper To deploy Oracle SOA Suite and Oracle Service Bus composite applications from Oracle JDeveloper, the Administration Server must be configured to expose a T3 channel. The WebLogic Kubernetes Operator provides an option to expose a T3 channel for the Administration Server using the exposeAdminT3Channel setting during domain creation, then the matching T3 service can be used to connect. By default, when exposeAdminT3Channel is set, the WebLogic Kubernetes Operator environment exposes the NodePort for the T3 channel of the NetworkAccessPoint at 30012 (use t3ChannelPort to configure the port to a different value).\nIf you miss enabling exposeAdminT3Channel during domain creation, follow Expose a T3/T3S Channel for the Administration Server to expose a T3 channel manually.\nPrerequisites Get the Kubernetes cluster master address and verify the T3 port that will be used for creating application server connections. Use the following command to get the T3 port:\n$ kubectl get service \u0026lt;domainUID\u0026gt;-\u0026lt;AdministrationServerName\u0026gt;-external -n \u0026lt;namespace\u0026gt;-o jsonpath='{.spec.ports[0].nodePort}' For example:\n$ kubectl get service soainfra-adminserver-external -n soans -o jsonpath='{.spec.ports[0].nodePort}' Oracle SOA Suite in the WebLogic Kubernetes Operator environment is deployed in a Reference Configuration domain. If a SOA project is developed in Classic mode JDeveloper displays a Mismatch notification in the Deploy Composite Wizard. By default, JDeveloper is in Classic mode. To develop SOA projects in Reference Configuration mode, you must manually enable this feature in JDeveloper: a. From the File menu, select Tools, then Preferences. b. Select Reference Configuration Settings. c. 
Select Enable Reference Configuration settings in adapters.\n JDeveloper needs to access the Servers during deployment. In the WebLogic Kubernetes Operator environment, Administration and Managed Servers are pods and cannot be accessed directly by JDeveloper. As a workaround, you must configure the reachability of the Managed Servers:\nThe Managed Server T3 port is not exposed by default and opening this will have a security risk as the authentication method here is based on a userid/password. It is not recommended to do this on production instances.\n Decide on an external IP address to be used to configure access to the Managed Servers. Master or worker node IP address can be used to configure Managed Server reachability. In these steps, the Kubernetes cluster master IP is used for demonstration.\n Get the pod names of the Administration Server and Managed Servers (that is, \u0026lt;domainUID\u0026gt;-\u0026lt;server name\u0026gt;), which will be used to map in /etc/hosts.\n Update /etc/hosts (or in Windows, C:\\Windows\\System32\\Drivers\\etc\\hosts) on the host where JDeveloper is running with the entries below, where\n\u0026lt;Master IP\u0026gt; \u0026lt;Administration Server pod name\u0026gt; \u0026lt;Master IP\u0026gt; \u0026lt;Managed Server1 pod name\u0026gt; \u0026lt;Master IP\u0026gt; \u0026lt;Managed Server2 pod name\u0026gt; Sample /etc/hosts entries looks as follows, where X.X.X.X is the master node IP address:\nX.X.X.X soainfra-adminserver X.X.X.X soainfra-soa-server1 X.X.X.X soainfra-soa-server2 Get the Kubernetes service name of the Oracle SOA Suite cluster to access externally with the master IP (or external IP):\n$ kubectl get service \u0026lt;domainUID\u0026gt;-cluster-\u0026lt;soa-cluster\u0026gt; -n \u0026lt;namespace\u0026gt; For example:\n$ kubectl get service soainfra-cluster-soa-cluster -n soans Create a Kubernetes service to expose the Oracle SOA Suite cluster service (\u0026lt;domainUID\u0026gt;-cluster-\u0026lt;soa-cluster\u0026gt;) 
externally with same port as the Managed Server:\n$ kubectl expose service \u0026lt;domainUID\u0026gt;-cluster-\u0026lt;soa-cluster\u0026gt; --name \u0026lt;domainUID\u0026gt;-\u0026lt;soa-cluster\u0026gt;-ext --external-ip=\u0026lt;Master IP\u0026gt; -n \u0026lt;namespace\u0026gt; For example:\n$ kubectl expose service soainfra-cluster-soa-cluster --name soainfra-cluster-soa-cluster-ext --external-ip=X.X.X.X -n soans In a production environment, exposing the SOA cluster service with an external IP address is not recommended, as it can cause message drops on the SOA Managed Servers.\n Create an application server connection in JDeveloper Create a new application server connection (for example wls-k8s-op-connection) in JDeveloper: In the configuration page, provide the WebLogic Hostname as the Kubernetes Master Address.\n Update the Port as the T3 port (default is 30012) obtained in Prerequisites.\n Enter the WebLogic Domain value (domainUID).\n Test the connection to verify it is successful. Deploy SOA composite applications using JDeveloper In JDeveloper, right-click the SOA project you want to deploy and select Deploy to display the deployment wizard. In the Deployment Action page, select Deploy to Application Server and click Next. In the Deployment Configuration page, select the appropriate options and click Next. In the Select server page, select the application server connection (wls-k8s-op-connection) that was created earlier and click Next. If the Prerequisites were configured correctly, the lookup discovers the Managed Servers for deploying the composite. Using the application server connection, the Managed Servers (Oracle SOA Suite cluster) are listed on the SOA Servers page. Select the Oracle SOA Suite cluster and click Next. On the Summary page, click Finish to start deploying the composites to the Oracle SOA Suite cluster. Verify logs on JDeveloper to confirm successful deployment. 
Enter the soa-infra URLs in a browser to confirm the composites are deployed on both servers of the Oracle SOA Suite cluster. Deploy Oracle Service Bus composite applications using JDeveloper In JDeveloper, right-click the Oracle Service Bus project you want to deploy and select Deploy to display the deployment wizard. In the Deployment Action page, select Deploy to Application Server and click Next. In the Select Server page, select the application server connection (wls-k8s-op-connection) that was created earlier and click Next. On the Summary page, click Finish to start deploying the composites to the Oracle Service Bus cluster. In JDeveloper, verify logs to confirm successful deployment. In the Oracle Service Bus Console, click Launch Test Console to verify that the Oracle Service Bus composite application is deployed successfully. " +}, +{ + "uri": "/fmw-kubernetes/21.4.2/soa-domains/appendix/soa-cluster-sizing-info/", + "title": "Domain resource sizing", + "tags": [], + "description": "Describes the resourse sizing information for Oracle SOA Suite domains setup on Kubernetes cluster.", + "content": "Oracle SOA cluster sizing recommendations Oracle SOA Normal Usage Moderate Usage High Usage Administration Server No of CPU core(s) : 1, Memory : 4GB No of CPU core(s) : 1, Memory : 4GB No of CPU core(s) : 1, Memory : 4GB Number of Managed Servers 2 2 4 Configurations per Managed Server No of CPU core(s) : 2, Memory : 16GB No of CPU core(s) : 4, Memory : 16GB No of CPU core(s) : 6, Memory : 16-32GB PV Storage Minimum 250GB Minimum 250GB Minimum 500GB " +}, +{ + "uri": "/fmw-kubernetes/21.4.2/soa-domains/patch_and_upgrade/patch-an-image/", + "title": "Patch an image", + "tags": [], + "description": "Create a patched Oracle SOA Suite image using the WebLogic Image Tool.", + "content": "Oracle releases Oracle SOA Suite images regularly with the latest bundle and recommended interim patches in My Oracle Support (MOS). 
However, if you need to create images with new bundle and interim patches, you can build these images using the WebLogic Image Tool.\nIf you have access to the Oracle SOA Suite patches, you can patch an existing Oracle SOA Suite image with a bundle patch and interim patches. Oracle recommends that you use the WebLogic Image Tool to patch the Oracle SOA Suite image.\n Recommendations:\n Use the WebLogic Image Tool create feature for patching the Oracle SOA Suite Docker image with a bundle patch and multiple interim patches. This is the recommended approach because it optimizes the size of the image. Use the WebLogic Image Tool update feature for patching the Oracle SOA Suite Docker image with a single interim patch. Note that the patched image size may increase considerably due to additional image layers introduced by the patch application tool. Apply the patched Oracle SOA Suite image To update an Oracle SOA Suite domain with a patched image, first make sure the patched image is pulled or created and available on the nodes in your Kubernetes cluster. Once the patched image is available, you can follow these steps to update the Oracle SOA Suite domain with a patched image:\n Stop all servers Update user permissions of the domain PV storage Address post-installation requirements Apply the patched image Stop all servers Note: The following steps are applicable only for non-Zero Downtime Patching. For Zero Downtime Patching, go to Address post-installation requirements.\n Before applying the patch, stop all servers in the domain:\n In the domain.yaml configuration file, update the spec.serverStartPolicy field value to NEVER.\n Shut down the domain (stop all servers) by applying the updated domain.yaml file:\n$ kubectl apply -f domain.yaml Update user permissions of the domain PV storage The Oracle SOA Suite image for release 21.4.2 has an oracle user with UID 1000, with the default group set to root. 
Before applying the patched image, update the user permissions of the domain persistent volume (PV) to set the group to root:\n$ sudo chown -R 1000:0 /scratch/k8s_dir/SOA Address post-installation requirements If the patches in the patched Oracle SOA Suite image have any post-installation steps, follow these steps:\n Create a Kubernetes pod with domain home access Perform post-installation steps Create a Kubernetes pod with domain home access Get domain home persistence volume claim details for the Oracle SOA Suite domain.\nFor example, to list the persistent volume claim details in the namespace soans:\n$ kubectl get pvc -n soans Sample output showing the persistent volume claim is soainfra-domain-pvc:\nNAME STATUS VOLUME CAPACITY ACCESS MODES STORAGECLASS AGE soainfra-domain-pvc Bound soainfra-domain-pv 10Gi RWX soainfra-domain-storage-class xxd Create a YAML soapostinstall.yaml using the domain home persistence volume claim.\nFor example, using soainfra-domain-pvc per the sample output:\n Note: Replace soasuite:12.2.1.4-30761841 with the patched image in the following sample YAML:\n apiVersion: v1 kind: Pod metadata: labels: run: soapostinstall name: soapostinstall namespace: soans spec: containers: - image: soasuite:12.2.1.4-30761841 name: soapostinstall command: [\u0026quot;/bin/bash\u0026quot;, \u0026quot;-c\u0026quot;, \u0026quot;sleep infinity\u0026quot;] imagePullPolicy: IfNotPresent volumeMounts: - name: soainfra-domain-storage-volume mountPath: /u01/oracle/user_projects volumes: - name: soainfra-domain-storage-volume persistentVolumeClaim: claimName: soainfra-domain-pvc Apply the YAML to create the Kubernetes pod:\n$ kubectl apply -f soapostinstall.yaml Perform post-installation steps If you need to perform any post-installation steps on the domain home:\n Start a bash shell in the soapostinstall pod:\n$ kubectl exec -it -n soans soapostinstall -- bash This opens a bash shell in the running soapostinstall pod:\n[oracle@soapostinstall oracle]$ Use the 
bash shell of the soapostinstall pod and perform the required steps on the domain home.\n After successful completion of the post-installation steps, you can delete the soapostinstall pod:\n$ kubectl delete -f soapostinstall.yaml Apply the patched image After completing the required SOA schema upgrade and post-installation steps, start up the domain:\n In the domain.yaml configuration file, update the image field value with the patched image:\nFor example:\n image: soasuite:12.2.1.4-30761841 In case of non-Zero Downtime Patching, update the spec.serverStartPolicy field value to IF_NEEDED in domain.yaml.\n Apply the updated domain.yaml configuration file to start up the domain.\n$ kubectl apply -f domain.yaml Note: In case of non-Zero Downtime Patching, the complete domain startup happens, as the servers in the domain were stopped earlier. For Zero Downtime Patching, the servers in the domain are rolling restarted.\n Verify the domain is updated with the patched image:\n$ kubectl describe domain \u0026lt;domainUID\u0026gt; -n \u0026lt;domain-namespace\u0026gt;|grep \u0026quot;Image:\u0026quot; Sample output:\n$ kubectl describe domain soainfra -n soans |grep \u0026quot;Image:\u0026quot; Image: soasuite:12.2.1.4-30761841 $ " +}, +{ + "uri": "/fmw-kubernetes/21.4.2/soa-domains/adminguide/configure-load-balancer/", + "title": "Set up a load balancer", + "tags": [], + "description": "Configure different load balancers for Oracle SOA Suite domains.", + "content": "The WebLogic Kubernetes Operator supports ingress-based load balancers such as Traefik and NGINX (kubernetes/ingress-nginx). 
It also supports Apache web tier load balancer.\n Traefik Configure the ingress-based Traefik load balancer for Oracle SOA Suite domains.\n NGINX Configure the ingress-based NGINX load balancer for Oracle SOA Suite domains.\n Apache web tier Configure the Apache web tier load balancer for Oracle SOA Suite domains.\n " +}, +{ + "uri": "/fmw-kubernetes/21.4.2/soa-domains/adminguide/configure-load-balancer/traefik/", + "title": "Traefik", + "tags": [], + "description": "Configure the ingress-based Traefik load balancer for Oracle SOA Suite domains.", + "content": "This section provides information about how to install and configure the ingress-based Traefik load balancer (version 2.2.1 or later for production deployments) to load balance Oracle SOA Suite domain clusters. You can configure Traefik for non-SSL, SSL termination, and end-to-end SSL access of the application URL.\nFollow these steps to set up Traefik as a load balancer for an Oracle SOA Suite domain in a Kubernetes cluster:\n Install the Traefik (ingress-based) load balancer Create an Ingress for the domain Verify domain application URL access Uninstall the Traefik ingress Uninstall Traefik Install the Traefik (ingress-based) load balancer Use Helm to install the Traefik (ingress-based) load balancer. Use the values.yaml file in the sample but set kubernetes.namespaces specifically.\n$ cd ${WORKDIR} $ kubectl create namespace traefik $ helm repo add traefik https://containous.github.io/traefik-helm-chart Sample output:\n\u0026#34;traefik\u0026#34; has been added to your repositories Install Traefik:\n$ helm install traefik traefik/traefik \\ --namespace traefik \\ --values charts/traefik/values.yaml \\ --set \u0026#34;kubernetes.namespaces={traefik}\u0026#34; \\ --set \u0026#34;service.type=NodePort\u0026#34; --wait Click here to see the sample output. 
LAST DEPLOYED: Sun Sep 13 21:32:00 2020 NAMESPACE: traefik STATUS: deployed REVISION: 1 TEST SUITE: None A sample values.yaml for deployment of Traefik 2.2.x:\nimage: name: traefik tag: 2.2.8 pullPolicy: IfNotPresent ingressRoute: dashboard: enabled: true # Additional ingressRoute annotations (e.g. for kubernetes.io/ingress.class) annotations: {} # Additional ingressRoute labels (e.g. for filtering IngressRoute by custom labels) labels: {} providers: kubernetesCRD: enabled: true kubernetesIngress: enabled: true # IP used for Kubernetes Ingress endpoints ports: traefik: port: 9000 expose: true # The exposed port for this service exposedPort: 9000 # The port protocol (TCP/UDP) protocol: TCP web: port: 8000 # hostPort: 8000 expose: true exposedPort: 30305 nodePort: 30305 # The port protocol (TCP/UDP) protocol: TCP # Use nodeport if set. This is useful if you have configured Traefik in a # LoadBalancer # nodePort: 32080 # Port Redirections # Added in 2.2, you can make permanent redirects via entrypoints. # https://docs.traefik.io/routing/entrypoints/#redirection # redirectTo: websecure websecure: port: 8443 # # hostPort: 8443 expose: true exposedPort: 30443 # The port protocol (TCP/UDP) protocol: TCP nodePort: 30443 Verify the Traefik status and find the port number of the SSL and non-SSL services:\n$ kubectl get all -n traefik Click here to see the sample output. 
NAME READY STATUS RESTARTS AGE pod/traefik-5fc4947cf9-fbl9r 1/1 Running 5 7d17h NAME TYPE CLUSTER-IP EXTERNAL-IP PORT(S) AGE service/traefik NodePort 10.100.195.70 \u0026lt;none\u0026gt; 9000:31288/TCP,30305:30305/TCP,30443:30443/TCP 7d17h NAME READY UP-TO-DATE AVAILABLE AGE deployment.apps/traefik 1/1 1 1 7d17h NAME DESIRED CURRENT READY AGE replicaset.apps/traefik-5fc4947cf9 1 1 1 7d17h Access the Traefik dashboard through the URL http://$(hostname -f):31288, with the HTTP host traefik.example.com:\n$ curl -H \u0026#34;host: $(hostname -f)\u0026#34; http://$(hostname -f):31288/dashboard/ Note: Make sure that you specify a fully qualified node name for $(hostname -f)\n Configure Traefik to manage ingresses created in this namespace, where traefik is the Traefik namespace and soans is the namespace of the domain:\n $ helm upgrade traefik traefik/traefik --namespace traefik --reuse-values \\ --set \u0026#34;kubernetes.namespaces={traefik,soans}\u0026#34; Click here to see the sample output. Release \u0026#34;traefik\u0026#34; has been upgraded. Happy Helming! NAME: traefik LAST DEPLOYED: Sun Sep 13 21:32:12 2020 NAMESPACE: traefik STATUS: deployed REVISION: 2 TEST SUITE: None Create an ingress for the domain Create an ingress for the domain in the domain namespace by using the sample Helm chart. Here path-based routing is used for ingress. Sample values for default configuration are shown in the file ${WORKDIR}/charts/ingress-per-domain/values.yaml. By default, type is TRAEFIK, sslType is NONSSL, and domainType is soa. These values can be overridden by passing values through the command line or can be edited in the sample file values.yaml based on the type of configuration (NONSSL, SSL, and E2ESSL).\nIf needed, you can update the ingress YAML file to define more path rules (in section spec.rules.host.http.paths) based on the domain application URLs that need to be accessed. 
The template YAML file for the Traefik (ingress-based) load balancer is located at ${WORKDIR}/charts/ingress-per-domain/templates/traefik-ingress.yaml.\n Note: See here for all the configuration parameters.\n Install ingress-per-domain using Helm for NONSSL configuration:\n$ cd ${WORKDIR} $ helm install soa-traefik-ingress \\ charts/ingress-per-domain \\ --namespace soans \\ --values charts/ingress-per-domain/values.yaml \\ --set \u0026#34;traefik.hostname=$(hostname -f)\u0026#34; Sample output:\nNAME: soa-traefik-ingress LAST DEPLOYED: Mon Jul 20 11:44:13 2020 NAMESPACE: soans STATUS: deployed REVISION: 1 TEST SUITE: None For secured access (SSL termination and E2ESSL) to the Oracle SOA Suite application, create a certificate, and generate a Kubernetes secret:\n$ openssl req -x509 -nodes -days 365 -newkey rsa:2048 -keyout /tmp/tls1.key -out /tmp/tls1.crt -subj \u0026#34;/CN=*\u0026#34; $ kubectl -n soans create secret tls soainfra-tls-cert --key /tmp/tls1.key --cert /tmp/tls1.crt Create the Traefik TLSStore custom resource.\nIn case of SSL termination, Traefik should be configured to use the user-defined SSL certificate. If the user-defined SSL certificate is not configured, Traefik will create a default SSL certificate. To configure a user-defined SSL certificate for Traefik, use the TLSStore custom resource. The Kubernetes secret created with the SSL certificate should be referenced in the TLSStore object. 
Run the following command to create the TLSStore:\n$ cat \u0026lt;\u0026lt;EOF | kubectl apply -f - apiVersion: traefik.containo.us/v1alpha1 kind: TLSStore metadata: name: default namespace: soans spec: defaultCertificate: secretName: soainfra-tls-cert EOF Install ingress-per-domain using Helm for SSL configuration.\nThe Kubernetes secret name should be updated in the template file.\nThe template file also contains the following annotations:\ntraefik.ingress.kubernetes.io/router.entrypoints: websecure traefik.ingress.kubernetes.io/router.tls: \u0026#34;true\u0026#34; traefik.ingress.kubernetes.io/router.middlewares: soans-wls-proxy-ssl@kubernetescrd The entry point for SSL termination access and the Middleware name should be updated in the annotation. The Middleware name should be in the form \u0026lt;namespace\u0026gt;-\u0026lt;middleware name\u0026gt;@kubernetescrd.\n$ cd ${WORKDIR} $ helm install soa-traefik-ingress \\ charts/ingress-per-domain \\ --namespace soans \\ --values charts/ingress-per-domain/values.yaml \\ --set \u0026#34;traefik.hostname=$(hostname -f)\u0026#34; \\ --set sslType=SSL Sample output:\nNAME: soa-traefik-ingress LAST DEPLOYED: Mon Jul 20 11:44:13 2020 NAMESPACE: soans STATUS: deployed REVISION: 1 TEST SUITE: None Install ingress-per-domain using Helm for E2ESSL configuration.\n$ cd ${WORKDIR} $ helm install soa-traefik-ingress \\ charts/ingress-per-domain \\ --namespace soans \\ --values charts/ingress-per-domain/values.yaml \\ --set sslType=E2ESSL Sample output:\nNAME: soa-traefik-ingress LAST DEPLOYED: Fri Apr 9 09:47:27 2021 NAMESPACE: soans STATUS: deployed REVISION: 1 TEST SUITE: None For NONSSL access to the Oracle SOA Suite application, get the details of the services by the ingress:\n$ kubectl describe ingress soainfra-traefik -n soans Click here to see all services supported by the above deployed ingress. 
Name: soainfra-traefik Namespace: soans Address: Default backend: default-http-backend:80 (\u0026lt;error: endpoints \u0026#34;default-http-backend\u0026#34; not found\u0026gt;) Rules: Host Path Backends ---- ---- -------- www.example.com /console soainfra-adminserver:7001 (10.244.0.45:7001) /em soainfra-adminserver:7001 (10.244.0.45:7001) /weblogic/ready soainfra-adminserver:7001 (10.244.0.45:7001) soainfra-cluster-soa-cluster:8001 (10.244.0.46:8001,10.244.0.47:8001) /soa-infra soainfra-cluster-soa-cluster:8001 (10.244.0.46:8001,10.244.0.47:8001) /soa/composer soainfra-cluster-soa-cluster:8001 (10.244.0.46:8001,10.244.0.47:8001) /integration/worklistapp soainfra-cluster-soa-cluster:8001 (10.244.0.46:8001,10.244.0.47:8001) Annotations: kubernetes.io/ingress.class: traefik Events: \u0026lt;none\u0026gt; For SSL access to the Oracle SOA Suite application, get the details of the services by the above deployed ingress:\n$ kubectl describe ingress soainfra-traefik -n soans Click here to see all services supported by the above deployed ingress. 
``` Name: soainfra-traefik Namespace: soans Address: Default backend: default-http-backend:80 (\u0026lt;error: endpoints \u0026quot;default-http-backend\u0026quot; not found\u0026gt;) TLS: soainfra-tls-cert terminates www.example.com Rules: Host Path Backends ---- ---- -------- www.example.com /console soainfra-adminserver:7001 () /em soainfra-adminserver:7001 () /weblogic/ready soainfra-adminserver:7001 () soainfra-cluster-soa-cluster:8001 () /soa-infra soainfra-cluster-soa-cluster:8001 () /soa/composer soainfra-cluster-soa-cluster:8001 () /integration/worklistapp soainfra-cluster-soa-cluster:8001 () Annotations: kubernetes.io/ingress.class: traefik meta.helm.sh/release-name: soa-traefik-ingress meta.helm.sh/release-namespace: soans traefik.ingress.kubernetes.io/router.entrypoints: websecure traefik.ingress.kubernetes.io/router.middlewares: soans-wls-proxy-ssl@kubernetescrd traefik.ingress.kubernetes.io/router.tls: true Events: \u0026lt;none\u0026gt; ``` For E2ESSL access to the Oracle SOA Suite application, get the details of the services by the above deployed ingress:\n$ kubectl describe IngressRouteTCP soainfra-traefik -n soans\t Click here to see all services supported by the above deployed ingress. 
``` Name: soa-cluster-routetcp Namespace: soans Labels: app.kubernetes.io/managed-by=Helm Annotations: meta.helm.sh/release-name: soa-traefik-ingress meta.helm.sh/release-namespace: soans API Version: traefik.containo.us/v1alpha1 Kind: IngressRouteTCP Metadata: Creation Timestamp: 2021-04-09T09:47:27Z Generation: 1 Managed Fields: API Version: traefik.containo.us/v1alpha1 Fields Type: FieldsV1 fieldsV1: f:metadata: f:annotations: .: f:meta.helm.sh/release-name: f:meta.helm.sh/release-namespace: f:labels: .: f:app.kubernetes.io/managed-by: f:spec: .: f:entryPoints: f:routes: f:tls: .: f:passthrough: Manager: Go-http-client Operation: Update Time: 2021-04-09T09:47:27Z Resource Version: 548305 Self Link: /apis/traefik.containo.us/v1alpha1/namespaces/soans/ingressroutetcps/soa-cluster-routetcp UID: 933e524c-b773-474b-a87f-560d69f08d4b Spec: Entry Points: websecure Routes: Match: HostSNI(`HostName`) Services: Termination Delay: 400 Name: soainfra-adminserver Port: 7002 Weight: 3 Tls: Passthrough: true Events: \u0026lt;none\u0026gt; ``` To confirm that the load balancer noticed the new ingress and is successfully routing to the domain server pods, you can send a request to the URL for the \u0026ldquo;WebLogic ReadyApp framework\u0026rdquo;, which should return an HTTP 200 status code, as follows:\n$ curl -v http://${LOADBALANCER_HOSTNAME}:${LOADBALANCER_PORT}/weblogic/ready * Trying 149.87.129.203... 
\u0026gt; GET http://${LOADBALANCER_HOSTNAME}:${LOADBALANCER_PORT}/weblogic/ready HTTP/1.1 \u0026gt; User-Agent: curl/7.29.0 \u0026gt; Accept: */* \u0026gt; Proxy-Connection: Keep-Alive \u0026gt; host: $(hostname -f) \u0026gt; \u0026lt; HTTP/1.1 200 OK \u0026lt; Date: Sat, 14 Mar 2020 08:35:03 GMT \u0026lt; Vary: Accept-Encoding \u0026lt; Content-Length: 0 \u0026lt; Proxy-Connection: Keep-Alive \u0026lt; * Connection #0 to host localhost left intact Verify domain application URL access For NONSSL configuration After setting up the Traefik (ingress-based) load balancer, verify that the domain application URLs are accessible through the non-SSL load balancer port 30305 for HTTP access. The sample URLs for Oracle SOA Suite domain of type soa are:\nhttp://${LOADBALANCER_HOSTNAME}:${LOADBALANCER-Non-SSLPORT}/weblogic/ready http://${LOADBALANCER_HOSTNAME}:${LOADBALANCER-Non-SSLPORT}/console http://${LOADBALANCER_HOSTNAME}:${LOADBALANCER-Non-SSLPORT}/em http://${LOADBALANCER_HOSTNAME}:${LOADBALANCER-Non-SSLPORT}/soa-infra http://${LOADBALANCER_HOSTNAME}:${LOADBALANCER-Non-SSLPORT}/soa/composer http://${LOADBALANCER_HOSTNAME}:${LOADBALANCER-Non-SSLPORT}/integration/worklistapp For SSL configuration After setting up the Traefik (ingress-based) load balancer, verify that the domain applications are accessible through the SSL load balancer port 30443 for HTTPS access. 
The sample URLs for Oracle SOA Suite domain of type soa are:\nhttps://${LOADBALANCER_HOSTNAME}:${LOADBALANCER-SSLPORT}/weblogic/ready https://${LOADBALANCER_HOSTNAME}:${LOADBALANCER-SSLPORT}/console https://${LOADBALANCER_HOSTNAME}:${LOADBALANCER-SSLPORT}/em https://${LOADBALANCER_HOSTNAME}:${LOADBALANCER-SSLPORT}/soa-infra https://${LOADBALANCER_HOSTNAME}:${LOADBALANCER-SSLPORT}/soa/composer https://${LOADBALANCER_HOSTNAME}:${LOADBALANCER-SSLPORT}/integration/worklistapp For E2ESSL configuration After setting up the Traefik (ingress-based) load balancer, verify that the domain applications are accessible through the SSL load balancer port 30443 for HTTPS access.\n To access the application URLs from the browser, update /etc/hosts on the browser host (in Windows, C:\Windows\System32\Drivers\etc\hosts) with the entries below\nX.X.X.X admin.org X.X.X.X soa.org X.X.X.X osb.org Note: The value of X.X.X.X is the host IP address on which this ingress is deployed.\n Note: If you are behind any corporate proxy, make sure to update the browser proxy settings appropriately to access the host names updated in the /etc/hosts file.\n The sample URLs for Oracle SOA Suite domain of type soa are:\nhttps://admin.org:${LOADBALANCER-SSLPORT}/weblogic/ready https://admin.org:${LOADBALANCER-SSLPORT}/console https://admin.org:${LOADBALANCER-SSLPORT}/em https://soa.org:${LOADBALANCER-SSLPORT}/soa-infra https://soa.org:${LOADBALANCER-SSLPORT}/soa/composer https://soa.org:${LOADBALANCER-SSLPORT}/integration/worklistapp Uninstall the Traefik ingress Uninstall and delete the ingress deployment:\n$ helm delete soa-traefik-ingress -n soans Uninstall Traefik $ helm delete traefik -n traefik " +}, +{ + "uri": "/fmw-kubernetes/21.4.2/oud/create-oud-instances/create-oud-instances-using-samples/", + "title": "a. 
Create Oracle Unified Directory Instances Using Samples", + "tags": [], + "description": "Samples for deploying Oracle Unified Directory instances to a Kubernetes POD.", + "content": " Introduction Preparing the Environment for Container Creation Create Kubernetes Namespace Create Secrets for User IDs and Passwords Prepare a Host Directory to be used for Filesystem Based PersistentVolume Create PersistentVolume (PV) and PersistentVolumeClaim (PVC) for your Namespace Directory Server (instanceType=Directory) Directory Server (instanceType=Directory) as a Kubernetes Service Proxy Server (instanceType=Proxy) as a Kubernetes Service Replication Server (instanceType=Replication) as a Kubernetes Service Directory Server/Service added to existing Replication Server/Service (instanceType=AddDS2RS) Appendix A : Reference Introduction The Oracle Unified Directory deployment scripts provided in the samples directory of this project demonstrate the creation of different types of Oracle Unified Directory Instances (Directory Service, Proxy, Replication) in containers within a Kubernetes environment.\nNote: The sample files to assist you in creating and configuring your Oracle Unified Directory Kubernetes environment can be found in the project at the following location:\nhttps://github.com/oracle/fmw-kubernetes/tree/master/OracleUnifiedDirectory/kubernetes/samples\nPreparing the Environment for Container Creation In this section you prepare the environment for the Oracle Unified Directory container creation. 
This involves the following steps:\n Create Kubernetes Namespace Create Secrets for User IDs and Passwords Prepare a host directory to be used for Filesystem based PersistentVolume Create PersistentVolume (PV) and PersistentVolumeClaim (PVC) for your Namespace Note: Sample files to assist you in creating and configuring your Oracle Unified Directory Kubernetes environment can be found in the project at the following location:\nhttps://github.com/oracle/fmw-kubernetes/tree/master/OracleUnifiedDirectory/kubernetes/samples\nCreate Kubernetes Namespace You should create a Kubernetes namespace to provide a scope for other objects such as pods and services that you create in the environment. To create your namespace you should refer to the oudns.yaml file.\nUpdate the oudns.yaml file and replace %NAMESPACE% with the value of the namespace you would like to create. In the example below the value \u0026lsquo;oudns\u0026rsquo; is used.\nTo create the namespace apply the file using kubectl:\n$ cd \u0026lt;work directory\u0026gt;/fmw-kubernetes/OracleUnifiedDirectory/kubernetes/samples $ kubectl apply -f oudns.yaml For example:\n$ cd /scratch/OUDContainer/fmw-kubernetes/OracleUnifiedDirectory/kubernetes/samples $ kubectl apply -f oudns.yaml The output will look similar to the following:\nnamespace/oudns created Confirm that the namespace is created:\n$ kubectl get namespaces NAME STATUS AGE default Active 4d kube-public Active 4d kube-system Active 4d oudns Active 53s Create Secrets for User IDs and Passwords To protect sensitive information, namely user IDs and passwords, you should create Kubernetes Secrets for the key-value pairs with following keys. 
The Secret with key-value pairs will be used to pass values to containers created through the Oracle Unified Directory image:\n rootUserDN rootUserPassword adminUID adminPassword bindDN1 bindPassword1 bindDN2 bindPassword2 There are two ways by which a Kubernetes secret object can be created with required key-value pairs.\nUsing secrets.yaml file In this method you update the secrets.yaml file with the value for %SECRET_NAME% and %NAMESPACE%, together with the Base64 value for each secret.\n %rootUserDN% - With Base64 encoded value for rootUserDN parameter. %rootUserPassword% - With Base64 encoded value for rootUserPassword parameter. %adminUID% - With Base64 encoded value for adminUID parameter. %adminPassword% - With Base64 encoded value for adminPassword parameter. %bindDN1% - With Base64 encoded value for bindDN1 parameter. %bindPassword1% - With Base64 encoded value for bindPassword1 parameter. %bindDN2% - With Base64 encoded value for bindDN2 parameter. %bindPassword2% - With Base64 encoded value for bindPassword2 parameter. Obtain the base64 value for your secrets, for example:\n$ echo -n cn=Directory Manager | base64 Y249RGlyZWN0b3J5IE1hbmFnZXI= $ echo -n Oracle123 | base64 T3JhY2xlMTIz $ echo -n admin | base64 YWRtaW4= Note: Ensure that you use the -n parameter with the echo command. If the parameter is omitted Base64 values will be generated with a new-line character included.\nUpdate the secrets.yaml file with your values. 
It should look similar to the file shown below:\napiVersion: v1 kind: Secret metadata: name: oudsecret namespace: oudns type: Opaque data: rootUserDN: Y249RGlyZWN0b3J5IE1hbmFnZXI= rootUserPassword: T3JhY2xlMTIz adminUID: YWRtaW4= adminPassword: T3JhY2xlMTIz bindDN1: Y249RGlyZWN0b3J5IE1hbmFnZXI= bindPassword1: T3JhY2xlMTIz bindDN2: Y249RGlyZWN0b3J5IE1hbmFnZXI= bindPassword2: T3JhY2xlMTIz Apply the file:\n$ kubectl apply -f secrets.yaml secret/oudsecret created Verify that the secret has been created:\n$ kubectl --namespace oudns get secret NAME TYPE DATA AGE default-token-fztcb kubernetes.io/service-account-token 3 15m oudsecret Opaque 8 99s Using kubectl create secret command The Kubernetes secret can be created using the command line with the following syntax:\n$ kubectl --namespace %NAMESPACE% create secret generic %SECRET_NAME% \\ --from-literal=rootUserDN=\u0026quot;%rootUserDN%\u0026quot; \\ --from-literal=rootUserPassword=\u0026quot;%rootUserPassword%\u0026quot; \\ --from-literal=adminUID=\u0026quot;%adminUID%\u0026quot; \\ --from-literal=adminPassword=\u0026quot;%adminPassword%\u0026quot; \\ --from-literal=bindDN1=\u0026quot;%bindDN1%\u0026quot; \\ --from-literal=bindPassword1=\u0026quot;%bindPassword1%\u0026quot; \\ --from-literal=bindDN2=\u0026quot;%bindDN2%\u0026quot; \\ --from-literal=bindPassword2=\u0026quot;%bindPassword2%\u0026quot; Update the following placeholders in the command with the relevant value:\n %NAMESPACE% - With name of namespace in which secret is required to be created %SECRET_NAME% - Name for the secret object %rootUserDN% - With Base64 encoded value for rootUserDN parameter. %rootUserPassword% - With Base64 encoded value for rootUserPassword parameter. %adminUID% - With Base64 encoded value for adminUID parameter. %adminPassword% - With Base64 encoded value for adminPassword parameter. %bindDN1% - With Base64 encoded value for bindDN1 parameter. %bindPassword1% - With Base64 encoded value for bindPassword1 parameter. 
%bindDN2% - With Base64 encoded value for bindDN2 parameter. %bindPassword2% - With Base64 encoded value for bindPassword2 parameter. After executing the kubectl create secret command, verify that the secret has been created:\n$ kubectl --namespace oudns get secret NAME TYPE DATA AGE default-token-fztcb kubernetes.io/service-account-token 3 15m oudsecret Opaque 8 99s Prepare a Host Directory to be used for Filesystem Based PersistentVolume It is required to prepare a directory on the Host filesystem to store Oracle Unified Directory Instances and other configuration outside the container filesystem. That directory from the Host filesystem will be associated with a PersistentVolume.\nIn the case of a multi-node Kubernetes cluster, the Host directory to be associated with the PersistentVolume should be accessible on all the nodes at the same path.\nTo prepare a Host directory (for example: /scratch/user_projects) for mounting as a file system based PersistentVolume inside your containers, execute the command below on your Host:\n The userid can be anything but it must have uid:guid as 1000:1000, which is the same as the \u0026lsquo;oracle\u0026rsquo; user running in the container. This ensures the \u0026lsquo;oracle\u0026rsquo; user has access to the shared volume/directory.\n $ sudo su - root $ mkdir -p /scratch/user_projects $ chown 1000:1000 /scratch/user_projects $ exit All container operations are performed as the oracle user.\nNote: If a user already exists with -u 1000 -g 1000 then use the same user. Else modify the existing user to have uid-gid as '-u 1000 -g 1000\u0026rsquo;\nCreate PersistentVolume (PV) and PersistentVolumeClaim (PVC) for your Namespace A PersistentVolume (PV) is a storage resource, while a PersistentVolumeClaim (PVC) is a request for that resource. 
To provide storage for your namespace, update the persistent-volume.yaml file.\nUpdate the following to values specific to your environment:\n Param Value Example %PV_NAME% PV name oudpv %PV_HOST_PATH% Valid path on localhost /scratch/user_projects %PVC_NAME% PVC name oudpvc %NAMESPACE% Namespace oudns Apply the file:\n$ kubectl apply -f persistent-volume.yaml persistentvolume/oudpv created persistentvolumeclaim/oudpvc created Verify the PersistentVolume:\n$ kubectl --namespace oudns describe persistentvolume oudpv Name: oudpv Labels: type=oud-pv Annotations: pv.kubernetes.io/bound-by-controller: yes Finalizers: [kubernetes.io/pv-protection] StorageClass: manual Status: Bound Claim: oudns/oudpvc Reclaim Policy: Retain Access Modes: RWX VolumeMode: Filesystem Capacity: 10Gi Node Affinity: \u0026lt;none\u0026gt; Message: Source: Type: HostPath (bare host directory volume) Path: /scratch/user_projects HostPathType: Events: \u0026lt;none\u0026gt; Verify the PersistentVolumeClaim:\n$ kubectl --namespace oudns describe pvc oudpvc Name: oudpvc Namespace: oudns StorageClass: manual Status: Bound Volume: oudpv Labels: \u0026lt;none\u0026gt; Annotations: pv.kubernetes.io/bind-completed: yes pv.kubernetes.io/bound-by-controller: yes Finalizers: [kubernetes.io/pvc-protection] Capacity: 10Gi Access Modes: RWX VolumeMode: Filesystem Mounted By: \u0026lt;none\u0026gt; Events: \u0026lt;none\u0026gt; Directory Server (instanceType=Directory) In this example you create a POD (oudpod1) which comprises a single container based on an Oracle Unified Directory 12c PS4 (12.2.1.4.0) image.\nTo create the POD update the oud-dir-pod.yaml file.\nUpdate the following to values specific to your environment:\n Param Value Example %NAMESPACE% Namespace oudns %IMAGE% Oracle image tag oracle/oud:12.2.1.4.0 %SECRET_NAME% Secret name oudsecret %PV_NAME% PV name oudpv %PVC_NAME% PVC name oudpvc Apply the file:\n$ kubectl apply -f oud-dir-pod.yaml pod/oudpod1 created To check the status of the created 
pod:\n$ kubectl get pods -n oudns NAME READY STATUS RESTARTS AGE oudpod1 1/1 Running 0 14m If you see any errors then use the following commands to debug the pod/container.\nTo review issues with the pod e.g. CreateContainerConfigError:\n$ kubectl --namespace \u0026lt;namespace\u0026gt; describe pod \u0026lt;pod\u0026gt; For example:\n$ kubectl --namespace oudns describe pod oudpod1 To tail the container logs while it is initializing use the following command:\n$ kubectl --namespace \u0026lt;namespace\u0026gt; logs -f -c \u0026lt;container\u0026gt; \u0026lt;pod\u0026gt; For example:\n$ kubectl --namespace oudns logs -f -c oudds1 oudpod1 To view the full container logs:\n$ kubectl --namespace \u0026lt;namespace\u0026gt; logs -c \u0026lt;container\u0026gt; \u0026lt;pod\u0026gt; To validate that the Oracle Unified Directory directory server instance is running, connect to the container:\n$ kubectl --namespace oudns exec -it -c oudds1 oudpod1 /bin/bash In the container, run ldapsearch to return entries from the directory server:\n$ cd /u01/oracle/user_projects/oudpod1/OUD/bin $ ./ldapsearch -h localhost -p 1389 -D \u0026quot;cn=Directory Manager\u0026quot; -w Oracle123 -b \u0026quot;\u0026quot; -s sub \u0026quot;(objectclass=*)\u0026quot; dn dn: dc=example1,dc=com dn: ou=People,dc=example1,dc=com dn: uid=user.0,ou=People,dc=example1,dc=com ... dn: uid=user.99,ou=People,dc=example1,dc=com To exit the bash session type exit.\nDirectory Server (instanceType=Directory) as a Kubernetes Service In this example you will create two pods and 2 associated containers, both running Oracle Unified Directory 12c Directory Server instances. This demonstrates how you can expose Oracle Unified Directory 12c as a network service. 
This provides a way of abstracting access to the backend service independent of the pod details.\nTo create the POD update the oud-dir-svc.yaml file.\nUpdate the following to values specific to your environment:\n Param Value Example %NAMESPACE% Namespace oudns %IMAGE% Oracle image tag oracle/oud:12.2.1.4.0 %SECRET_NAME% Secret name oudsecret %PV_NAME% PV name oudpv %PVC_NAME% PVC name oudpvc Apply the file:\n$ kubectl apply -f oud-dir-svc.yaml service/oud-dir-svc-1 created pod/oud-dir1 created service/oud-dir-svc-2 created pod/oud-dir2 created To check the status of the created pods (oud-dir1 and oud-dir2) and services (oud-dir-svc-1 and oud-dir-svc-2):\n$ kubectl --namespace oudns get all NAME READY STATUS RESTARTS AGE pod/oud-dir1 1/1 Running 0 28m pod/oud-dir2 1/1 Running 0 28m pod/oudpod1 1/1 Running 0 22h NAME TYPE CLUSTER-IP EXTERNAL-IP PORT(S) AGE service/oud-dir-svc-1 NodePort 10.107.171.235 \u0026lt;none\u0026gt; 1444:30616/TCP,1888:32605/TCP,1389:31405/TCP,1636:32544/TCP,1080:31509/TCP,1081:32395/TCP,1898:31116/TCP 28m service/oud-dir-svc-2 NodePort 10.106.206.229 \u0026lt;none\u0026gt; 1444:30882/TCP,1888:30427/TCP,1389:31299/TCP,1636:31529/TCP,1080:30056/TCP,1081:30458/TCP,1898:31796/TCP 28m From this example you can see that the following service port mappings are available to access the container:\n service/oud-dir-svc-1 : 10.107.171.235 : 1389:31405 service/oud-dir-svc-2 : 10.106.206.229 : 1389:31299 To access the Oracle Unified Directory directory server running in pod/oud-dir1 via the LDAP port 1389 you would use the service port : 31405.\nTo access the Oracle Unified Directory directory server running in pod/oud-dir2 via the LDAP port 1389 you would use the service port : 31299.\nFor example:\nNote: use the ldapsearch from the Oracle Unified Directory ORACLE_HOME when accessing the cluster externally.\n$ ldapsearch -h $HOSTNAME -p 31405 -D \u0026quot;cn=Directory Manager\u0026quot; -w Oracle123 -b \u0026quot;\u0026quot; -s sub 
\u0026quot;(objectclass=*)\u0026quot; dn dn: dc=example1,dc=com dn: ou=People,dc=example1,dc=com dn: uid=user.0,ou=People,dc=example1,dc=com ... dn: uid=user.98,ou=People,dc=example1,dc=com dn: uid=user.99,ou=People,dc=example1,dc=com $ ldapsearch -h $HOSTNAME -p 31299 -D \u0026quot;cn=Directory Manager\u0026quot; -w Oracle123 -b \u0026quot;\u0026quot; -s sub \u0026quot;(objectclass=*)\u0026quot; dn dn: dc=example2,dc=com dn: ou=People,dc=example2,dc=com dn: uid=user.0,ou=People,dc=example2,dc=com ... dn: uid=user.98,ou=People,dc=example2,dc=com dn: uid=user.99,ou=People,dc=example2,dc=com Validation It is possible to access the Oracle Unified Directory instances and the data within externally from the cluster, using commands like curl. In this way you can access interfaces exposed through NodePort. In the example below, two services (service/oud-dir-svc-1 and service/oud-dir-svc-2) expose a set of ports. The following curl commands can be executed against the ports exposed through each service.\nCurl command example for Oracle Unified Directory Admin REST: curl --noproxy \u0026quot;*\u0026quot; --insecure --location --request GET \\ 'https://\u0026lt;HOSTNAME\u0026gt;:\u0026lt;AdminHttps NodePort mapped to 1888\u0026gt;/rest/v1/admin/?scope=base\u0026amp;attributes=%2b' \\ --header 'Content-Type: application/json' \\ --header 'Authorization: Basic Y249RGlyZWN0b3J5IE1hbmFnZXI6T3JhY2xlMTIz' where Y249RGlyZWN0b3J5IE1hbmFnZXI6T3JhY2xlMTIz is the base64 encoded version of : cn=Directory Manager:Oracle123\nCurl command example for Oracle Unified Directory Data REST : curl --noproxy \u0026quot;*\u0026quot; --insecure --location --request GET \\ 'https://\u0026lt;HOSTNAME\u0026gt;:\u0026lt;Https NodePort mapped to 1081\u0026gt;/rest/v1/directory/?scope=base\u0026amp;attributes=%2b' \\ --header 'Authorization: Basic Y249RGlyZWN0b3J5IE1hbmFnZXI6T3JhY2xlMTIz' Curl command example for Oracle Unified Directory Data SCIM: curl --noproxy \u0026quot;*\u0026quot; --insecure 
--location --request GET \\ 'https://\u0026lt;HOSTNAME\u0026gt;:\u0026lt;Https NodePort mapped to 1081\u0026gt;/iam/directory/oud/scim/v1/Schemas/urn:ietf:params:scim:schemas:core:2.0:Schema' \\ --header 'Authorization: Basic Y249RGlyZWN0b3J5IE1hbmFnZXI6T3JhY2xlMTIz' Proxy Server (instanceType=Proxy) as a Kubernetes Service In this example you will create a service, pod and associated container, in which an Oracle Unified Directory 12c Proxy Server instance is deployed. This acts as a proxy to the 2 services you created in the previous example.\nTo create the POD update the oud-ds_proxy-svc.yaml file.\nUpdate the following to values specific to your environment:\n Param Value Example %NAMESPACE% Namespace oudns %IMAGE% Oracle image tag oracle/oud:12.2.1.4.0 %SECRET_NAME% Secret name oudsecret %PV_NAME% PV name oudpv %PVC_NAME% PVC name oudpvc Apply the file:\n$ kubectl apply -f oud-ds_proxy-svc.yaml service/oud-ds-proxy-svc created pod/oudp1 created Check the status of the new pod/service:\n$ kubectl --namespace oudns get all NAME READY STATUS RESTARTS AGE pod/oud-dir1 1/1 Running 0 166m pod/oud-dir2 1/1 Running 0 166m pod/oudp1 1/1 Running 0 20m pod/oudpod1 1/1 Running 0 25h NAME TYPE CLUSTER-IP EXTERNAL-IP PORT(S) AGE service/oud-dir-svc-1 NodePort 10.107.171.235 \u0026lt;none\u0026gt; 1444:30616/TCP,1888:32605/TCP,1389:31405/TCP,1636:32544/TCP,1080:31509/TCP,1081:32395/TCP,1898:31116/TCP 166m service/oud-dir-svc-2 NodePort 10.106.206.229 \u0026lt;none\u0026gt; 1444:30882/TCP,1888:30427/TCP,1389:31299/TCP,1636:31529/TCP,1080:30056/TCP,1081:30458/TCP,1898:31796/TCP 166m service/oud-ds-proxy-svc NodePort 10.103.41.171 \u0026lt;none\u0026gt; 1444:30878/TCP,1888:30847/TCP,1389:31810/TCP,1636:30873/TCP,1080:32076/TCP,1081:30762/TCP,1898:31269/TCP 20m Verify operation of the proxy server, accessing through the external service port:\n$ ldapsearch -h $HOSTNAME -p 31810 -D \u0026quot;cn=Directory Manager\u0026quot; -w Oracle123 -b \u0026quot;\u0026quot; -s sub 
\u0026quot;(objectclass=*)\u0026quot; dn dn: dc=example1,dc=com dn: ou=People,dc=example1,dc=com dn: uid=user.0,ou=People,dc=example1,dc=com ... dn: uid=user.99,ou=People,dc=example1,dc=com dn: dc=example2,dc=com dn: ou=People,dc=example2,dc=com dn: uid=user.0,ou=People,dc=example2,dc=com ... dn: uid=user.98,ou=People,dc=example2,dc=com dn: uid=user.99,ou=People,dc=example2,dc=com Note: Entries are returned from both backend directory servers (dc=example1,dc=com and dc=example2,dc=com) via the proxy server.\nReplication Server (instanceType=Replication) as a Kubernetes Service In this example you will create a service, pod and associated container, in which an Oracle Unified Directory 12c Replication Server instance is deployed. This creates a single Replication Server which has 2 Directory Servers as its replication group. This example extends the Oracle Unified Directory instances created as part of Directory Server (instanceType=Directory) as a Kubernetes Service.\nTo create the POD update the oud-ds_rs_ds-svc.yaml file.\nUpdate the following to values specific to your environment:\n Param Value Example %NAMESPACE% Namespace oudns %IMAGE% Oracle image tag oracle/oud:12.2.1.4.0 %SECRET_NAME% Secret name oudsecret %PV_NAME% PV name oudpv %PVC_NAME% PVC name oudpvc Apply the file:\n$ kubectl apply -f oud-ds_rs_ds-svc.yaml service/oud-rs-svc-1 created pod/oudpodrs1 created service/oud-ds-svc-1a created pod/oudpodds1a created service/oud-ds-svc-1b created pod/oudpodds1b created Check the status of the new services:\n$ kubectl --namespace oudns get all NAME READY STATUS RESTARTS AGE pod/oud-dir1 1/1 Running 0 2d20h pod/oud-dir2 1/1 Running 0 2d20h pod/oudp1 1/1 Running 0 2d18h pod/oudpod1 1/1 Running 0 3d18h pod/oudpodds1a 0/1 Running 0 2m44s pod/oudpodds1b 0/1 Running 0 2m44s pod/oudpodrs1 0/1 Running 0 2m45s NAME TYPE CLUSTER-IP EXTERNAL-IP PORT(S) AGE service/oud-dir-svc-1 NodePort 10.107.171.235 \u0026lt;none\u0026gt; 
1444:30616/TCP,1888:32605/TCP,1389:31405/TCP,1636:32544/TCP,1080:31509/TCP,1081:32395/TCP,1898:31116/TCP 2d20h service/oud-dir-svc-2 NodePort 10.106.206.229 \u0026lt;none\u0026gt; 1444:30882/TCP,1888:30427/TCP,1389:31299/TCP,1636:31529/TCP,1080:30056/TCP,1081:30458/TCP,1898:31796/TCP 2d20h service/oud-ds-proxy-svc NodePort 10.103.41.171 \u0026lt;none\u0026gt; 1444:30878/TCP,1888:30847/TCP,1389:31810/TCP,1636:30873/TCP,1080:32076/TCP,1081:30762/TCP,1898:31269/TCP 2d18h service/oud-ds-svc-1a NodePort 10.102.218.25 \u0026lt;none\u0026gt; 1444:30347/TCP,1888:30392/TCP,1389:32482/TCP,1636:31161/TCP,1080:31241/TCP,1081:32597/TCP 2m45s service/oud-ds-svc-1b NodePort 10.104.6.215 \u0026lt;none\u0026gt; 1444:32031/TCP,1888:31621/TCP,1389:32511/TCP,1636:31698/TCP,1080:30737/TCP,1081:30748/TCP 2m44s service/oud-rs-svc-1 NodePort 10.110.237.193 \u0026lt;none\u0026gt; 1444:32685/TCP,1888:30176/TCP,1898:30543/TCP 2m45s Validation To validate that the Oracle Unified Directory replication group is running, connect to the replication server container (oudrs1):\n$ kubectl --namespace oudns exec -it -c oudrs1 oudpodrs1 /bin/bash $ cd /u01/oracle/user_projects/oudpodrs1/OUD/bin In the container, run dsreplication to return details of the replication group:\n$ ./dsreplication status --trustAll --hostname localhost --port 1444 --adminUID admin --dataToDisplay compat-view --dataToDisplay rs-connections \u0026gt;\u0026gt;\u0026gt;\u0026gt; Specify Oracle Unified Directory LDAP connection parameters Password for user 'admin': Establishing connections and reading configuration ..... Done. dc=example1,dc=com - Replication Enabled ======================================== Server : Entries : M.C. [1] : A.O.M.C. [2] : Port [3] : Encryption [4] : Trust [5] : U.C. 
[6] : Status [7] : ChangeLog [8] : Group ID [9] : Connected To [10] --------------------:----------:----------:--------------:----------:----------------:-----------:----------:------------:---------------:--------------:--------------------------- oud-rs-svc-1:1444 : -- [11] : 0 : -- : 1898 : Disabled : -- : -- : Up : -- : 1 : -- oud-ds-svc-1a:1444 : 1 : 0 : 0 : -- [12] : Disabled : Trusted : -- : Normal : Enabled : 1 : oud-rs-svc-1:1898 (GID=1) oud-ds-svc-1b:1444 : 1 : 0 : 0 : -- [12] : Disabled : Trusted : -- : Normal : Enabled : 1 : oud-rs-svc-1:1898 (GID=1) You can see that the Replication Server is running as the oud-rs-svc-1:1444, while you have Directory Server services running on oud-ds-svc-1a:1444 and oud-ds-svc-1b:1444.\nTo exit the bash session type exit.\nFrom outside the cluster, you can invoke curl commands, as shown in the following examples, to access interfaces exposed through NodePort. In this example, there are two Directory services (service/oud-ds-svc-1a and service/oud-ds-svc-1b) exposing a set of ports. 
The following curl commands can be executed against ports exposed through each service.\nCurl command example for Oracle Unified Directory Admin REST: curl --noproxy \u0026quot;*\u0026quot; --insecure --location --request GET \\ 'https://\u0026lt;HOSTNAME\u0026gt;:\u0026lt;AdminHttps NodePort mapped to 1888\u0026gt;/rest/v1/admin/?scope=base\u0026amp;attributes=%2b' \\ --header 'Content-Type: application/json' \\ --header 'Authorization: Basic Y249RGlyZWN0b3J5IE1hbmFnZXI6T3JhY2xlMTIz' where Y249RGlyZWN0b3J5IE1hbmFnZXI6T3JhY2xlMTIz is the base64 encoded version of : cn=Directory Manager:Oracle123\nNote: This can be executed against the replication service (oud-rs-svc-1) as well.\nCurl command example for Oracle Unified Directory Data REST : curl --noproxy \u0026quot;*\u0026quot; --insecure --location --request GET \\ 'https://\u0026lt;HOSTNAME\u0026gt;:\u0026lt;Https NodePort mapped to 1081\u0026gt;/rest/v1/directory/?scope=base\u0026amp;attributes=%2b' \\ --header 'Authorization: Basic Y249RGlyZWN0b3J5IE1hbmFnZXI6T3JhY2xlMTIz' Curl command example for Oracle Unified Directory Data SCIM: curl --noproxy \u0026quot;*\u0026quot; --insecure --location --request GET \\ 'https://\u0026lt;HOSTNAME\u0026gt;:\u0026lt;Https NodePort mapped to 1081\u0026gt;/iam/directory/oud/scim/v1/Schemas/urn:ietf:params:scim:schemas:core:2.0:Schema' \\ --header 'Authorization: Basic Y249RGlyZWN0b3J5IE1hbmFnZXI6T3JhY2xlMTIz' Directory Server/Service added to existing Replication Server/Service (instanceType=AddDS2RS) In this example you will create services, pods and containers, in which Oracle Unified Directory 12c Replication Server instances are deployed. 
In this case 2 Replication/Directory Server Services are added, in addition the Directory Server created in Directory Server (instanceType=Directory) as a Kubernetes Service (oud-dir-svc-2) is added to the replication group.\nTo create the POD update the oud-ds-plus-rs-svc.yaml file.\nUpdate the following to values specific to your environment:\n Param Value Example %NAMESPACE% Namespace oudns %IMAGE% Oracle image tag oracle/oud:12.2.1.4.0 %SECRET_NAME% Secret name oudsecret %PV_NAME% PV name oudpv %PVC_NAME% PVC name oudpvc Apply the file:\n$ kubectl apply -f oud-ds-plus-rs-svc.yaml service/oud-dsrs-svc-1 created pod/ouddsrs1 created service/oud-dsrs-svc-2 created pod/ouddsrs2 created Check the status of the new services:\n$ kubectl --namespace oudns get all NAME READY STATUS RESTARTS AGE pod/oud-dir1 1/1 Running 0 3d pod/oud-dir2 1/1 Running 0 3d pod/ouddsrs1 0/1 Running 0 75s pod/ouddsrs2 0/1 Running 0 75s pod/oudp1 1/1 Running 0 2d21h pod/oudpod1 1/1 Running 0 3d22h pod/oudpodds1a 1/1 Running 0 3h33m pod/oudpodds1b 1/1 Running 0 3h33m pod/oudpodrs1 1/1 Running 0 3h33m NAME TYPE CLUSTER-IP EXTERNAL-IP PORT(S) AGE service/oud-dir-svc-1 NodePort 10.107.171.235 \u0026lt;none\u0026gt; 1444:30616/TCP,1888:32605/TCP,1389:31405/TCP,1636:32544/TCP,1080:31509/TCP,1081:32395/TCP,1898:31116/TCP 3d service/oud-dir-svc-2 NodePort 10.106.206.229 \u0026lt;none\u0026gt; 1444:30882/TCP,1888:30427/TCP,1389:31299/TCP,1636:31529/TCP,1080:30056/TCP,1081:30458/TCP,1898:31796/TCP 3d service/oud-ds-proxy-svc NodePort 10.103.41.171 \u0026lt;none\u0026gt; 1444:30878/TCP,1888:30847/TCP,1389:31810/TCP,1636:30873/TCP,1080:32076/TCP,1081:30762/TCP,1898:31269/TCP 2d21h service/oud-ds-svc-1a NodePort 10.102.218.25 \u0026lt;none\u0026gt; 1444:30347/TCP,1888:30392/TCP,1389:32482/TCP,1636:31161/TCP,1080:31241/TCP,1081:32597/TCP 3h33m service/oud-ds-svc-1b NodePort 10.104.6.215 \u0026lt;none\u0026gt; 1444:32031/TCP,1888:31621/TCP,1389:32511/TCP,1636:31698/TCP,1080:30737/TCP,1081:30748/TCP 3h33m 
service/oud-dsrs-svc-1 NodePort 10.102.118.29 \u0026lt;none\u0026gt; 1444:30738/TCP,1888:30935/TCP,1389:32438/TCP,1636:32109/TCP,1080:31776/TCP,1081:31897/TCP,1898:30874/TCP 75s service/oud-dsrs-svc-2 NodePort 10.98.139.53 \u0026lt;none\u0026gt; 1444:32312/TCP,1888:30595/TCP,1389:31376/TCP,1636:30090/TCP,1080:31238/TCP,1081:31174/TCP,1898:31863/TCP 75s service/oud-rs-svc-1 NodePort 10.110.237.193 \u0026lt;none\u0026gt; 1444:32685/TCP,1888:30176/TCP,1898:30543/TCP 3h33m Validation To validate that the Oracle Unified Directory replication group is running, connect to the replication server container (ouddsrs1):\n$ kubectl --namespace oudns exec -it -c ouddsrs ouddsrs1 /bin/bash $ cd /u01/oracle/user_projects/ouddsrs1/OUD/bin In the container, run dsreplication to return details of the replication group:\n$ ./dsreplication status --trustAll --hostname localhost --port 1444 --adminUID admin --dataToDisplay compat-view --dataToDisplay rs-connections \u0026gt;\u0026gt;\u0026gt;\u0026gt; Specify Oracle Unified Directory LDAP connection parameters Password for user 'admin': Establishing connections and reading configuration ..... Done. dc=example2,dc=com - Replication Enabled ======================================== Server : Entries : M.C. [1] : A.O.M.C. [2] : Port [3] : Encryption [4] : Trust [5] : U.C. 
[6] : Status [7] : ChangeLog [8] : Group ID [9] : Connected To [10] --------------------:---------:----------:--------------:----------:----------------:-----------:----------:------------:---------------:--------------:----------------------------- oud-dir-svc-2:1444 : 102 : 0 : 0 : 1898 : Disabled : Trusted : -- : Normal : Enabled : 1 : oud-dir-svc-2:1898 (GID=1) oud-dsrs-svc-1:1444 : 102 : 0 : 0 : 1898 : Disabled : Trusted : -- : Normal : Enabled : 2 : oud-dsrs-svc-1:1898 (GID=2) oud-dsrs-svc-2:1444 : 102 : 0 : 0 : 1898 : Disabled : Trusted : -- : Normal : Enabled : 2 : oud-dsrs-svc-2:1898 (GID=2) Replication Server [11] : RS #1 : RS #2 : RS #3 --------------------------:-------:-------:------- oud-dir-svc-2:1898 (#1) : -- : Yes : Yes oud-dsrs-svc-1:1898 (#2) : Yes : -- : Yes oud-dsrs-svc-2:1898 (#3) : Yes : Yes : -- From outside the cluster, you can invoke curl commands, as shown in the following examples, to access interfaces exposed through NodePort. In this example, there are two services (service/oud-dsrs-svc-1 and service/oud-dsrs-svc-2) exposing a set of ports. 
Following curl commands can be executed against ports exposed through each service.\nTo exit the bash session type exit.\nCurl command example for Oracle Unified Directory Admin REST: curl --noproxy \u0026quot;*\u0026quot; --insecure --location --request GET \\ 'https://\u0026lt;HOSTNAME\u0026gt;:\u0026lt;AdminHttps NodePort mapped to 1888\u0026gt;/rest/v1/admin/?scope=base\u0026amp;attributes=%2b' \\ --header 'Content-Type: application/json' \\ --header 'Authorization: Basic Y249RGlyZWN0b3J5IE1hbmFnZXI6T3JhY2xlMTIz' where Y249RGlyZWN0b3J5IE1hbmFnZXI6T3JhY2xlMTIz is the base64 encoded version of : cn=Directory Manager:Oracle123\nCurl command example for Oracle Unified Directory Data REST : curl --noproxy \u0026quot;*\u0026quot; --insecure --location --request GET \\ 'https://\u0026lt;HOSTNAME\u0026gt;:\u0026lt;Https NodePort mapped to 1081\u0026gt;/rest/v1/directory/?scope=base\u0026amp;attributes=%2b' \\ --header 'Authorization: Basic Y249RGlyZWN0b3J5IE1hbmFnZXI6T3JhY2xlMTIz' Curl command example for Oracle Unified Directory Data SCIM: curl --noproxy \u0026quot;*\u0026quot; --insecure --location --request GET \\ 'https://\u0026lt;HOSTNAME\u0026gt;:\u0026lt;Https NodePort mapped to 1081\u0026gt;/iam/directory/oud/scim/v1/Schemas/urn:ietf:params:scim:schemas:core:2.0:Schema' \\ --header 'Authorization: Basic Y249RGlyZWN0b3J5IE1hbmFnZXI6T3JhY2xlMTIz' Appendix A : Reference Before using these sample yaml files, the following variables must be updated:\n %NAMESPACE% - with value for Kubernetes namespace of your choice %IMAGE% - with exact docker image for oracle/oud:12.2.1.x.x %PV_NAME% - with value of the persistent volume name of your choice %PV_HOST_PATH% - with value of the persistent volume Host Path (Directory Path which would be used as storage path for volume) %PVC_NAME% - with value of the persistent volume claim name of your choice %SECRET_NAME% - with value of the secret name which can be created using secrets.yaml file. 
%rootUserDN% - With Base64 encoded value for rootUserDN parameter. %rootUserPassword% - With Base64 encoded value for rootUserPassword parameter. %adminUID% - With Base64 encoded value for adminUID parameter. %adminPassword% - With Base64 encoded value for adminPassword parameter. %bindDN1% - With Base64 encoded value for bindDN1 parameter. %bindPassword1% - With Base64 encoded value for bindPassword1 parameter. %bindDN2% - With Base64 encoded value for bindDN2 parameter. %bindPassword2% - With Base64 encoded value for bindPassword2 parameter. oudns.yaml This is a sample file to create a Kubernetes namespace.\npersistent-volume.yaml This is a sample file to create Persistent Volume and Persistent Volume Claim\nsecrets.yaml This is a sample file to create the Kubernetes secrets which can be used to substitute values during Pod creation.\nThe keys below will be honoured by the different Oracle Unified Directory yaml files\n rootUserDN rootUserPassword adminUID adminPassword bindDN1 bindPassword1 bindDN2 bindPassword2 All the values of the keys should be encoded using the command below and the encoded value should be used in the secrets.yaml file.\nTo generate an encoded value for keys in Base64 format, execute the following command:\n$ echo -n 'MyPassword' | base64 TXlQYXNzd29yZA== oud-dir-pod.yaml This is a sample file to create POD (oudpod1) and a container for an Oracle Unified Directory Directory Instance.\noud-ds_proxy-svc.yaml This is a sample file to create:\n POD (oudds1) with container for Oracle Unified Directory Directory Instance (dc=example1,dc=com) POD (oudds2) with container for Oracle Unified Directory Directory Instance (dc=example2,dc=com) POD (oudp1) with container for Oracle Unified Directory Directory Proxy referring to Oracle Unified Directory Directory Instances (oudds1 and oudds2) for dc=example1,dc=com and dc=example2,dc=com Service (oud-ds-proxy-svc) referring to POD with Oracle Unified Directory Directory Proxy (oudp1) oud-ds_rs_ds-svc.yaml 
This is a sample file to create:\n POD (oudpodds1) with container for Oracle Unified Directory Directory Instance (dc=example1,dc=com) POD (oudpodrs1) with container for Oracle Unified Directory Replication Server Instance connected to Oracle Unified Directory Directory Instance (oudpodds1) POD (oudpodds1a) with container for Oracle Unified Directory Directory Instance having replication enabled through Replication Server Instance (oudpodrs1) POD (oudpodds1b) with container for Oracle Unified Directory Directory Instance having replication enabled through Replication Server Instance (oudpodrs1) Service (oud-ds-rs-ds-svc) referring to all PODs The following command can be executed in the container to check the status of the replicated instances:\n$ /u01/oracle/user_projects/oudpodrs1/OUD/bin/dsreplication status \\ --trustAll --hostname oudpodrs1.oud-ds-rs-ds-svc.oudns.svc.cluster.local --port 1444 \\ --dataToDisplay compat-view oud-ds-plus-rs-svc.yaml This is a sample file to create 3 replicated DS+RS Instances:\n POD (ouddsrs1) with container for Oracle Unified Directory Directory Server (dc=example1,dc=com) and Replication Server POD (ouddsrs2) with container for Oracle Unified Directory Directory Server (dc=example1,dc=com) and Replication Server Service (oud-dsrs-svc) referring to all PODs The following command can be executed in the container to check the status of the replicated instances:\n$ /u01/oracle/user_projects/ouddsrs1/OUD/bin/dsreplication status \\ --trustAll --hostname ouddsrs1.oud-dsrs-svc.oudns.svc.cluster.local --port 1444 \\ --dataToDisplay compat-view " +}, +{ + "uri": "/fmw-kubernetes/21.4.2/oudsm/create-oudsm-instances/create-oudsm-instances-using-samples/", + "title": "a. 
Create Oracle Unified Directory Services Manager Instances Using Samples", + "tags": [], + "description": "Samples for deploying Oracle Unified Directory Services Manager instances to a Kubernetes POD.", + "content": " Introduction Preparing the Environment for Container Creation Create Kubernetes Namespace Create Secrets for User IDs and Passwords Prepare a Host Directory to be used for Filesystem Based PersistentVolume Create PersistentVolume (PV) and PersistentVolumeClaim (PVC) for your Namespace Oracle Unified Directory Services Manager POD Oracle Unified Directory Services Manager Deployment Introduction The Oracle Unified Directory Services Manager deployment scripts provided in the code repository demonstrate how to deploy Oracle Unified Directory Services Manager in containers within a Kubernetes environment.\nNote: The sample files to assist you in creating and configuring your Oracle Unified Directory Services Manager Kubernetes environment can be found in the project at the following location:\n\u0026lt;work directory\u0026gt;/fmw-kubernetes/OracleUnifiedDirectorySM/kubernetes/samples\nPreparing the Environment for Container Creation In this section you prepare the environment for the Oracle Unified Directory Services Manager container creation. This involves the following steps:\n Create Kubernetes Namespace Create Secrets for User IDs and Passwords Prepare a host directory to be used for Filesystem based PersistentVolume Create PersistentVolume (PV) and PersistentVolumeClaim (PVC) for your Namespace Create Kubernetes Namespace You should create a Kubernetes namespace to provide a scope for other objects such as pods and services that you create in the environment. To create your namespace you should refer to the oudsmns.yaml file.\nUpdate the oudsmns.yaml file and replace %NAMESPACE% with the value of the namespace you would like to create. 
In the example below the value \u0026lsquo;oudns\u0026rsquo; is used.\nTo create the namespace apply the file using kubectl:\n$ kubectl apply -f oudsmns.yaml namespace/oudns created Confirm that the namespace is created:\n$ kubectl get namespaces NAME STATUS AGE default Active 4d kube-public Active 4d kube-system Active 4d oudns Active 53s Create Secrets for User IDs and Passwords To protect sensitive information, namely user IDs and passwords, you should create Kubernetes Secrets for the key-value pairs with following keys. The Secret with key-value pairs will be used to pass values to containers created through the OUD image:\n adminUser adminPass There are two ways by which a Kubernetes secret object can be created with required key-value pairs.\nUsing samples/secrets.yaml file In this method you update the samples/secrets.yaml file with the value for %SECRET_NAME% and %NAMESPACE%, together with the Base64 value for each secret.\n %adminUser% - With Base64 encoded value for adminUser parameter. %adminPass% - With Base64 encoded value for adminPass parameter. Obtain the base64 value for your secrets, for example:\n$ echo -n weblogic | base64 d2VibG9naWM= $ echo -n Oracle123 | base64 T3JhY2xlMTIz Note: Ensure that you use the -n parameter with the echo command. If the parameter is omitted Base64 values will be generated with a new-line character included.\nUpdate the secrets.yaml file with your values. 
It should look similar to the file shown below:\napiVersion: v1 kind: Secret metadata: name: oudsmsecret namespace: oudns type: Opaque data: adminUser: d2VibG9naWM= adminPass: T3JhY2xlMTIz Apply the file:\n$ kubectl apply -f secrets.yaml secret/oudsmsecret created Verify that the secret has been created:\n$ kubectl --namespace oudns get secret NAME TYPE DATA AGE default-token-fztcb kubernetes.io/service-account-token 3 15m oudsmsecret Opaque 8 99s Using kubectl create secret command The Kubernetes secret can be created using the command line with the following syntax:\n$ kubectl --namespace %NAMESPACE% create secret generic %SECRET_NAME% \\ --from-literal=adminUser=\u0026quot;%adminUser%\u0026quot; \\ --from-literal=adminPass=\u0026quot;%adminPass%\u0026quot; Update the following placeholders in the command with the relevant value:\n %NAMESPACE% - With name of namespace in which secret is required to be created %SECRET_NAME% - Name for the secret object %adminUser% - With Base64 encoded value for adminUser parameter. %adminPass%- With Base64 encoded value for adminPass parameter. After executing the kubectl create secret command, verify that the secret has been created:\n$ kubectl --namespace oudns get secret NAME TYPE DATA AGE default-token-fztcb kubernetes.io/service-account-token 3 15m oudsmsecret Opaque 8 99s Prepare a Host Directory to be used for Filesystem Based PersistentVolume It is required to prepare a directory on the Host filesystem to store Oracle Unified Directory Services Manager Instances and other configuration outside the container filesystem. 
That directory from the Host filesystem will be associated with a PersistentVolume.\nIn the case of a multi-node Kubernetes cluster, the Host directory to be associated with the PersistentVolume should be accessible on all the nodes at the same path.\nTo prepare a Host directory (for example: /scratch/user_projects) for mounting as a file system based PersistentVolume inside your containers, execute the command below on your Host:\n The userid can be anything but it must have uid:guid as 1000:1000, which is the same as the \u0026lsquo;oracle\u0026rsquo; user running in the container. This ensures the \u0026lsquo;oracle\u0026rsquo; user has access to the shared volume/directory.\n $ sudo su - root $ mkdir -p /scratch/user_projects $ chown 1000:1000 /scratch/user_projects $ exit All container operations are performed as the oracle user.\nNote: If a user already exists with -u 1000 -g 1000 then use the same user. Else modify the existing user to have uid-gid as '-u 1000 -g 1000\u0026rsquo;\nCreate PersistentVolume (PV) and PersistentVolumeClaim (PVC) for your Namespace A PersistentVolume (PV) is a storage resource, while a PersistentVolumeClaim (PVC) is a request for that resource. 
To provide storage for your namespace, update the persistent-volume.yaml file.\nUpdate the following to values specific to your environment:\n Param Value Example %PV_NAME% PV name oudsmpv %PV_HOST_PATH% Valid path on localhost /scratch/user_projects %PVC_NAME% PVC name oudsmpvc %NAMESPACE% Namespace oudns Apply the file:\n$ kubectl apply -f persistent-volume.yaml persistentvolume/oudsmpv created persistentvolumeclaim/oudsmpvc created Verify the PersistentVolume:\n$ kubectl describe persistentvolume oudsmpv Name: oudsmpv Labels: type=oud-pv Annotations: pv.kubernetes.io/bound-by-controller: yes Finalizers: [kubernetes.io/pv-protection] StorageClass: manual Status: Bound Claim: oudns/oudsmpvc Reclaim Policy: Retain Access Modes: RWX VolumeMode: Filesystem Capacity: 10Gi Node Affinity: \u0026lt;none\u0026gt; Message: Source: Type: HostPath (bare host directory volume) Path: /scratch/user_projects HostPathType: Events: \u0026lt;none\u0026gt; Verify the PersistentVolumeClaim:\n$ kubectl --namespace oudns describe pvc oudsmpvc Name: oudsmpvc Namespace: oudns StorageClass: manual Status: Bound Volume: oudsmpv Labels: \u0026lt;none\u0026gt; Annotations: pv.kubernetes.io/bind-completed: yes pv.kubernetes.io/bound-by-controller: yes Finalizers: [kubernetes.io/pvc-protection] Capacity: 10Gi Access Modes: RWX VolumeMode: Filesystem Mounted By: \u0026lt;none\u0026gt; Events: \u0026lt;none\u0026gt; Oracle Unified Directory Services Manager POD In this example you create a POD (oudsmpod) which holds a single container based on an Oracle Unified Directory Services Manager 12c PS4 (12.2.1.4.0) image. This container is configured to run Oracle Unified Directory Services Manager. 
You also create a service (oudsm) through which you can access the Oracle Unified Directory Services Manager GUI.\nTo create the POD update the samples/oudsm-pod.yaml file.\nUpdate the following parameters to values specific to your environment:\n Param Value Example %NAMESPACE% Namespace oudns %IMAGE% Oracle image tag oracle/oudsm:12.2.1.4.0 %SECRET_NAME% Secret name oudsmsecret %PV_NAME% PV name oudsmpv %PVC_NAME% PVC name oudsmpvc Apply the file:\n$ kubectl apply -f samples/oudsm-pod.yaml service/oudsm-svc created pod/oudsmpod created To check the status of the created pod:\n$ kubectl get pods -n oudns NAME READY STATUS RESTARTS AGE oudsmpod 1/1 Running 0 22m If you see any errors then use the following commands to debug the pod/container.\nTo review issues with the pod e.g. CreateContainerConfigError:\n$ kubectl --namespace \u0026lt;namespace\u0026gt; describe pod \u0026lt;pod\u0026gt; For example:\n$ kubectl --namespace oudns describe pod oudsmpod To tail the container logs while it is initialising use the following command:\n$ kubectl --namespace \u0026lt;namespace\u0026gt; logs -f -c \u0026lt;container\u0026gt; \u0026lt;pod\u0026gt; For example:\n$ kubectl --namespace oudns logs -f -c oudsm oudsmpod To view the full container logs:\n$ kubectl --namespace \u0026lt;namespace\u0026gt; logs -c \u0026lt;container\u0026gt; \u0026lt;pod\u0026gt; To validate that the POD is running:\n$ kubectl --namespace \u0026lt;namespace\u0026gt; get all,pv,pvc,secret For example:\n$ kubectl --namespace oudns get all,pv,pvc,secret NAME READY STATUS RESTARTS AGE pod/oudsmpod 1/1 Running 0 24m NAME TYPE CLUSTER-IP EXTERNAL-IP PORT(S) AGE service/oudsm-svc NodePort 10.109.142.163 \u0026lt;none\u0026gt; 7001:31674/TCP,7002:31490/TCP 24m NAME CAPACITY ACCESS MODES RECLAIM POLICY STATUS CLAIM STORAGECLASS REASON AGE persistentvolume/oudsmpv 10Gi RWX Delete Bound oudns/oudsmpvc manual 45m NAME STATUS VOLUME CAPACITY ACCESS MODES STORAGECLASS AGE persistentvolumeclaim/oudsmpvc Bound 
oudsmpv 10Gi RWX manual 45m NAME TYPE DATA AGE secret/default-token-5kbxk kubernetes.io/service-account-token 3 84m secret/oudsmsecret Opaque 2 80m Once the container is running (READY shows as \u0026lsquo;1/1\u0026rsquo;) check the value of the service port (PORT/s value : here 7001:31674/TCP,7002:31490/TCP) for the Oracle Unified Directory Services Manager service and use this to access Oracle Unified Directory Services Manager in a browser:\nhttp://\u0026lt;hostname\u0026gt;:\u0026lt;svcport\u0026gt;/oudsm In the case here:\nhttp://\u0026lt;myhost\u0026gt;:31674/oudsm If you need to release the resources created in this example (POD, service) then issue the following command:\n$ kubectl delete -f samples/oudsm-pod.yaml service \u0026quot;oudsm-svc\u0026quot; deleted pod \u0026quot;oudsmpod\u0026quot; deleted This will avoid conflicts when running the following example for Deployments.\nOracle Unified Directory Services Manager Deployment In this example you create multiple Oracle Unified Directory Services Manager PODs/Services using Kubernetes deployments.\nTo create the deployment update the samples/oudsm-deployment.yaml file.\nUpdate the following to values specific to your environment:\n Param Value Example %NAMESPACE% Namespace oudns %IMAGE% Oracle image tag oracle/oudsm:12.2.1.4.0 %SECRET_NAME% Secret name oudsmsecret Apply the file:\n$ kubectl apply -f samples/oudsm-deployment.yaml service/oudsm created deployment.apps/oudsmdeploypod created To validate that the POD is running:\n$ kubectl --namespace \u0026lt;namespace\u0026gt; get all,pv,pvc,secret For example:\n$ kubectl --namespace oudns get all,pv,pvc,secret For example:\n$ kubectl --namespace oudns get all,pv,pvc,secret NAME READY STATUS RESTARTS AGE pod/oudsmdeploypod-7c6bb5476-6zcmc 1/1 Running 0 13m pod/oudsmdeploypod-7c6bb5476-nldd8 1/1 Running 0 13m NAME TYPE CLUSTER-IP EXTERNAL-IP PORT(S) AGE service/oudsm NodePort 10.97.245.58 \u0026lt;none\u0026gt; 7001:31342/TCP,7002:31222/TCP 13m NAME READY 
UP-TO-DATE AVAILABLE AGE deployment.apps/oudsmdeploypod 2/2 2 2 13m NAME DESIRED CURRENT READY AGE replicaset.apps/oudsmdeploypod-7c6bb5476 2 2 2 13m NAME CAPACITY ACCESS MODES RECLAIM POLICY STATUS CLAIM STORAGECLASS REASON AGE persistentvolume/oudsmpv 10Gi RWX Delete Bound oudns/oudsmpvc manual 16h NAME STATUS VOLUME CAPACITY ACCESS MODES STORAGECLASS AGE persistentvolumeclaim/oudsmpvc Bound oudsmpv 10Gi RWX manual 16h NAME TYPE DATA AGE secret/default-token-5kbxk kubernetes.io/service-account-token 3 16h secret/oudsmsecret Opaque 2 16h Once the container is running (READY shows as \u0026lsquo;1/1\u0026rsquo;) check the value of the service port (PORT/s value : here 7001:31421/TCP,7002:31737/TCP) for the Oracle Unified Directory Services Manager service and use this to access Oracle Unified Directory Services Manager in a browser:\n http://\u0026lt;hostname\u0026gt;:\u0026lt;svcport\u0026gt;/oudsm In the case here:\n http://\u0026lt;myhost\u0026gt;:31342/oudsm Notice that in the output above we have created 2 Oracle Unified Directory Services Manager PODs (pod/oudsmdeploypod-7bb67b685c-78sq5, pod/oudsmdeploypod-7bb67b685c-xssbq) which are accessed via a service (service/oudsm).\nThe number of PODs is governed by the replicas parameter in the samples/oudsm-deployment.yaml file:\n... kind: Deployment metadata: name: oudsmdeploypod namespace: oudns labels: app: oudsmdeploypod spec: replicas: 2 selector: matchLabels: app: oudsmdeploypod ... If you have a requirement to add additional PODs to your cluster you can update the samples/oudsm-deployment.yaml file with the new value for replicas and apply the file. For example, setting replicas to \u0026lsquo;3\u0026rsquo; would start an additional POD as shown below:\n... kind: Deployment metadata: name: oudsmdeploypod namespace: oudns labels: app: oudsmdeploypod spec: replicas: 3 selector: matchLabels: app: oudsmdeploypod ... 
$ kubectl apply -f samples/oudsm-deployment.yaml service/oudsm unchanged deployment.apps/oudsmdeploypod configured Check that the number of PODs has increased to 3.\n$ kubectl --namespace oudns get all,pv,pvc,secret NAME READY STATUS RESTARTS AGE pod/oudsmdeploypod-7c6bb5476-6zcmc 1/1 Running 0 17m pod/oudsmdeploypod-7c6bb5476-nldd8 1/1 Running 0 17m pod/oudsmdeploypod-7c6bb5476-vqmz7 0/1 Running 0 26s NAME TYPE CLUSTER-IP EXTERNAL-IP PORT(S) AGE service/oudsm NodePort 10.97.245.58 \u0026lt;none\u0026gt; 7001:31342/TCP,7002:31222/TCP 17m NAME READY UP-TO-DATE AVAILABLE AGE deployment.apps/oudsmdeploypod 2/3 3 2 17m NAME DESIRED CURRENT READY AGE replicaset.apps/oudsmdeploypod-7c6bb5476 3 3 2 17m NAME CAPACITY ACCESS MODES RECLAIM POLICY STATUS CLAIM STORAGECLASS REASON AGE persistentvolume/mike-oud-ds-rs-espv1 20Gi RWX Retain Bound mikens/data-mike-oud-ds-rs-es-cluster-0 elk 4d18h persistentvolume/mike-oud-ds-rs-pv 30Gi RWX Retain Bound mikens/mike-oud-ds-rs-pvc manual 4d18h persistentvolume/oimcluster-oim-pv 10Gi RWX Retain Bound oimcluster/oimcluster-oim-pvc oimcluster-oim-storage-class 69d persistentvolume/oudsmpv 10Gi RWX Delete Bound oudns/oudsmpvc manual 16h NAME STATUS VOLUME CAPACITY ACCESS MODES STORAGECLASS AGE persistentvolumeclaim/oudsmpvc Bound oudsmpv 10Gi RWX manual 16h NAME TYPE DATA AGE secret/default-token-5kbxk kubernetes.io/service-account-token 3 16h secret/oudsmsecret Opaque 2 16h bash-4.2$ In this example, the POD pod/oudsmdeploypod-7c6bb5476-vqmz7 has been added.\nAppendix A : Reference samples/oudsm-pod.yaml : This yaml file is used to create the pod and bring up the Oracle Unified Directory Services Manager services samples/oudsm-deployment.yaml : This yaml file is used to create replicas of Oracle Unified Directory Services Manager and bring up the Oracle Unified Directory Services Manager services based on the deployment " +}, +{ + "uri": "/fmw-kubernetes/21.4.2/oig/configure-design-console/using-the-design-console-with-nginx-non-ssl/", + 
"title": "a. Using Design Console with NGINX(non-SSL)", + "tags": [], + "description": "Configure Design Console with NGINX(non-SSL).", + "content": "Configure an NGINX ingress (non-SSL) to allow Design Console to connect to your Kubernetes cluster.\n Prerequisites\n Setup routing rules for the Design Console ingress\n Create the ingress\n Update the T3 channel\n Restart the OIG domain\n Design Console client\na. Using an on-premises installed Design Console\nb. Using a container image for Design Console\n Login to the Design Console\n Prerequisites If you haven\u0026rsquo;t already configured an NGINX ingress controller (Non-SSL) for OIG, follow Using an Ingress with NGINX (non-SSL).\nMake sure you know the master hostname and ingress port for NGINX before proceeding e.g http://${MASTERNODE-HOSTNAME}:${MASTERNODE-PORT}.\nSetup routing rules for the Design Console ingress Setup routing rules by running the following commands:\n$ cd $WORKDIR/kubernetes/design-console-ingress Edit values.yaml and ensure that tls: NONSSL and domainUID: governancedomain are set, for example:\n# Load balancer type. 
Supported values are: NGINX type: NGINX # Type of Configuration Supported Values are : NONSSL,SSL # tls: NONSSL tls: NONSSL # TLS secret name if the mode is SSL secretName: dc-tls-cert # WLS domain as backend to the load balancer wlsDomain: domainUID: governancedomain oimClusterName: oim_cluster oimServerT3Port: 14002 Create the ingress Run the following command to create the ingress:\n$ cd $WORKDIR $ helm install governancedomain-nginx-designconsole kubernetes/design-console-ingress --namespace oigns --values kubernetes/design-console-ingress/values.yaml Note: If using Kubernetes 1.18 then add --version=3.34.0 to the end of command.\nFor example:\nThe output will look similar to the following:\nNAME: governancedomain-nginx-designconsole LAST DEPLOYED: Mon Nov 15 06:07:09 2021 NAMESPACE: oigns STATUS: deployed REVISION: 1 TEST SUITE: None Run the following command to show the ingress is created successfully:\n$ kubectl describe ing governancedomain-nginx-designconsole -n \u0026lt;domain_namespace\u0026gt; For example:\n$ kubectl describe ing governancedomain-nginx-designconsole -n oigns The output will look similar to the following:\nName: governancedomain-nginx-designconsole Namespace: oigns Address: Default backend: default-http-backend:80 (\u0026lt;error: endpoints \u0026quot;default-http-backend\u0026quot; not found\u0026gt;) Rules: Host Path Backends ---- ---- -------- * governancedomain-cluster-oim-cluster:14002 (10.244.1.25:14002) Annotations: kubernetes.io/ingress.class: nginx meta.helm.sh/release-name: governancedomain-nginx-designconsole meta.helm.sh/release-namespace: oigns nginx.ingress.kubernetes.io/affinity: cookie nginx.ingress.kubernetes.io/enable-access-log: false Events: Type Reason Age From Message ---- ------ ---- ---- ------- Normal Sync 13s nginx-ingress-controller Scheduled for sync Update the T3 channel Log in to the WebLogic Console using http://${MASTERNODE-HOSTNAME}:${MASTERNODE-PORT}/console.\n Navigate to Environment, click Servers, and 
then select oim_server1.\n Click Protocols, and then Channels.\n Click the default T3 channel called T3Channel.\n Click Lock and Edit.\n Set the External Listen Address to a worker node where oim_server1 is running.\nNote: Use kubectl get pods -n \u0026lt;domain_namespace\u0026gt; -o wide to see the worker node it is running on. For example, below the governancedomain-oim-server1 is running on worker-node2:\n$ kubectl get pods -n oigns -o wide NAME READY STATUS RESTARTS AGE IP NODE NOMINATED NODE READINESS GATES governancedomain-adminserver 1/1 Running 0 33m 10.244.2.96 worker-node2 \u0026lt;none\u0026gt; \u0026lt;none\u0026gt; governancedomain-create-fmw-infra-sample-domain-job-8cww8 0/1 Completed 0 11d 10.244.2.45 worker-node2 \u0026lt;none\u0026gt; \u0026lt;none\u0026gt; governancedomain-oim-server1 1/1 Running 0 31m 10.244.2.98 worker-node2 \u0026lt;none\u0026gt; \u0026lt;none\u0026gt; governancedomain-soa-server1 1/1 Running 0 31m 10.244.2.97 worker-node2 \u0026lt;none\u0026gt; \u0026lt;none\u0026gt; helper 1/1 Running 0 11d 10.244.2.30 worker-node2 \u0026lt;none\u0026gt; \u0026lt;none\u0026gt; logstash-wls-f448b44c8-92l27 1/1 Running 0 7d23h 10.244.1.27 worker-node1 \u0026lt;none\u0026gt; \u0026lt;none\u0026gt; Set the External Listen Port to the ingress controller port.\n Click Save.\n Click Activate Changes.\n Restart the OIG domain Restart the domain for the above changes to take effect by following Stopping and starting the administration server and managed servers.\nDesign Console client It is possible to use Design Console from an on-premises install, or from a container image.\nUsing an on-premises installed Design Console Install Design Console on an on-premises machine\n Follow Login to the Design Console.\n Using a container image for Design Console The Design Console can be run from a container using X windows emulation.\n On the parent machine where the Design Console is to be displayed, run xhost+.\n Execute the following command to start a 
container to run Design Console:\n$ docker run -u root --name oigdcbase -it \u0026lt;image\u0026gt; bash For example:\n$ docker run -u root -it --name oigdcbase oracle/oig:12.2.1.4.0-8-ol7-211022.0723 bash This will take you into a bash shell inside the container:\nbash-4.2# Inside the container set the proxy, for example:\nbash-4.2# export https_proxy=http://proxy.example.com:80 Install the relevant X windows packages in the container:\nbash-4.2# yum install libXext libXrender libXtst Execute the following outside the container to create a new Design Console image from the container:\n$ docker commit \u0026lt;container_name\u0026gt; \u0026lt;design_console_image_name\u0026gt; For example:\n$ docker commit oigdcbase oigdc Exit the container bash session:\nbash-4.2# exit Start a new container using the Design Console image:\n$ docker run --name oigdc -it oigdc /bin/bash This will take you into a bash shell for the container:\nbash-4.2# In the container run the following to export the DISPLAY:\n$ export DISPLAY=\u0026lt;parent_machine_hostname:1\u0026gt; Start the Design Console from the container:\nbash-4.2# cd idm/designconsole bash-4.2# sh xlclient.sh The Design Console login should be displayed. Now follow Login to the Design Console.\n Login to the Design Console Launch the Design Console and in the Oracle Identity Manager Design Console login page enter the following details:\nEnter the following details and click Login:\n Server URL: \u0026lt;url\u0026gt; User ID: xelsysadm Password: \u0026lt;password\u0026gt;. 
where \u0026lt;url\u0026gt; is http://${MASTERNODE-HOSTNAME}:${MASTERNODE-PORT}\n If successful the Design Console will be displayed.\n " +}, +{ + "uri": "/fmw-kubernetes/21.4.2/oig/manage-oig-domains/domain-lifecycle/", + "title": "Domain life cycle", + "tags": [], + "description": "Learn about the domain life cycle of an OIG domain.", + "content": " View existing OIG servers Starting/Scaling up OIG Managed servers Stopping/Scaling down OIG Managed servers Stopping and starting the Administration Server and Managed Servers As OIG domains use the WebLogic Kubernetes Operator, domain lifecycle operations are managed using the WebLogic Kubernetes Operator itself.\nThis document shows the basic operations for starting, stopping and scaling servers in the OIG domain.\nFor more detailed information refer to Domain Life Cycle in the WebLogic Kubernetes Operator documentation.\nDo not use the WebLogic Server Administration Console or Oracle Enterprise Manager Console to start or stop servers.\n View existing OIG Servers The default OIG deployment starts the Administration Server (AdminServer), one OIG Managed Server (oim_server1) and one SOA Managed Server (soa_server1).\nThe deployment also creates, but doesn\u0026rsquo;t start, four extra OIG Managed Servers (oim_server2 to oim_server5) and four more SOA Managed Servers (soa_server2 to soa_server5).\nAll these servers are visible in the WebLogic Server Administration Console https://${MASTERNODE-HOSTNAME}:${MASTERNODE-PORT}/console by navigating to Domain Structure \u0026gt; governancedomain \u0026gt; Environment \u0026gt; Servers.\nTo view the running servers using kubectl, run the following command:\n$ kubectl get pods -n \u0026lt;domain_namespace\u0026gt; For example:\n$ kubectl get pods -n oigns The output should look similar to the following:\nNAME READY STATUS RESTARTS AGE governancedomain-adminserver 1/1 Running 0 23h governancedomain-create-fmw-infra-sample-domain-job-8cww8 0/1 Completed 0 24h 
governancedomain-oim-server1 1/1 Running 0 23h governancedomain-soa-server1 1/1 Running 0 23h Starting/Scaling up OIG Managed Servers The number of OIG Managed Servers running is dependent on the replicas parameter configured for the cluster. To start more OIG Managed Servers perform the following steps:\n Run the following kubectl command to edit the domain:\n$ kubectl edit domain \u0026lt;domain_uid\u0026gt; -n \u0026lt;domain_namespace\u0026gt; For example:\n$ kubectl edit domain governancedomain -n oigns Note: This opens an edit session for the domain where parameters can be changed using standard vi commands.\n In the edit session search for clusterName: oim_cluster and look for the replicas parameter. By default the replicas parameter is set to \u0026ldquo;1\u0026rdquo; hence a single OIG Managed Server is started (oim_server1):\n - clusterName: oim_cluster replicas: 1 serverPod: affinity: podAntiAffinity: preferredDuringSchedulingIgnoredDuringExecution: - podAffinityTerm: labelSelector: matchExpressions: - key: weblogic.clusterName operator: In values: - $(CLUSTER_NAME) To start more OIG Managed Servers, increase the replicas value as desired. 
In the example below, one more Managed Server will be started by setting replicas to \u0026ldquo;2\u0026rdquo;:\n - clusterName: oim_cluster replicas: 2 serverPod: affinity: podAntiAffinity: preferredDuringSchedulingIgnoredDuringExecution: - podAffinityTerm: labelSelector: matchExpressions: - key: weblogic.clusterName operator: In values: - $(CLUSTER_NAME) Save the file and exit (:wq)\nThe output will look similar to the following:\ndomain.weblogic.oracle/governancedomain edited Run the following kubectl command to view the pods:\n$ kubectl get pods -n \u0026lt;domain_namespace\u0026gt; For example:\n$ kubectl get pods -n oigns The output will look similar to the following:\nNAME READY STATUS RESTARTS AGE governancedomain-adminserver 1/1 Running 0 23h governancedomain-create-fmw-infra-sample-domain-job-8cww8 0/1 Completed 0 24h governancedomain-oim-server1 1/1 Running 0 23h governancedomain-oim-server2 0/1 Running 0 7s governancedomain-soa-server1 1/1 Running 0 23h One new pod (governancedomain-oim-server2) is started, but currently has a READY status of 0/1. This means oim_server2 is not currently running but is in the process of starting. 
The server will take several minutes to start so keep executing the command until READY shows 1/1:\nNAME READY STATUS RESTARTS AGE governancedomain-adminserver 1/1 Running 0 23h governancedomain-create-fmw-infra-sample-domain-job-8cww8 0/1 Completed 0 24h governancedomain-oim-server1 1/1 Running 0 23h governancedomain-oim-server2 1/1 Running 0 5m27s governancedomain-soa-server1 1/1 Running 0 23h Note: To check what is happening during server startup when READY is 0/1, run the following command to view the log of the pod that is starting:\n$ kubectl logs \u0026lt;pod\u0026gt; -n \u0026lt;domain_namespace\u0026gt; For example:\n$ kubectl logs governancedomain-oim-server2 -n oigns Stopping/Scaling down OIG Managed Servers As mentioned in the previous section, the number of OIG Managed Servers running is dependent on the replicas parameter configured for the cluster. To stop one or more OIG Managed Servers, perform the following:\n Run the following kubectl command to edit the domain:\n$ kubectl edit domain \u0026lt;domain_uid\u0026gt; -n \u0026lt;domain_namespace\u0026gt; For example:\n$ kubectl edit domain governancedomain -n oigns In the edit session search for clusterName: oim_cluster and look for the replicas parameter. In the example below replicas is set to \u0026ldquo;2\u0026rdquo; hence two OIG Managed Servers are started (oim_server1 and oim_server2):\n - clusterName: oim_cluster replicas: 2 serverPod: affinity: podAntiAffinity: preferredDuringSchedulingIgnoredDuringExecution: - podAffinityTerm: labelSelector: matchExpressions: - key: weblogic.clusterName operator: In values: - $(CLUSTER_NAME) To stop OIG Managed Servers, decrease the replicas value as desired. 
In the example below, we will stop one Managed Server by setting replicas to \u0026ldquo;1\u0026rdquo;:\n - clusterName: oim_cluster replicas: 1 serverPod: affinity: podAntiAffinity: preferredDuringSchedulingIgnoredDuringExecution: - podAffinityTerm: labelSelector: matchExpressions: - key: weblogic.clusterName operator: In values: - $(CLUSTER_NAME) Save the file and exit (:wq)\n Run the following kubectl command to view the pods:\n$ kubectl get pods -n \u0026lt;domain_namespace\u0026gt; For example:\n$ kubectl get pods -n oigns The output will look similar to the following:\nNAME READY STATUS RESTARTS AGE governancedomain-adminserver 1/1 Running 0 23h governancedomain-create-fmw-infra-sample-domain-job-8cww8 0/1 Completed 0 24h governancedomain-oim-server1 1/1 Running 0 23h governancedomain-oim-server2 1/1 Terminating 0 7m30s governancedomain-soa-server1 1/1 Running 0 23h The exiting pod shows a STATUS of Terminating (governancedomain-oim-server2). The server may take a minute or two to stop, so keep executing the command until the pod has disappeared:\nNAME READY STATUS RESTARTS AGE governancedomain-adminserver 1/1 Running 0 23h governancedomain-create-fmw-infra-sample-domain-job-8cww8 0/1 Completed 0 24h governancedomain-oim-server1 1/1 Running 0 23h governancedomain-soa-server1 1/1 Running 0 23h Stopping and Starting the Administration Server and Managed Servers To stop all the OIG Managed Servers and the Administration Server in one operation:\n Run the following kubectl command to edit the domain:\n$ kubectl edit domain \u0026lt;domain_uid\u0026gt; -n \u0026lt;domain_namespace\u0026gt; For example:\n$ kubectl edit domain governancedomain -n oigns In the edit session search for serverStartPolicy: IF_NEEDED:\n volumeMounts: - mountPath: /u01/oracle/user_projects/domains name: weblogic-domain-storage-volume volumes: - name: weblogic-domain-storage-volume persistentVolumeClaim: claimName: governancedomain-domain-pvc serverStartPolicy: IF_NEEDED Change 
serverStartPolicy: IF_NEEDED to NEVER as follows:\n volumeMounts: - mountPath: /u01/oracle/user_projects/domains name: weblogic-domain-storage-volume volumes: - name: weblogic-domain-storage-volume persistentVolumeClaim: claimName: governancedomain-domain-pvc serverStartPolicy: NEVER Save the file and exit (:wq).\n Run the following kubectl command to view the pods:\n$ kubectl get pods -n \u0026lt;domain_namespace\u0026gt; For example:\n$ kubectl get pods -n oigns The output will look similar to the following:\nNAME READY STATUS RESTARTS AGE governancedomain-adminserver 1/1 Terminating 0 23h governancedomain-create-fmw-infra-sample-domain-job-8cww8 0/1 Completed 0 24h governancedomain-oim-server1 1/1 Terminating 0 23h governancedomain-soa-server1 1/1 Terminating 0 23h The AdminServer pod and Managed Server pods will move to a STATUS of Terminating. After a few minutes, run the command again and the pods should have disappeared:\nNAME READY STATUS RESTARTS AGE governancedomain-create-fmw-infra-sample-domain-job-8cww8 0/1 Completed 0 24h To start the Administration Server and Managed Servers up again, repeat the previous steps but change serverStartPolicy: NEVER to IF_NEEDED as follows:\n volumeMounts: - mountPath: /u01/oracle/user_projects/domains name: weblogic-domain-storage-volume volumes: - name: weblogic-domain-storage-volume persistentVolumeClaim: claimName: governancedomain-domain-pvc serverStartPolicy: IF_NEEDED Run the following kubectl command to view the pods:\n$ kubectl get pods -n \u0026lt;domain_namespace\u0026gt; For example:\n$ kubectl get pods -n oigns The output will look similar to the following:\nNAME READY STATUS RESTARTS AGE governancedomain-adminserver 0/1 Running 0 4s governancedomain-create-fmw-infra-sample-domain-job-8cww8 0/1 Completed 0 24h The Administration Server pod will start followed by the OIG Managed Servers pods. 
This process will take several minutes, so keep executing the command until all the pods are running with READY status 1/1 :\nNAME READY STATUS RESTARTS AGE governancedomain-adminserver 1/1 Running 0 6m57s governancedomain-create-fmw-infra-sample-domain-job-8cww8 0/1 Completed 0 24h governancedomain-oim-server1 1/1 Running 0 4m33s governancedomain-soa-server1 1/1 Running 0 4m33s " +}, +{ + "uri": "/fmw-kubernetes/21.4.2/soa-domains/installguide/", + "title": "Install Guide", + "tags": [], + "description": "", + "content": "Install the WebLogic Kubernetes Operator and prepare and deploy Oracle SOA Suite domains.\n Requirements and limitations Understand the system requirements and limitations for deploying and running Oracle SOA Suite domains with the WebLogic Kubernetes Operator, including the SOA cluster sizing recommendations.\n Prepare your environment Prepare for creating Oracle SOA Suite domains, including required secrets creation, persistent volume and volume claim creation, database creation, and database schema creation.\n Create Oracle SOA Suite domains Create an Oracle SOA Suite domain home on an existing PV or PVC, and create the domain resource YAML file for deploying the generated Oracle SOA Suite domain.\n " +}, +{ + "uri": "/fmw-kubernetes/21.4.2/soa-domains/appendix/quickstart-deployment-on-prem/", + "title": "Quick start deployment on-premise", + "tags": [], + "description": "Describes how to quickly get an Oracle SOA Suite domain instance running (using the defaults, nothing special) for development and test purposes.", + "content": "Use this Quick Start to create an Oracle SOA Suite domain deployment in a Kubernetes cluster (on-premise environments) with the WebLogic Kubernetes Operator. Note that this walkthrough is for demonstration purposes only, not for use in production. These instructions assume that you are already familiar with Kubernetes. 
If you need more detailed instructions, refer to the Install Guide.\nHardware requirements The Linux kernel supported for deploying and running Oracle SOA Suite domains with the operator is Oracle Linux 7 (UL6+) and Red Hat Enterprise Linux 7 (UL3+ only with standalone Kubernetes). Refer to the prerequisites for more details.\nFor this exercise, the minimum hardware requirements to create a single-node Kubernetes cluster and then deploy the soaosb (Oracle SOA Suite, Oracle Service Bus, and Enterprise Scheduler (ESS)) domain type with one Managed Server for Oracle SOA Suite and one for the Oracle Service Bus cluster, along with Oracle Database running as a container are:\n Hardware Size RAM 32GB Disk Space 250GB+ CPU core(s) 6 See here for resource sizing information for Oracle SOA Suite domains set up on a Kubernetes cluster.\nSet up Oracle SOA Suite in an on-premise environment Use the steps in this topic to create a single-instance on-premise Kubernetes cluster and then create an Oracle SOA Suite soaosb domain type, which deploys a domain with Oracle SOA Suite, Oracle Service Bus, and Oracle Enterprise Scheduler (ESS).\n Step 1 - Prepare a virtual machine for the Kubernetes cluster Step 2 - Set up a single instance Kubernetes cluster Step 3 - Get scripts and images Step 4 - Install the WebLogic Kubernetes Operator Step 5 - Install the Traefik (ingress-based) load balancer Step 6 - Create and configure an Oracle SOA Suite domain 1. Prepare a virtual machine for the Kubernetes cluster For illustration purposes, these instructions are for Oracle Linux 7u6+. If you are using a different flavor of Linux, you will need to adjust the steps accordingly.\nThese steps must be run with the root user, unless specified otherwise. Any time you see YOUR_USERID in a command, you should replace it with your actual userid.\n 1.1 Prerequisites Choose the directories where your Docker and Kubernetes files will be stored. 
The Docker directory should be on a disk with a lot of free space (more than 100GB) because it will be used for the Docker file system, which contains all of your images and containers. The Kubernetes directory is used for the /var/lib/kubelet file system and persistent volume storage.\n$ export docker_dir=/u01/docker $ export kubelet_dir=/u01/kubelet $ mkdir -p $docker_dir $kubelet_dir $ ln -s $kubelet_dir /var/lib/kubelet Verify that IPv4 forwarding is enabled on your host.\nNote: Replace eth0 with the ethernet interface name of your compute resource if it is different.\n$ /sbin/sysctl -a 2\u0026gt;\u0026amp;1|grep -s \u0026#39;net.ipv4.conf.docker0.forwarding\u0026#39; $ /sbin/sysctl -a 2\u0026gt;\u0026amp;1|grep -s \u0026#39;net.ipv4.conf.eth0.forwarding\u0026#39; $ /sbin/sysctl -a 2\u0026gt;\u0026amp;1|grep -s \u0026#39;net.ipv4.conf.lo.forwarding\u0026#39; $ /sbin/sysctl -a 2\u0026gt;\u0026amp;1|grep -s \u0026#39;net.ipv4.ip_nonlocal_bind\u0026#39; For example: Verify that all are set to 1:\n$ net.ipv4.conf.docker0.forwarding = 1 $ net.ipv4.conf.eth0.forwarding = 1 $ net.ipv4.conf.lo.forwarding = 1 $ net.ipv4.ip_nonlocal_bind = 1 Solution: Set all values to 1 immediately:\n$ /sbin/sysctl net.ipv4.conf.docker0.forwarding=1 $ /sbin/sysctl net.ipv4.conf.eth0.forwarding=1 $ /sbin/sysctl net.ipv4.conf.lo.forwarding=1 $ /sbin/sysctl net.ipv4.ip_nonlocal_bind=1 To preserve the settings permanently: Update the above values to 1 in files in /usr/lib/sysctl.d/, /run/sysctl.d/, and /etc/sysctl.d/.\n Verify the iptables rule for forwarding.\nKubernetes uses iptables to handle many networking and port forwarding rules. 
A standard Docker installation may create a firewall rule that prevents forwarding.\nVerify if the iptables rule to accept forwarding traffic is set:\n$ /sbin/iptables -L -n | awk \u0026#39;/Chain FORWARD / {print $4}\u0026#39; | tr -d \u0026#34;)\u0026#34; If the output is \u0026ldquo;DROP\u0026rdquo;, then run the following command:\n$ /sbin/iptables -P FORWARD ACCEPT Verify if the iptables rule is properly set to \u0026ldquo;ACCEPT\u0026rdquo;:\n$ /sbin/iptables -L -n | awk \u0026#39;/Chain FORWARD / {print $4}\u0026#39; | tr -d \u0026#34;)\u0026#34; Disable and stop firewalld:\n$ systemctl disable firewalld $ systemctl stop firewalld 1.2 Install and configure Docker Note: If you have already installed Docker with version 18.03+ and configured the Docker daemon root to sufficient disk space along with proxy settings, continue to Install and configure Kubernetes.\n Make sure that you have the right operating system version:\n$ uname -a $ more /etc/oracle-release Example output:\nLinux xxxxxxx 4.1.12-124.27.1.el7uek.x86_64 #2 SMP Mon May 13 08:56:17 PDT 2019 x86_64 x86_64 x86_64 GNU/Linux Oracle Linux Server release 7.6 Install the latest docker-engine and start the Docker service:\n$ docker_version=\u0026#34;19.03.1.ol\u0026#34; $ yum-config-manager --enable ol7_addons $ yum install docker-engine-$docker_version $ systemctl enable docker $ systemctl start docker Add your user ID to the Docker group to allow you to run Docker commands without root access:\n$ /sbin/usermod -a -G docker \u0026lt;YOUR_USERID\u0026gt; Check that your Docker version is at least 18.03:\n$ docker version Example output:\nClient: Docker Engine - Community Version: 19.03.1-ol API version: 1.40 Go version: go1.12.5 Git commit: ead9442 Built: Wed Sep 11 06:40:28 2019 OS/Arch: linux/amd64 Experimental: false Server: Docker Engine - Community Engine: Version: 19.03.1-ol API version: 1.40 (minimum version 1.12) Go version: go1.12.5 Git commit: ead9442 Built: Wed Sep 11 06:38:43 2019 OS/Arch: 
linux/amd64 Experimental: false Default Registry: docker.io containerd: Version: v1.2.0-rc.0-108-gc444666 GitCommit: c4446665cb9c30056f4998ed953e6d4ff22c7c39 runc: Version: 1.0.0-rc5+dev GitCommit: 4bb1fe4ace1a32d3676bb98f5d3b6a4e32bf6c58 docker-init: Version: 0.18.0 GitCommit: fec3683 Update the Docker engine configuration:\n$ mkdir -p /etc/docker $ cat \u0026lt;\u0026lt;EOF \u0026gt; /etc/docker/daemon.json { \u0026#34;group\u0026#34;: \u0026#34;docker\u0026#34;, \u0026#34;data-root\u0026#34;: \u0026#34;/u01/docker\u0026#34; } EOF Configure proxy settings if you are behind an HTTP proxy:\n### Create the drop-in file /etc/systemd/system/docker.service.d/http-proxy.conf that contains proxy details: $ cat \u0026lt;\u0026lt;EOF \u0026gt; /etc/systemd/system/docker.service.d/http-proxy.conf [Service] Environment=\u0026#34;HTTP_PROXY=http://REPLACE-WITH-YOUR-COMPANY-PROXY-HOST:PORT\u0026#34; Environment=\u0026#34;HTTPS_PROXY=http://REPLACE-WITH-YOUR-COMPANY-PROXY-HOST:PORT\u0026#34; Environment=\u0026#34;NO_PROXY=localhost,127.0.0.0/8,ADD-YOUR-INTERNAL-NO-PROXY-LIST,/var/run/docker.sock\u0026#34; EOF Note: On some hosts /etc/systemd/system/docker.service.d may not be available. Create this directory if it is not available.\n Restart the Docker daemon to load the latest changes:\n$ systemctl daemon-reload $ systemctl restart docker Verify that the proxy is configured with Docker:\n$ docker info|grep -i proxy Example output:\nHTTP Proxy: http://REPLACE-WITH-YOUR-COMPANY-PROXY-HOST:PORT HTTPS Proxy: http://REPLACE-WITH-YOUR-COMPANY-PROXY-HOST:PORT No Proxy: localhost,127.0.0.0/8,ADD-YOUR-INTERNAL-NO-PROXY-LIST,/var/run/docker.sock Verify Docker installation:\n$ docker run hello-world Example output:\nHello from Docker! This message shows that your installation appears to be working correctly. To generate this message, Docker took the following steps: 1. The Docker client contacted the Docker daemon. 2. 
The Docker daemon pulled the \u0026#34;hello-world\u0026#34; image from the Docker Hub. (amd64) 3. The Docker daemon created a new container from that image which runs the executable that produces the output you are currently reading. 4. The Docker daemon streamed that output to the Docker client, which sent it to your terminal. To try something more ambitious, you can run an Ubuntu container with: $ docker run -it ubuntu bash Share images, automate workflows, and more with a free Docker ID: https://hub.docker.com/ For more examples and ideas, visit: https://docs.docker.com/get-started/ 1.3 Install and configure Kubernetes Add the external Kubernetes repository:\n$ cat \u0026lt;\u0026lt;EOF | sudo tee /etc/yum.repos.d/kubernetes.repo [kubernetes] name=Kubernetes baseurl=https://packages.cloud.google.com/yum/repos/kubernetes-el7-\\$basearch enabled=1 gpgcheck=1 repo_gpgcheck=1 gpgkey=https://packages.cloud.google.com/yum/doc/yum-key.gpg https://packages.cloud.google.com/yum/doc/rpm-package-key.gpg exclude=kubelet kubeadm kubectl EOF Set SELinux in permissive mode (effectively disabling it):\n$ export PATH=/sbin:$PATH $ setenforce 0 $ sed -i \u0026#39;s/^SELINUX=enforcing$/SELINUX=permissive/\u0026#39; /etc/selinux/config Export proxy and install kubeadm, kubelet, and kubectl:\n### Get the nslookup IP address of the master node to use with apiserver-advertise-address during setting up Kubernetes master ### as the host may have different internal ip (hostname -i) and nslookup $HOSTNAME $ ip_addr=`nslookup $(hostname -f) | grep -m2 Address | tail -n1| awk -F: \u0026#39;{print $2}\u0026#39;| tr -d \u0026#34; \u0026#34;` $ echo $ip_addr ### Set the proxies $ export NO_PROXY=localhost,127.0.0.0/8,ADD-YOUR-INTERNAL-NO-PROXY-LIST,/var/run/docker.sock,$ip_addr $ export no_proxy=localhost,127.0.0.0/8,ADD-YOUR-INTERNAL-NO-PROXY-LIST,/var/run/docker.sock,$ip_addr $ export http_proxy=http://REPLACE-WITH-YOUR-COMPANY-PROXY-HOST:PORT $ export 
https_proxy=http://REPLACE-WITH-YOUR-COMPANY-PROXY-HOST:PORT $ export HTTPS_PROXY=http://REPLACE-WITH-YOUR-COMPANY-PROXY-HOST:PORT $ export HTTP_PROXY=http://REPLACE-WITH-YOUR-COMPANY-PROXY-HOST:PORT ### install kubernetes 1.18.4-1 $ VERSION=1.18.4-1 $ yum install -y kubelet-$VERSION kubeadm-$VERSION kubectl-$VERSION --disableexcludes=kubernetes ### enable kubelet service so that it auto-restart on reboot $ systemctl enable --now kubelet Ensure net.bridge.bridge-nf-call-iptables is set to 1 in your sysctl to avoid traffic routing issues:\n$ cat \u0026lt;\u0026lt;EOF \u0026gt; /etc/sysctl.d/k8s.conf net.bridge.bridge-nf-call-ip6tables = 1 net.bridge.bridge-nf-call-iptables = 1 EOF $ sysctl --system Disable swap check:\n$ sed -i \u0026#39;s/KUBELET_EXTRA_ARGS=/KUBELET_EXTRA_ARGS=\u0026#34;--fail-swap-on=false\u0026#34;/\u0026#39; /etc/sysconfig/kubelet $ cat /etc/sysconfig/kubelet ### Reload and restart kubelet $ systemctl daemon-reload $ systemctl restart kubelet 1.4 Set up Helm Install Helm v3.3.4+.\na. Download Helm from https://github.com/helm/helm/releases.\nFor example, to download Helm v3.5.4:\n$ wget https://get.helm.sh/helm-v3.5.4-linux-amd64.tar.gz b. Unpack tar.gz:\n$ tar -zxvf helm-v3.5.4-linux-amd64.tar.gz c. Find the Helm binary in the unpacked directory, and move it to its desired destination:\n$ mv linux-amd64/helm /usr/bin/helm Run helm version to verify its installation:\n$ helm version version.BuildInfo{Version:\u0026#34;v3.5.4\u0026#34;, GitCommit:\u0026#34;1b5edb69df3d3a08df77c9902dc17af864ff05d1\u0026#34;, GitTreeState:\u0026#34;clean\u0026#34;, GoVersion:\u0026#34;go1.15.11\u0026#34;} 2. Set up a single instance Kubernetes cluster Notes:\n These steps must be run with the root user, unless specified otherwise! If you choose to use a different CIDR block (that is, other than 10.244.0.0/16 for the --pod-network-cidr= in the kubeadm init command), then also update NO_PROXY and no_proxy with the appropriate value. 
Also make sure to update kube-flannel.yaml with the new value before deploying. Replace the following with appropriate values: ADD-YOUR-INTERNAL-NO-PROXY-LIST REPLACE-WITH-YOUR-COMPANY-PROXY-HOST:PORT 2.1 Set up the master node Create a shell script that sets up the necessary environment variables. You can append this to the user’s .bashrc so that it will run at login. You must also configure your proxy settings here if you are behind an HTTP proxy:\n## grab my IP address to pass into kubeadm init, and to add to no_proxy vars ip_addr=`nslookup $(hostname -f) | grep -m2 Address | tail -n1| awk -F: \u0026#39;{print $2}\u0026#39;| tr -d \u0026#34; \u0026#34;` export pod_network_cidr=\u0026#34;10.244.0.0/16\u0026#34; export service_cidr=\u0026#34;10.96.0.0/12\u0026#34; export PATH=$PATH:/sbin:/usr/sbin ### Set the proxies export NO_PROXY=localhost,127.0.0.0/8,ADD-YOUR-INTERNAL-NO-PROXY-LIST,/var/run/docker.sock,$ip_addr,$pod_network_cidr,$service_cidr export no_proxy=localhost,127.0.0.0/8,ADD-YOUR-INTERNAL-NO-PROXY-LIST,/var/run/docker.sock,$ip_addr,$pod_network_cidr,$service_cidr export http_proxy=http://REPLACE-WITH-YOUR-COMPANY-PROXY-HOST:PORT export https_proxy=http://REPLACE-WITH-YOUR-COMPANY-PROXY-HOST:PORT export HTTPS_PROXY=http://REPLACE-WITH-YOUR-COMPANY-PROXY-HOST:PORT export HTTP_PROXY=http://REPLACE-WITH-YOUR-COMPANY-PROXY-HOST:PORT Source the script to set up your environment variables:\n$ . ~/.bashrc To implement command completion, add the following to the script:\n$ [ -f /usr/share/bash-completion/bash_completion ] \u0026amp;\u0026amp; . /usr/share/bash-completion/bash_completion $ source \u0026lt;(kubectl completion bash) Run kubeadm init to create the master node:\n$ kubeadm init \\ --pod-network-cidr=$pod_network_cidr \\ --apiserver-advertise-address=$ip_addr \\ --ignore-preflight-errors=Swap \u0026gt; /tmp/kubeadm-init.out 2\u0026gt;\u0026amp;1 Log in to the terminal with YOUR_USERID:YOUR_GROUP. 
Then set up the ~/.bashrc similar to steps 1 to 3 with YOUR_USERID:YOUR_GROUP.\n Note that from now on we will be using YOUR_USERID:YOUR_GROUP to execute any kubectl commands and not root.\n Set up YOUR_USERID:YOUR_GROUP to access the Kubernetes cluster:\n$ mkdir -p $HOME/.kube $ sudo cp -i /etc/kubernetes/admin.conf $HOME/.kube/config $ sudo chown $(id -u):$(id -g) $HOME/.kube/config Verify that YOUR_USERID:YOUR_GROUP is set up to access the Kubernetes cluster using the kubectl command:\n$ kubectl get nodes Note: At this step, the node is not in ready state as we have not yet installed the pod network add-on. After the next step, the node will show status as Ready.\n Install a pod network add-on (flannel) so that your pods can communicate with each other.\n Note: If you are using a different CIDR block than 10.244.0.0/16, then download and update kube-flannel.yml with the correct CIDR address before deploying into the cluster:\n $ kubectl apply -f https://raw.githubusercontent.com/coreos/flannel/v0.12.0/Documentation/kube-flannel.yml Verify that the master node is in Ready status:\n$ kubectl get nodes Sample output:\nNAME STATUS ROLES AGE VERSION mymasternode Ready master 8m26s v1.18.4 or:\n$ kubectl get pods -n kube-system Sample output:\nNAME READY STATUS RESTARTS AGE pod/coredns-86c58d9df4-58p9f 1/1 Running 0 3m59s pod/coredns-86c58d9df4-mzrr5 1/1 Running 0 3m59s pod/etcd-mymasternode 1/1 Running 0 3m4s pod/kube-apiserver-node 1/1 Running 0 3m21s pod/kube-controller-manager-mymasternode 1/1 Running 0 3m25s pod/kube-flannel-ds-amd64-6npx4 1/1 Running 0 49s pod/kube-proxy-4vsgm 1/1 Running 0 3m59s pod/kube-scheduler-mymasternode 1/1 Running 0 2m58s To schedule pods on the master node, taint the node:\n$ kubectl taint nodes --all node-role.kubernetes.io/master- Congratulations! Your Kubernetes cluster environment is ready to deploy your Oracle SOA Suite domain.\nFor additional references on Kubernetes cluster setup, check the cheat sheet.\n3. 
Get scripts and images 3.1 Set up the code repository to deploy Oracle SOA Suite domains Follow these steps to set up the source code repository required to deploy Oracle SOA Suite domains.\n3.2 Get required Docker images and add them to your local registry Pull the WebLogic Kubernetes Operator image:\n$ docker pull ghcr.io/oracle/weblogic-kubernetes-operator:3.3.0 $ docker tag ghcr.io/oracle/weblogic-kubernetes-operator:3.3.0 oracle/weblogic-kubernetes-operator:3.3.0 Obtain the Oracle Database image and Oracle SOA Suite Docker image from the Oracle Container Registry:\na. For first time users, to pull an image from the Oracle Container Registry, navigate to https://container-registry.oracle.com and log in using the Oracle Single Sign-On (SSO) authentication service. If you do not already have SSO credentials, you can create an Oracle Account using: https://profile.oracle.com/myprofile/account/create-account.jspx.\nUse the web interface to accept the Oracle Standard Terms and Restrictions for the Oracle software images that you intend to deploy. Your acceptance of these terms is stored in a database that links the software images to your Oracle Single Sign-On login credentials.\nTo obtain the image, log in to the Oracle Container Registry:\n$ docker login container-registry.oracle.com b. Find and then pull the Oracle Database image for 12.2.0.1:\n$ docker pull container-registry.oracle.com/database/enterprise:12.2.0.1-slim c. Find and then pull the prebuilt Oracle SOA Suite 12.2.1.4 install image:\n$ docker pull container-registry.oracle.com/middleware/soasuite:12.2.1.4 Note: This image does not contain any Oracle SOA Suite product patches and can only be used for test and development purposes.\n 4. Install the WebLogic Kubernetes Operator 4.1 Prepare for the WebLogic Kubernetes Operator. 
Create a namespace opns for the operator:\n$ kubectl create namespace opns Create a service account op-sa for the operator in the operator’s namespace:\n$ kubectl create serviceaccount -n opns op-sa 4.2 Install the WebLogic Kubernetes Operator Use Helm to install and start the operator from the directory you just cloned:\n$ cd ${WORKDIR} $ helm install weblogic-kubernetes-operator charts/weblogic-operator \\ --namespace opns \\ --set image=oracle/weblogic-kubernetes-operator:3.3.0 \\ --set serviceAccount=op-sa \\ --set \u0026#34;domainNamespaces={}\u0026#34; \\ --wait 4.3 Verify the WebLogic Kubernetes Operator Verify that the operator’s pod is running by listing the pods in the operator’s namespace. You should see one for the operator:\n$ kubectl get pods -n opns Verify that the operator is up and running by viewing the operator pod\u0026rsquo;s logs:\n$ kubectl logs -n opns -c weblogic-operator deployments/weblogic-operator The WebLogic Kubernetes Operator v3.3.0 has been installed. Continue with the load balancer and Oracle SOA Suite domain setup.\n5. Install the Traefik (ingress-based) load balancer The WebLogic Kubernetes Operator supports these load balancers: Traefik, NGINX, and Apache. Samples are provided in the documentation.\nThis Quick Start demonstrates how to install the Traefik ingress controller to provide load balancing for an Oracle SOA Suite domain.\n Create a namespace for Traefik:\n$ kubectl create namespace traefik Set up Helm for 3rd party services:\n$ helm repo add traefik https://containous.github.io/traefik-helm-chart Install the Traefik operator in the traefik namespace with the provided sample values:\n$ cd ${WORKDIR} $ helm install traefik traefik/traefik \\ --namespace traefik \\ --values charts/traefik/values.yaml \\ --set \u0026#34;kubernetes.namespaces={traefik}\u0026#34; \\ --set \u0026#34;service.type=NodePort\u0026#34; \\ --wait 6. 
Create and configure an Oracle SOA Suite domain 6.1 Prepare for an Oracle SOA Suite domain Create a namespace that can host Oracle SOA Suite domains:\n$ kubectl create namespace soans Use Helm to configure the operator to manage Oracle SOA Suite domains in this namespace:\n$ cd ${WORKDIR} $ helm upgrade weblogic-kubernetes-operator charts/weblogic-operator \\ --reuse-values \\ --namespace opns \\ --set \u0026#34;domainNamespaces={soans}\u0026#34; \\ --wait Create Kubernetes secrets.\na. Create a Kubernetes secret for the domain in the same Kubernetes namespace as the domain. In this example, the username is weblogic, the password is Welcome1, and the namespace is soans:\n$ cd ${WORKDIR}/create-weblogic-domain-credentials $ ./create-weblogic-credentials.sh \\ -u weblogic \\ -p Welcome1 \\ -n soans \\ -d soainfra \\ -s soainfra-domain-credentials b. Create a Kubernetes secret for the RCU in the same Kubernetes namespace as the domain:\n Schema user : SOA1 Schema password : Oradoc_db1 DB sys user password : Oradoc_db1 Domain name : soainfra Domain Namespace : soans Secret name : soainfra-rcu-credentials $ cd ${WORKDIR}/create-rcu-credentials $ ./create-rcu-credentials.sh \\ -u SOA1 \\ -p Oradoc_db1 \\ -a sys \\ -q Oradoc_db1 \\ -d soainfra \\ -n soans \\ -s soainfra-rcu-credentials Create the Kubernetes persistence volume and persistence volume claim.\na. Create the Oracle SOA Suite domain home directory. Determine if a user already exists on your host system with uid:gid of 1000:0:\n$ sudo getent passwd 1000 If this command returns a username (which is the first field), you can skip the following useradd command. If not, create the oracle user with useradd:\n$ sudo useradd -u 1000 -g 0 oracle Create the directory that will be used for the Oracle SOA Suite domain home:\n$ sudo mkdir /scratch/k8s_dir $ sudo chown -R 1000:0 /scratch/k8s_dir b. 
The create-pv-pvc-inputs.yaml has the following values by default:\n baseName: domain domainUID: soainfra namespace: soans weblogicDomainStoragePath: /scratch/k8s_dir Review and update if any changes required.\n$ cd ${WORKDIR}/create-weblogic-domain-pv-pvc $ vim create-pv-pvc-inputs.yaml c. Run the create-pv-pvc.sh script to create the PV and PVC configuration files:\n$ cd ${WORKDIR}/create-weblogic-domain-pv-pvc $ ./create-pv-pvc.sh -i create-pv-pvc-inputs.yaml -o output d. Create the PV and PVC using the configuration files created in the previous step:\n$ kubectl create -f output/pv-pvcs/soainfra-domain-pv.yaml $ kubectl create -f output/pv-pvcs/soainfra-domain-pvc.yaml Install and configure the database for the Oracle SOA Suite domain.\nThis step is required only when a standalone database is not already set up and you want to use the database in a container.\nThe Oracle Database Docker images are supported only for non-production use. For more details, see My Oracle Support note: Oracle Support for Database Running on Docker (Doc ID 2216342.1). For production, it is suggested to use a standalone database. This example provides steps to create the database in a container.\n a. Create a database in a container:\n$ cd ${WORKDIR}/create-oracle-db-service $ ./start-db-service.sh -i container-registry.oracle.com/database/enterprise:12.2.0.1-slim -p none Once the database is successfully created, you can use the database connection string oracle-db.default.svc.cluster.local:1521/devpdb.k8s as an rcuDatabaseURL parameter in the create-domain-inputs.yaml file.\nb. 
Create Oracle SOA Suite schemas for the domain type (for example, soaosb).\nTo install the Oracle SOA Suite schemas, run the create-rcu-schema.sh script with the following inputs:\n -s \u0026lt;RCU PREFIX\u0026gt; -t \u0026lt;SOA domain type\u0026gt; -d \u0026lt;Oracle Database URL\u0026gt; -i \u0026lt;SOASuite image\u0026gt; -n \u0026lt;Namespace\u0026gt; -q \u0026lt;SYSDBA Database password\u0026gt; -r \u0026lt;Schema password\u0026gt; -c \u0026lt;Comma-separated variables\u0026gt; -l \u0026lt;Timeout limit in seconds. (optional). (default: 300)\u0026gt; For example:\n$ cd ${WORKDIR}/create-rcu-schema $ ./create-rcu-schema.sh \\ -s SOA1 \\ -t soaosb \\ -d oracle-db.default.svc.cluster.local:1521/devpdb.k8s \\ -i container-registry.oracle.com/middleware/soasuite:12.2.1.4 \\ -n default \\ -q Oradoc_db1 \\ -r Oradoc_db1 \\ -c SOA_PROFILE_TYPE=SMALL,HEALTHCARE_INTEGRATION=NO Now the environment is ready to start the Oracle SOA Suite domain creation.\n6.2 Create an Oracle SOA Suite domain The sample scripts for Oracle SOA Suite domain deployment are available at OracleSOASuite/create-soa-domain. 
You must edit create-domain-inputs.yaml (or a copy of it) to provide the details for your domain.\nUpdate create-domain-inputs.yaml with the following values for domain creation:\n domainType: soaosb initialManagedServerReplicas: 1 $ cd ${WORKDIR}/create-soa-domain/domain-home-on-pv/ $ cp create-domain-inputs.yaml create-domain-inputs.yaml.orig $ sed -i -e \u0026#34;s:domainType\\: soa:domainType\\: soaosb:g\u0026#34; create-domain-inputs.yaml $ sed -i -e \u0026#34;s:initialManagedServerReplicas\\: 2:initialManagedServerReplicas\\: 1:g\u0026#34; create-domain-inputs.yaml $ sed -i -e \u0026#34;s:image\\: soasuite\\:12.2.1.4:image\\: container-registry.oracle.com/middleware/soasuite\\:12.2.1.4:g\u0026#34; create-domain-inputs.yaml Run the create-domain.sh script to create a domain:\n$ cd ${WORKDIR}/create-soa-domain/domain-home-on-pv/ $ ./create-domain.sh -i create-domain-inputs.yaml -o output Create a Kubernetes domain object:\nOnce the create-domain.sh is successful, it generates output/weblogic-domains/soainfra/domain.yaml, which you can use to create the Kubernetes resource domain to start the domain and servers:\n$ cd ${WORKDIR}/create-soa-domain/domain-home-on-pv $ kubectl create -f output/weblogic-domains/soainfra/domain.yaml Verify that the Kubernetes domain object named soainfra is created:\n$ kubectl get domain -n soans NAME AGE soainfra 3m18s Once you create the domain, the introspect pod is created. This inspects the domain home and then starts the soainfra-adminserver pod. Once the soainfra-adminserver pod starts successfully, the Managed Server pods are started in parallel. 
Watch the soans namespace for the status of domain creation:\n$ kubectl get pods -n soans -w Verify that the Oracle SOA Suite domain server pods and services are created and in Ready state:\n$ kubectl get all -n soans 6.3 Configure Traefik to access Oracle SOA Suite domain services Configure Traefik to manage ingresses created in the Oracle SOA Suite domain namespace (soans):\n$ helm upgrade traefik traefik/traefik \\ --reuse-values \\ --namespace traefik \\ --set \u0026#34;kubernetes.namespaces={traefik,soans}\u0026#34; \\ --wait Create an ingress for the domain in the domain namespace by using the sample Helm chart:\n$ cd ${WORKDIR} $ helm install soa-traefik-ingress charts/ingress-per-domain \\ --namespace soans \\ --values charts/ingress-per-domain/values.yaml \\ --set \u0026#34;traefik.hostname=$(hostname -f)\u0026#34; \\ --set domainType=soaosb Verify the created ingress per domain details:\n$ kubectl describe ingress soainfra-traefik -n soans 6.4 Verify that you can access the Oracle SOA Suite domain URL Get the LOADBALANCER_HOSTNAME for your environment:\nexport LOADBALANCER_HOSTNAME=$(hostname -f) Verify the following URLs are available for Oracle SOA Suite domains of domain type soaosb:\nCredentials:\nusername: weblogic password: Welcome1\nhttp://${LOADBALANCER_HOSTNAME}:30305/console http://${LOADBALANCER_HOSTNAME}:30305/em http://${LOADBALANCER_HOSTNAME}:30305/servicebus http://${LOADBALANCER_HOSTNAME}:30305/soa-infra http://${LOADBALANCER_HOSTNAME}:30305/soa/composer http://${LOADBALANCER_HOSTNAME}:30305/integration/worklistapp http://${LOADBALANCER_HOSTNAME}:30305/ess http://${LOADBALANCER_HOSTNAME}:30305/EssHealthCheck " +}, +{ + "uri": "/fmw-kubernetes/21.4.2/oid/prerequisites/", + "title": "Prerequisites", + "tags": [], + "description": "Prerequisites for deploying and running Oracle Internet Directory in a Kubernetes environment.", + "content": "Introduction This document provides information about the system requirements for deploying and running 
Oracle Internet Directory 12c PS4 (12.2.1.4.0) in a Kubernetes environment.\nSystem Requirements for Oracle Internet Directory on Kubernetes A running Kubernetes cluster with Helm and Docker installed. For the minimum version requirements refer to document ID 2723908.1 on My Oracle Support. " +}, +{ + "uri": "/fmw-kubernetes/21.4.2/oud/prerequisites/", + "title": "Prerequisites", + "tags": [], + "description": "Oracle Unified Directory Prerequisites.", + "content": "Introduction This document provides information about the system requirements for deploying and running Oracle Unified Directory 12c PS4 (12.2.1.4.0) in a Kubernetes environment.\nSystem Requirements for Oracle Unified Directory on Kubernetes A running Kubernetes cluster with Helm and Docker installed. For the minimum version requirements refer to document ID 2723908.1 on My Oracle Support. " +}, +{ + "uri": "/fmw-kubernetes/21.4.2/oudsm/prerequisites/", + "title": "Prerequisites", + "tags": [], + "description": "Oracle Unified Directory Services Manager Prerequisites.", + "content": "Introduction This document provides information about the system requirements for deploying and running Oracle Unified Directory Services Manager 12c PS4 (12.2.1.4.0) in a Kubernetes environment.\nSystem Requirements for Oracle Unified Directory Services Manager on Kubernetes A running Kubernetes cluster with Helm and Docker installed. For the minimum version requirements refer to document ID 2723908.1 on My Oracle Support. " +}, +{ + "uri": "/fmw-kubernetes/21.4.2/oud/create-oud-instances/create-oud-instances-helm/", + "title": "b. 
Create Oracle Unified Directory Instances Using Helm", + "tags": [], + "description": "This document provides steps to create Oracle Unified Directory instances using Helm Charts.", + "content": " Introduction Install Helm Deploy an Application using the Helm Chart Undeploy an Application using the Helm Chart Helm Chart(s) for Oracle Unified Directory Introduction This chapter demonstrates how to deploy Oracle Unified Directory 12c instance(s) using the Helm package manager for Kubernetes. Helm Chart(s) described here can be used to facilitate installation, configuration, and environment setup within a Kubernetes environment.\nInstall Helm Helm can be used to create and deploy the Oracle Unified Directory resources in a Kubernetes cluster. For Helm installation and usage information, refer to the README.\nDeploy an Application using the Helm Chart The helm install command is used to deploy applications to a Kubernetes environment, using the Helm Chart supplied.\n$ helm install [Deployment NAME] [CHART Reference] [flags] For example:\n$ helm install my-oud-ds-rs oud-ds-rs --namespace oudns Undeploy an Application using the Helm Chart To uninstall an application deployed using a Helm chart you need to identify the release name and then issue a delete command:\nTo get the release name:\n$ helm --namespace \u0026lt;namespace\u0026gt; list For example:\n$ helm --namespace oudns list NAME NAMESPACE REVISION UPDATED STATUS CHART APP VERSION my-oud-ds-rs- oudns 1 2020-03-31 10:37:30.616927678 -0700 PDT deployed oud-ds-rs-12.2.1.4.0 12.2.1.4.0 To delete the chart:\n$ helm uninstall --namespace \u0026lt;namespace\u0026gt; \u0026lt;release\u0026gt; For example:\n$ helm uninstall --namespace oudns my-oud-ds-rs release \u0026quot;my-oud-ds-rs\u0026quot; uninstalled Helm Chart(s) for Oracle Unified Directory The following list provides Helm charts for deploying Oracle Unified Directory in a Kubernetes environment. 
Helm charts provided can be found in the project at the following location:\nhttps://github.com/oracle/fmw-kubernetes/tree/master/OracleUnifiedDirectory/kubernetes/helm\nDetails about each Helm Chart can be found in the relevant README listed below:\n oud-ds-rs : A Helm chart for deployment of Oracle Unified Directory (DS+RS) instances on Kubernetes. " +}, +{ + "uri": "/fmw-kubernetes/21.4.2/oudsm/create-oudsm-instances/create-oudsm-instances-helm/", + "title": "b. Create Oracle Unified Directory Services Manager Instances Using Helm", + "tags": [], + "description": "This document provides steps to create OUDSM instances using Helm Charts.", + "content": " Introduction Install Helm Deploy an Application using the Helm Chart Undeploy an Application using the Helm Chart Helm Chart(s) for Oracle Unified Directory Services Manager Introduction This chapter demonstrates how to deploy Oracle Unified Directory Services Manager 12c instance(s) using the Helm package manager for Kubernetes. Helm Chart(s) described here can be used to facilitate installation, configuration, and environment setup within a Kubernetes environment.\nInstall Helm Helm can be used to create and deploy the Oracle Unified Directory Services Manager resources in a Kubernetes cluster. 
For Helm installation and usage information, refer to the README.\nDeploy an Application using the Helm Chart The helm install command is used to deploy applications to a Kubernetes environment, using the Helm Chart supplied.\n$ helm install [Deployment NAME] [CHART Reference] [flags] For example:\n$ helm install oudsm oudsm --namespace oudns Undeploy an Application using the Helm Chart To uninstall an application deployed using a Helm chart you need to identify the release name and then issue a delete command:\nTo get the release name:\n$ helm --namespace \u0026lt;namespace\u0026gt; list For example:\n$ helm --namespace oudns list NAME NAMESPACE REVISION UPDATED STATUS CHART APP VERSION oudsm oudns 1 2020-03-31 10:37:30.616927678 -0700 PDT deployed oudsm-12.2.1.4.0 12.2.1.4.0 To delete the chart:\n$ helm uninstall --namespace \u0026lt;namespace\u0026gt; \u0026lt;release\u0026gt; For example:\n$ helm uninstall --namespace oudns oudsm release \u0026quot;oudsm\u0026quot; uninstalled Helm Chart(s) for Oracle Unified Directory Services Manager The following list provides Helm charts for deploying Oracle Unified Directory Services Manager in a Kubernetes environment. Helm charts provided can be found in the project at the following location:\nhttps://github.com/oracle/fmw-kubernetes/tree/master/OracleUnifiedDirectorySM/kubernetes/helm\nDetails about each Helm Chart can be found in the relevant README listed below:\n oudsm : A Helm chart for deployment of Oracle Unified Directory Services Manager instances on Kubernetes. 
" +}, +{ + "uri": "/fmw-kubernetes/21.4.2/soa-domains/installguide/prepare-your-environment/", + "title": "Prepare your environment", + "tags": [], + "description": "Prepare for creating Oracle SOA Suite domains, including required secrets creation, persistent volume and volume claim creation, database creation, and database schema creation.", + "content": "To prepare your Oracle SOA Suite in Kubernetes environment, complete the following steps.\nRefer to the troubleshooting page to troubleshoot issues during the domain deployment process.\n Set up your Kubernetes cluster\n Install Helm\n Get dependent images\n Set up the code repository to deploy Oracle SOA Suite domains\n Obtain the Oracle SOA Suite Docker image\n Install the WebLogic Kubernetes Operator\n Prepare the environment for Oracle SOA Suite domains\na. Create a namespace for an Oracle SOA Suite domain\nb. Create a persistent storage for an Oracle SOA Suite domain\nc. Create a Kubernetes secret with domain credentials\nd. Create a Kubernetes secret with the RCU credentials\ne. Configure access to your database\nf. Run the Repository Creation Utility to set up your database schemas\n Create an Oracle SOA Suite domain\n Set up your Kubernetes cluster Refer the official Kubernetes set up documentation to set up a production grade Kubernetes cluster.\nInstall Helm The operator uses Helm to create and deploy the necessary resources and then run the operator in a Kubernetes cluster. For Helm installation and usage information, see here.\nGet dependent images Obtain dependent images and add them to your local registry.\n For first time users, to pull an image from the Oracle Container Registry, navigate to https://container-registry.oracle.com and log in using the Oracle Single Sign-On (SSO) authentication service. 
If you do not already have an SSO account, you can create an Oracle Account here.\nUse the web interface to accept the Oracle Standard Terms and Restrictions for the Oracle software images that you intend to deploy. Your acceptance of these terms are stored in a database that links the software images to your Oracle Single Sign-On login credentials.\nLog in to the Oracle Container Registry (container-registry.oracle.com) from your Docker client:\n$ docker login container-registry.oracle.com Pull the operator image:\n$ docker pull ghcr.io/oracle/weblogic-kubernetes-operator:3.3.0 $ docker tag ghcr.io/oracle/weblogic-kubernetes-operator:3.3.0 oracle/weblogic-kubernetes-operator:3.3.0 Set up the code repository to deploy Oracle SOA Suite domains Oracle SOA Suite domain deployment on Kubernetes leverages the WebLogic Kubernetes Operator infrastructure. To deploy an Oracle SOA Suite domain, you must set up the deployment scripts.\n Create a working directory to set up the source code:\n$ mkdir $HOME/soa_21.4.2 $ cd $HOME/soa_21.4.2 Download the WebLogic Kubernetes Operator source code and Oracle SOA Suite Kubernetes deployment scripts from the SOA repository. Required artifacts are available at OracleSOASuite/kubernetes.\n$ git clone https://github.com/oracle/fmw-kubernetes.git --branch release/21.4.2 $ export WORKDIR=$HOME/soa_21.4.2/OracleSOASuite/kubernetes Obtain the Oracle SOA Suite Docker image The Oracle SOA Suite image with latest bundle patch and required interim patches can be obtained from My Oracle Support (MOS). This is the only image supported for production deployments. 
Follow the below steps to download the Oracle SOA Suite image from My Oracle Support.\n Download patch 33467899 from My Oracle Support (MOS).\n Unzip the downloaded patch zip file.\n Load the image archive using the docker load command.\nFor example:\n$ docker load \u0026lt; soasuite-12.2.1.4.0-8-ol7-211129.1734.tar Loaded image: oracle/soasuite:12.2.1.4.0-8-ol7-211129.1734 $ Run the docker inspect command to verify that the downloaded image is the latest released image. The value of label com.oracle.weblogic.imagetool.buildid must match to 2fd643ce-8ada-4841-9a0a-ed369cc08023.\nFor example:\n$ docker inspect --format=\u0026#39;{{ index .Config.Labels \u0026#34;com.oracle.weblogic.imagetool.buildid\u0026#34; }}\u0026#39; oracle/soasuite:12.2.1.4.0-8-ol7-211129.1734 2fd643ce-8ada-4841-9a0a-ed369cc08023 $ If you want to build and use an Oracle SOA Suite Docker image with any additional bundle patch or interim patches that are not part of the image obtained from My Oracle Support, then follow these steps to create the image.\n Note: The default Oracle SOA Suite image name used for Oracle SOA Suite domains deployment is soasuite:12.2.1.4. The image obtained must be tagged as soasuite:12.2.1.4 using the docker tag command. If you want to use a different name for the image, make sure to update the new image tag name in the create-domain-inputs.yaml file and also in other instances where the soasuite:12.2.1.4 image name is used.\n Install the WebLogic Kubernetes Operator The WebLogic Kubernetes Operator supports the deployment of Oracle SOA Suite domains in the Kubernetes environment. 
Follow the steps in this document to install the operator.\n Note: Optionally, you can execute these steps to send the contents of the operator’s logs to Elasticsearch.\n In the following example commands to install the WebLogic Kubernetes Operator, opns is the namespace and op-sa is the service account created for the Operator:\n$ kubectl create namespace opns $ kubectl create serviceaccount -n opns op-sa $ cd ${WORKDIR} $ helm install weblogic-kubernetes-operator charts/weblogic-operator --namespace opns --set image=oracle/weblogic-kubernetes-operator:3.3.0 --set serviceAccount=op-sa --set \u0026quot;domainNamespaces={}\u0026quot; --set \u0026quot;javaLoggingLevel=FINE\u0026quot; --wait Prepare the environment for Oracle SOA Suite domains Create a namespace for an Oracle SOA Suite domain Create a Kubernetes namespace (for example, soans) for the domain unless you intend to use the default namespace. Use the new namespace in the remaining steps in this section. For details, see Prepare to run a domain.\n$ kubectl create namespace soans $ helm upgrade --reuse-values --namespace opns --set \u0026quot;domainNamespaces={soans}\u0026quot; --wait weblogic-kubernetes-operator charts/weblogic-operator Create a persistent storage for an Oracle SOA Suite domain In the Kubernetes namespace you created, create the PV and PVC for the domain by running the create-pv-pvc.sh script. Follow the instructions for using the script to create a dedicated PV and PVC for the Oracle SOA Suite domain.\n Review the configuration parameters for PV creation here. Based on your requirements, update the values in the create-pv-pvc-inputs.yaml file located at ${WORKDIR}/create-weblogic-domain-pv-pvc/. 
Sample configuration parameter values for an Oracle SOA Suite domain are:\n baseName: domain domainUID: soainfra namespace: soans weblogicDomainStorageType: HOST_PATH weblogicDomainStoragePath: /scratch/k8s_dir/SOA Ensure that the path for the weblogicDomainStoragePath property exists and have the ownership for 1000:0. If not, you need to create it as follows:\n$ sudo mkdir /scratch/k8s_dir/SOA $ sudo chown -R 1000:0 /scratch/k8s_dir/SOA Run the create-pv-pvc.sh script:\n$ cd ${WORKDIR}/create-weblogic-domain-pv-pvc $ ./create-pv-pvc.sh -i create-pv-pvc-inputs.yaml -o output_soainfra The create-pv-pvc.sh script will create a subdirectory pv-pvcs under the given /path/to/output-directory directory and creates two YAML configuration files for PV and PVC. Apply these two YAML files to create the PV and PVC Kubernetes resources using the kubectl create -f command:\n$ kubectl create -f output_soainfra/pv-pvcs/soainfra-domain-pv.yaml $ kubectl create -f output_soainfra/pv-pvcs/soainfra-domain-pvc.yaml Create a Kubernetes secret with domain credentials Create the Kubernetes secrets username and password of the administrative account in the same Kubernetes namespace as the domain:\n $ cd ${WORKDIR}/create-weblogic-domain-credentials $ ./create-weblogic-credentials.sh -u weblogic -p Welcome1 -n soans -d soainfra -s soainfra-domain-credentials For more details, see this document.\nYou can check the secret with the kubectl get secret command.\nFor example:\n Click here to see the sample secret description. 
$ kubectl get secret soainfra-domain-credentials -o yaml -n soans apiVersion: v1 data: password: T3JhZG9jX2RiMQ== sys_password: T3JhZG9jX2RiMQ== sys_username: c3lz username: U09BMQ== kind: Secret metadata: creationTimestamp: \u0026quot;2020-06-25T14:08:16Z\u0026quot; labels: weblogic.domainName: soainfra weblogic.domainUID: soainfra managedFields: - apiVersion: v1 fieldsType: FieldsV1 fieldsV1: f:data: .: {} f:password: {} f:sys_password: {} f:sys_username: {} f:username: {} f:metadata: f:labels: .: {} f:weblogic.domainName: {} f:weblogic.domainUID: {} f:type: {} manager: kubectl operation: Update time: \u0026quot;2020-06-25T14:08:16Z\u0026quot; name: soainfra-rcu-credentials namespace: soans resourceVersion: \u0026quot;265386\u0026quot; selfLink: /api/v1/namespaces/soans/secrets/soainfra-rcu-credentials uid: 2d93941c-656b-43a4-8af2-78ca8be0f293 type: Opaque Create a Kubernetes secret with the RCU credentials You also need to create a Kubernetes secret containing the credentials for the database schemas. When you create your domain, it will obtain the RCU credentials from this secret.\nUse the provided sample script to create the secret:\n$ cd ${WORKDIR}/create-rcu-credentials $ ./create-rcu-credentials.sh \\ -u SOA1 \\ -p Oradoc_db1 \\ -a sys \\ -q Oradoc_db1 \\ -d soainfra \\ -n soans \\ -s soainfra-rcu-credentials The parameter values are:\n -u username for schema owner (regular user), required. -p password for schema owner (regular user), required. -a username for SYSDBA user, required. -q password for SYSDBA user, required. -d domainUID. Example: soainfra -n namespace. Example: soans -s secretName. Example: soainfra-rcu-credentials You can confirm the secret was created as expected with the kubectl get secret command.\nFor example:\n Click here to see the sample secret description. 
$ kubectl get secret soainfra-rcu-credentials -o yaml -n soans apiVersion: v1 data: password: T3JhZG9jX2RiMQ== sys_password: T3JhZG9jX2RiMQ== sys_username: c3lz username: U09BMQ== kind: Secret metadata: creationTimestamp: \u0026#34;2020-06-25T14:08:16Z\u0026#34; labels: weblogic.domainName: soainfra weblogic.domainUID: soainfra managedFields: - apiVersion: v1 fieldsType: FieldsV1 fieldsV1: f:data: .: {} f:password: {} f:sys_password: {} f:sys_username: {} f:username: {} f:metadata: f:labels: .: {} f:weblogic.domainName: {} f:weblogic.domainUID: {} f:type: {} manager: kubectl operation: Update time: \u0026#34;2020-06-25T14:08:16Z\u0026#34; name: soainfra-rcu-credentials namespace: soans resourceVersion: \u0026#34;265386\u0026#34; selfLink: /api/v1/namespaces/soans/secrets/soainfra-rcu-credentials uid: 2d93941c-656b-43a4-8af2-78ca8be0f293 type: Opaque Configure access to your database Oracle SOA Suite domains require a database with the necessary schemas installed in them. The Repository Creation Utility (RCU) allows you to create those schemas. You must set up the database before you create your domain. 
There are no additional requirements added by running Oracle SOA Suite in Kubernetes; the same existing requirements apply.\nFor production deployments, you must set up and use the standalone (non-container) based database running outside of Kubernetes.\nBefore creating a domain, you will need to set up the necessary schemas in your database.\nRun the Repository Creation Utility to set up your database schemas Create schemas To create the database schemas for Oracle SOA Suite, run the create-rcu-schema.sh script.\nFor example:\n$ cd ${WORKDIR}/create-rcu-schema $ ./create-rcu-schema.sh -h usage: ./create-rcu-schema.sh -s \u0026lt;schemaPrefix\u0026gt; -t \u0026lt;schemaType\u0026gt; -d \u0026lt;dburl\u0026gt; -i \u0026lt;image\u0026gt; -u \u0026lt;imagePullPolicy\u0026gt; -p \u0026lt;docker-store\u0026gt; -n \u0026lt;namespace\u0026gt; -q \u0026lt;sysPassword\u0026gt; -r \u0026lt;schemaPassword\u0026gt; -o \u0026lt;rcuOutputDir\u0026gt; -c \u0026lt;customVariables\u0026gt; [-l] \u0026lt;timeoutLimit\u0026gt; [-h] -s RCU Schema Prefix (required) -t RCU Schema Type (optional) (supported values: osb,soa,soaosb) -d RCU Oracle Database URL (optional) (default: oracle-db.default.svc.cluster.local:1521/devpdb.k8s) -p OracleSOASuite ImagePullSecret (optional) (default: none) -i OracleSOASuite Image (optional) (default: soasuite:12.2.1.4) -u OracleSOASuite ImagePullPolicy (optional) (default: IfNotPresent) -n Namespace for RCU pod (optional) (default: default) -q password for database SYSDBA user. (optional) (default: Oradoc_db1) -r password for all schema owner (regular user). (optional) (default: Oradoc_db1) -o Output directory for the generated YAML file. (optional) (default: rcuoutput) -c Comma-separated custom variables in the format variablename=value. (optional). (default: none) -l Timeout limit in seconds. (optional). 
(default: 300) -h Help $ ./create-rcu-schema.sh \\ -s SOA1 \\ -t soaosb \\ -d oracle-db.default.svc.cluster.local:1521/devpdb.k8s \\ -i soasuite:12.2.1.4 \\ -n default \\ -q Oradoc_db1 \\ -r Oradoc_db1 \\ -c SOA_PROFILE_TYPE=SMALL,HEALTHCARE_INTEGRATION=NO For Oracle SOA Suite domains, the create-rcu-schema.sh script supports:\n domain types: soa, osb, and soaosb. You must specify one of these using the -t flag. For Oracle SOA Suite you must specify the Oracle SOA schema profile type using the -c flag. For example, -c SOA_PROFILE_TYPE=SMALL. Supported values for SOA_PROFILE_TYPE are SMALL, MED, and LARGE. Note: To use the LARGE schema profile type, make sure that the partitioning feature is enabled in the Oracle Database.\n Make sure that you maintain the association between the database schemas and the matching domain just like you did in a non-Kubernetes environment. There is no specific functionality provided to help with this.\nDrop schemas If you want to drop a schema, you can use the drop-rcu-schema.sh script.\nFor example:\n$ cd ${WORKDIR}/create-rcu-schema $ ./drop-rcu-schema.sh -h usage: ./drop-rcu-schema.sh -s \u0026lt;schemaPrefix\u0026gt; -d \u0026lt;dburl\u0026gt; -n \u0026lt;namespace\u0026gt; -q \u0026lt;sysPassword\u0026gt; -r \u0026lt;schemaPassword\u0026gt; -c \u0026lt;customVariables\u0026gt; [-h] -s RCU Schema Prefix (required) -t RCU Schema Type (optional) (supported values: osb,soa,soaosb) -d Oracle Database URL (optional) (default: oracle-db.default.svc.cluster.local:1521/devpdb.k8s) -n Namespace where RCU pod is deployed (optional) (default: default) -q password for database SYSDBA user. (optional) (default: Oradoc_db1) -r password for all schema owner (regular user). (optional) (default: Oradoc_db1) -c Comma-separated custom variables in the format variablename=value. (optional). 
(default: none) -h Help $ ./drop-rcu-schema.sh \\ -s SOA1 \\ -t soaosb \\ -d oracle-db.default.svc.cluster.local:1521/devpdb.k8s \\ -n default \\ -q Oradoc_db1 \\ -r Oradoc_db1 \\ -c SOA_PROFILE_TYPE=SMALL,HEALTHCARE_INTEGRATION=NO For Oracle SOA Suite domains, the drop-rcu-schema.sh script supports:\n Domain types: soa, osb, and soaosb. You must specify one of these using the -t flag. For Oracle SOA Suite, you must specify the Oracle SOA schema profile type using the -c flag. For example, -c SOA_PROFILE_TYPE=SMALL. Supported values for SOA_PROFILE_TYPE are SMALL, MED, and LARGE. Create an Oracle SOA Suite domain Now that you have your Docker images and you have created your RCU schemas, you are ready to create your domain. To continue, follow the instructions in Create Oracle SOA Suite domains.\n" +}, +{ + "uri": "/fmw-kubernetes/21.4.2/oid/", + "title": "Oracle Internet Directory", + "tags": [], + "description": "Oracle Internet Directory provides a comprehensive Directory Solution for robust Identity Management", + "content": "Oracle Internet Directory provides a comprehensive Directory Solution for robust Identity Management. Oracle Internet Directory is an all-in-one directory solution with storage, proxy, synchronization and virtualization capabilities. While unifying the approach, it provides all the services required for high-performance Enterprise and carrier-grade environments. Oracle Internet Directory ensures scalability to billions of entries, ease of installation, elastic deployments, enterprise manageability and effective monitoring.\nThis project supports deployment of Oracle Internet Directory (OID) Docker images based on the 12cPS4 (12.2.1.4.0) release within a Kubernetes environment. The OID Docker Image refers to binaries for OID Release 12.2.1.4.0.\nImage: oracle/oid:12.2.1.4.0\nThis project has several key features to assist you with deploying and managing Oracle Internet Directory in a Kubernetes environment. 
You can:\n Create Oracle Internet Directory instances in a Kubernetes persistent volume (PV). This PV can reside in an NFS file system or other Kubernetes volume types. Start servers based on declarative startup parameters and desired states. Expose the Oracle Internet Directory services for external access. Scale Oracle Internet Directory by starting and stopping servers on demand. Monitor the Oracle Internet Directory instance using Prometheus and Grafana. Follow the instructions in this guide to set up Oracle Internet Directory on Kubernetes.\nGetting started For detailed information about deploying Oracle Internet Directory, start at Prerequisites and follow this documentation sequentially.\nCurrent release The current production release for Oracle Internet Directory 12c PS4 (12.2.1.4.0) deployment on Kubernetes is 21.4.1.\n" +}, +{ + "uri": "/fmw-kubernetes/21.4.2/soa-domains/adminguide/deploying-composites/deploy-using-maven-ant/", + "title": "Deploy using Maven and Ant", + "tags": [], + "description": "Deploy Oracle SOA Suite and Oracle Service Bus composite applications using the Maven and Ant based approach in an Oracle SOA Suite deployment.", + "content": "Learn how to deploy Oracle SOA Suite and Oracle Service Bus composite applications using the Maven and Ant based approach in an Oracle SOA Suite in WebLogic Kubernetes Operator environment.\nBefore deploying composite applications, we need to create a Kubernetes pod in the same cluster where the Oracle SOA Suite domain is running, so that composite applications can be deployed using the internal Kubernetes Service for the Administration Server URL.\nPlace the SOA/Oracle Service Bus composite project at a share location (for example at /share/soa-deploy) mounted at /composites inside container. 
Make sure to provide oracle user ( uid: 1000 and gid: 0) permission to directory /share/soa-deploy, so that it is accessible and writable inside the container.\n$ sudo chown -R 1000:0 /share/soa-deploy Follow the steps in this section to create a container and then use it to deploy Oracle SOA Suite and Oracle Service Bus composite applications using Maven or Ant.\nCreate a composite deployment container Before creating a Kubernetes pod, make sure that the Oracle SOA Suite Docker image is available on a node, or you can create an image pull secret so that the pod can pull the Docker image on the host where it gets created.\n Create an image pull secret to pull image soasuite:12.2.1.4 by the Kubernetes pod:\n$ kubectl create secret docker-registry image-secret -n soans --docker-server=your-registry.com --docker-username=xxxxxx --docker-password=xxxxxxx --docker-email=my@company.com Create a PersistentVolume and PersistentVolumeClaim (soadeploy-pv.yaml and soadeploy-pvc.yaml) with sample composites for build and deploy placed at /share/soa-deploy.\na) Create a PersistentVolume with the sample provided (soadeploy-pv.yaml), which uses NFS (you can use hostPath or any other supported PV type):\napiVersion: v1 kind: PersistentVolume metadata: name: soadeploy-pv spec: storageClassName: soadeploy-storage-class capacity: storage: 10Gi accessModes: - ReadWriteMany # Valid values are Retain, Delete or Recycle persistentVolumeReclaimPolicy: Retain # hostPath: nfs: server: X.X.X.X path: \u0026quot;/share/soa-deploy\u0026quot; b) Apply the YAML:\n$ kubectl apply -f soadeploy-pv.yaml c) Create a PersistentVolumeClaim (soadeploy-pvc.yaml):\nkind: PersistentVolumeClaim apiVersion: v1 metadata: name: soadeploy-pvc namespace: soans spec: storageClassName: soadeploy-storage-class accessModes: - ReadWriteMany resources: requests: storage: 10Gi d) Apply the YAML:\n$ kubectl apply -f soadeploy-pvc.yaml Create a composite deploy pod using soadeploy.yaml to mount the composites inside pod 
at /composites:\napiVersion: v1 kind: Pod metadata: labels: run: soadeploy name: soadeploy namespace: soans spec: imagePullSecrets: - name: image-secret containers: - image: soasuite:12.2.1.4 name: soadeploy env: - name: M2_HOME value: /u01/oracle/oracle_common/modules/org.apache.maven_3.2.5 command: [\u0026quot;/bin/bash\u0026quot;, \u0026quot;-c\u0026quot;, \u0026quot;echo 'export PATH=$PATH:$M2_HOME/bin' \u0026gt;\u0026gt; $HOME/.bashrc; sleep infinity\u0026quot;] imagePullPolicy: IfNotPresent volumeMounts: - name: mycomposite mountPath: /composites volumes: - name: mycomposite persistentVolumeClaim: claimName: soadeploy-pvc Create the pod:\n$ kubectl apply -f soadeploy.yaml Once the Kubernetes pod is deployed, exec into the pod to perform Maven/Ant based build and deploy:\n$ kubectl exec -it -n soans soadeploy -- bash Maven based build and deploy Note: Make sure to execute these commands inside the soadeploy pod.\n Set up proxy details for Maven to pull dependencies from the internet.\nIf your environment is not running behind a proxy, then skip this step. 
Otherwise, replace REPLACE-WITH-PROXY-HOST, REPLACE-WITH-PROXY-PORT and the value for nonProxyHosts attribute per your environment and create the settings.xml:\n$ mkdir $HOME/.m2 $ cat \u0026lt;\u0026lt;EOF \u0026gt; $HOME/.m2/settings.xml \u0026lt;settings\u0026gt; \u0026lt;proxies\u0026gt; \u0026lt;proxy\u0026gt; \u0026lt;active\u0026gt;true\u0026lt;/active\u0026gt; \u0026lt;protocol\u0026gt;http\u0026lt;/protocol\u0026gt; \u0026lt;host\u0026gt;REPLACE-WITH-PROXY-HOST\u0026lt;/host\u0026gt; \u0026lt;port\u0026gt;REPLACE-WITH-PROXY-PORT\u0026lt;/port\u0026gt; \u0026lt;nonProxyHosts\u0026gt;soainfra-cluster-soa-cluster|soainfra-adminserver\u0026lt;/nonProxyHosts\u0026gt; \u0026lt;/proxy\u0026gt; \u0026lt;/proxies\u0026gt; \u0026lt;/settings\u0026gt; EOF For Oracle SOA Suite composite applications Set up the environment for Maven:\n#Perform Maven Sync $ cd /u01/oracle/oracle_common/plugins/maven/com/oracle/maven/oracle-maven-sync/12.2.1/ $ mvn install:install-file \\ -DpomFile=oracle-maven-sync-12.2.1.pom \\ -Dfile=oracle-maven-sync-12.2.1.jar #install Maven plugin $ mvn help:describe \\ -Dplugin=com.oracle.maven:oracle-maven-sync \\ -Ddetail #push libraries into internal repository $ mvn com.oracle.maven:oracle-maven-sync:push \\ -DoracleHome=/u01/oracle \\ -DtestingOnly=false $ mvn archetype:crawl \\ -Dcatalog=$HOME/.m2/archetype-catalog.xml \\ -DarchetypeArtifactId=oracle-soa-application \\ -DarchetypeVersion=12.2.1-4-0 Build the SOA Archive (SAR) for your sample deployment available at /composites/mavenproject/my-soa-app:\n$ cd /composites/mavenproject/my-soa-app $ mvn package The SAR will be generated at /composites/mavenproject/my-soa-app/my-project/target/sca_my-project.jar.\n Deploy into the Oracle SOA Suite instance. 
For example, if the instance URL is http://soainfra-cluster-soa-cluster:8001 with credentials username: weblogic and password: Welcome1, enter the following commands:\n$ cd /composites/mavenproject/my-soa-app $ mvn pre-integration-test \\ -DoracleServerUrl=http://soainfra-cluster-soa-cluster:8001 \\ -DsarLocation=/composites/mavenproject/my-soa-app/my-project/target/sca_my-project.jar \\ -Doverwrite=true \\ -DforceDefault=true \\ -Dcomposite.partition=default \\ -Duser=weblogic -Dpassword=Welcome1 For Oracle Service Bus composite applications Set up the environment for Maven:\n#Perform Maven Sync $ cd /u01/oracle/oracle_common/plugins/maven/com/oracle/maven/oracle-maven-sync/12.2.1/ $ mvn install:install-file \\ -DpomFile=oracle-maven-sync-12.2.1.pom \\ -Dfile=oracle-maven-sync-12.2.1.jar #push libraries into internal repository $ mvn com.oracle.maven:oracle-maven-sync:push \\ -DoracleHome=$ORACLE_HOME $ mvn archetype:crawl \\ -Dcatalog=$HOME/.m2/archetype-catalog.xml #Verify the mvn setup $ mvn help:describe \\ -DgroupId=com.oracle.servicebus.plugin \\ -DartifactId=oracle-servicebus-plugin \\ -Dversion=12.2.1-4-0 Build the Oracle Service Bus Archive (sbconfig.sbar)\nBuild sbconfig.sbar for your sample deployment, available at /composites/mavenproject/HelloWorldSB:\n$ cd /composites/mavenproject/HelloWorldSB $ mvn com.oracle.servicebus.plugin:oracle-servicebus-plugin:package The Oracle Service Bus Archive (SBAR) will be generated at /composites/mavenproject/HelloWorldSB/.data/maven/sbconfig.sbar.\n Deploy the generated sbconfig.sbar into the Oracle Service Bus instance. 
For example, if the Administration URL is http://soainfra-adminserver:7001 with credentials username: weblogic and password: Welcome1, enter the following commands: :\n$ cd /composites/mavenproject/HelloWorldSB $ mvn pre-integration-test \\ -DoracleServerUrl=t3://soainfra-adminserver:7001 \\ -DoracleUsername=weblogic -DoraclePassword=Welcome1 Ant based build and deploy Note: Make sure to execute these commands inside the soadeploy pod.\n For Oracle SOA Suite composite applications Build an Oracle SOA Suite composite application using Ant. For example, if the composite application to be deployed is available at /composites/antproject/Project, enter the following commands:\n$ cd /u01/oracle/soa/bin $ ant -f ant-sca-package.xml \\ -DcompositeDir=/composites/antproject/Project \\ -DcompositeName=Project \\ -Drevision=0.1 The SOA Archive is generated at /composites/antproject/Project/deploy/sca_Project_rev0.1.jar, which will be used for deploying.\n Deploy into the Oracle SOA Suite instance using Ant:\n$ cd /u01/oracle/soa/bin $ ant -f ant-sca-deploy.xml \\ -DserverURL=http://soainfra-cluster-soa-cluster:8001 \\ -DsarLocation=/composites/antproject/Project/deploy/sca_Project_rev0.1.jar \\ -Doverwrite=true \\ -Duser=weblogic -Dpassword=Welcome1 For Oracle Service Bus composite applications See Developing Services Using Oracle Service Bus to deploy Oracle Service Bus composite applications using Ant.\n" +}, +{ + "uri": "/fmw-kubernetes/21.4.2/soa-domains/adminguide/enable-additional-url-access/", + "title": "Enable additional URL access", + "tags": [], + "description": "Extend an existing ingress to enable additional application URL access for Oracle SOA Suite domains.", + "content": "This section provides information about how to extend an existing ingress (Non-SSL and SSL termination) to enable additional application URL access for Oracle SOA Suite domains.\nThe ingress per domain created in the steps in Set up a load balancer exposes the application paths defined in 
template YAML files present at ${WORKDIR}/charts/ingress-per-domain/templates/.\nTo extend an existing ingress with additional application URL access:\n Update the template YAML file at ${WORKDIR}/charts/ingress-per-domain/templates/ to define additional path rules.\nFor example, to extend an existing NGINX-based ingress with additional paths /path1 and /path2 of an Oracle SOA Suite cluster, update nginx-ingress.yaml (for the supported Kubernetes versions up to 1.18.x) with additional paths:\n Note: For Kubernetes versions, 1.19+, you need to update the nginx-ingress-k8s1.19.yaml file.\n # Copyright (c) 2020, 2021, Oracle and/or its affiliates. # Licensed under the Universal Permissive License v 1.0 as shown at https://oss.oracle.com/licenses/upl. {{- if eq .Values.type \u0026quot;NGINX\u0026quot; }} --- apiVersion: extensions/v1beta1 kind: Ingress . . spec: rules: - host: '{{ .Values.nginx.hostname }}' http: paths: # Add new paths -- start - path: /path1 backend: serviceName: '{{ .Values.wlsDomain.domainUID }}-cluster-{{ .Values.wlsDomain.soaClusterName | lower | replace \u0026quot;_\u0026quot; \u0026quot;-\u0026quot; }}' servicePort: {{ .Values.wlsDomain.soaManagedServerPort }} - path: /path2 backend: serviceName: '{{ .Values.wlsDomain.domainUID }}-cluster-{{ .Values.wlsDomain.soaClusterName | lower | replace \u0026quot;_\u0026quot; \u0026quot;-\u0026quot; }}' servicePort: {{ .Values.wlsDomain.soaManagedServerPort }} # Add new paths -- end - path: /console backend: serviceName: '{{ .Values.wlsDomain.domainUID }}-{{ .Values.wlsDomain.adminServerName | lower | replace \u0026quot;_\u0026quot; \u0026quot;-\u0026quot; }}' servicePort: {{ .Values.wlsDomain.adminServerPort }} . . 
{{- end }} Get the Helm release name for the ingress installed in your domain namespace:\n$ helm ls -n \u0026lt;domain_namespace\u0026gt; For example, in the soans namespace:\n$ helm ls -n soans Sample output, showing the Helm release name for a NGINX-based ingress as soa-nginx-ingress:\nNAME NAMESPACE REVISION UPDATED STATUS CHART APP VERSION soa-nginx-ingress soans 1 2021-02-17 13:42:03.252742314 +0000 UTC deployed ingress-per-domain-0.1.0 1.0 $ To extend the existing ingress per domain with additional paths defined in the template YAML, use the helm upgrade command:\n$ cd ${WORKDIR} $ helm upgrade \u0026lt;helm_release_for_ingress\u0026gt; \\ charts/ingress-per-domain \\ --namespace \u0026lt;domain_namespace\u0026gt; \\ --reuse-values Note: helm_release_for_ingress is the ingress name used in the corresponding helm install command for the ingress installation.\n Sample command for a NGINX-based ingress soa-nginx-ingress in the soans namespace:\n$ cd ${WORKDIR} $ helm upgrade soa-nginx-ingress \\ charts/ingress-per-domain \\ --namespace soans \\ --reuse-values This will upgrade the existing ingress to pick up the additional paths updated in the template YAML.\n Verify that additional paths are updated into the existing ingress.\na. Get the existing ingress deployed in the domain namespace:\n$ kubectl get ingress -n \u0026lt;domain_namespace\u0026gt; For example, in the soans namespace:\n$ kubectl get ingress -n soans Sample output, showing the existing ingress as soainfra-nginx:\nNAME CLASS HOSTS ADDRESS PORTS AGE soainfra-nginx \u0026lt;none\u0026gt; domain1.org 10.109.211.160 80, 443 xxd b. 
Describe the ingress object and verify that new paths are available and pointing to desired backends.\nSample command and output, showing path and backend details for /path1 and /path2:\n$ kubectl describe ingress soainfra-nginx -n soans|grep path /path1 soainfra-cluster-soa-cluster:8001 (172.17.0.19:8001,172.17.0.20:8001) /path2 soainfra-cluster-soa-cluster:8001 (172.17.0.19:8001,172.17.0.20:8001) " +}, +{ + "uri": "/fmw-kubernetes/21.4.2/soa-domains/adminguide/configure-load-balancer/nginx/", + "title": "NGINX", + "tags": [], + "description": "Configure the ingress-based NGINX load balancer for Oracle SOA Suite domains.", + "content": "This section provides information about how to install and configure the ingress-based NGINX load balancer to load balance Oracle SOA Suite domain clusters. You can configure NGINX for non-SSL, SSL termination, and end-to-end SSL access of the application URL.\nFollow these steps to set up NGINX as a load balancer for an Oracle SOA Suite domain in a Kubernetes cluster:\nSee the official installation document for prerequisites.\n Install the NGINX load balancer for non-SSL and SSL termination configuration Generate secret for SSL access Install NGINX load balancer for end-to-end SSL configuration Configure NGINX to manage ingresses Verify domain application URL access Uninstall NGINX ingress Uninstall NGINX To get repository information, enter the following Helm commands:\n$ helm repo add ingress-nginx https://kubernetes.github.io/ingress-nginx $ helm repo update Install the NGINX load balancer for non-SSL and SSL termination configuration Deploy the ingress-nginx controller by using Helm on the domain namespace:\nFor Kubernetes versions up to v1.18.x:\n$ helm install nginx-ingress -n soans \\ --version=3.34.0 \\ --set controller.service.type=NodePort \\ --set controller.admissionWebhooks.enabled=false \\ ingress-nginx/ingress-nginx For Kubernetes versions v1.19.x+ onwards (NGINX version 4.0.6+):\n$ helm install nginx-ingress -n 
soans \\ --set controller.service.type=NodePort \\ --set controller.admissionWebhooks.enabled=false \\ ingress-nginx/ingress-nginx Click here to see the sample output. NAME: nginx-ingress LAST DEPLOYED: Tue Sep 15 08:40:47 2020 NAMESPACE: soans STATUS: deployed REVISION: 1 TEST SUITE: None NOTES: The ingress-nginx controller has been installed. Get the application URL by running these commands: export HTTP_NODE_PORT=$(kubectl --namespace soans get services -o jsonpath=\u0026quot;{.spec.ports[0].nodePort}\u0026quot; nginx-ingress-ingress-nginx-controller) export HTTPS_NODE_PORT=$(kubectl --namespace soans get services -o jsonpath=\u0026quot;{.spec.ports[1].nodePort}\u0026quot; nginx-ingress-ingress-nginx-controller) export NODE_IP=$(kubectl --namespace soans get nodes -o jsonpath=\u0026quot;{.items[0].status.addresses[1].address}\u0026quot;) echo \u0026quot;Visit http://$NODE_IP:$HTTP_NODE_PORT to access your application via HTTP.\u0026quot; echo \u0026quot;Visit https://$NODE_IP:$HTTPS_NODE_PORT to access your application via HTTPS.\u0026quot; An example ingress that makes use of the controller: apiVersion: networking.k8s.io/v1beta1 kind: Ingress metadata: annotations: kubernetes.io/ingress.class: nginx name: example namespace: foo spec: rules: - host: www.example.com http: paths: - backend: serviceName: exampleService servicePort: 80 path: / # This section is only required if TLS is to be enabled for the ingress tls: - hosts: - www.example.com secretName: example-tls If TLS is enabled for the ingress, a secret containing the certificate and key must also be provided: apiVersion: v1 kind: Secret metadata: name: example-tls namespace: foo data: tls.crt: \u0026lt;base64 encoded cert\u0026gt; tls.key: \u0026lt;base64 encoded key\u0026gt; type: kubernetes.io/tls Generate secret for SSL access For secured access (SSL and E2ESSL) to the Oracle SOA Suite application, create a certificate and generate secrets:\n$ openssl req -x509 -nodes -days 365 -newkey rsa:2048 -keyout 
/tmp/tls1.key -out /tmp/tls1.crt -subj \u0026#34;/CN=domain1.org\u0026#34; $ kubectl -n soans create secret tls domain1-tls-cert --key /tmp/tls1.key --cert /tmp/tls1.crt Note: The value of CN is the host on which this ingress is to be deployed.\n Install NGINX load balancer for end-to-end SSL configuration Deploy the ingress-nginx controller by using Helm on the domain namespace:\nFor Kubernetes versions up to v1.18.x:\n$ helm install nginx-ingress -n soans \\ --version=3.34.0 \\ --set controller.extraArgs.default-ssl-certificate=soans/domain1-tls-cert \\ --set controller.service.type=NodePort \\ --set controller.admissionWebhooks.enabled=false \\ --set controller.extraArgs.enable-ssl-passthrough=true \\ ingress-nginx/ingress-nginx For Kubernetes versions v1.19.x+ onwards (NGINX version 4.0.6+):\n$ helm install nginx-ingress -n soans \\ --set controller.extraArgs.default-ssl-certificate=soans/domain1-tls-cert \\ --set controller.service.type=NodePort \\ --set controller.admissionWebhooks.enabled=false \\ --set controller.extraArgs.enable-ssl-passthrough=true \\ ingress-nginx/ingress-nginx Click here to see the sample output. NAME: nginx-ingress LAST DEPLOYED: Tue Sep 15 08:40:47 2020 NAMESPACE: soans STATUS: deployed REVISION: 1 TEST SUITE: None NOTES: The ingress-nginx controller has been installed. 
Get the application URL by running these commands: export HTTP_NODE_PORT=$(kubectl --namespace soans get services -o jsonpath=\u0026#34;{.spec.ports[0].nodePort}\u0026#34; nginx-ingress-ingress-nginx-controller) export HTTPS_NODE_PORT=$(kubectl --namespace soans get services -o jsonpath=\u0026#34;{.spec.ports[1].nodePort}\u0026#34; nginx-ingress-ingress-nginx-controller) export NODE_IP=$(kubectl --namespace soans get nodes -o jsonpath=\u0026#34;{.items[0].status.addresses[1].address}\u0026#34;) echo \u0026#34;Visit http://$NODE_IP:$HTTP_NODE_PORTto access your application via HTTP.\u0026#34; echo \u0026#34;Visit https://$NODE_IP:$HTTPS_NODE_PORTto access your application via HTTPS.\u0026#34; An example Ingress that makes use of the controller: apiVersion: networking.k8s.io/v1beta1 kind: Ingress metadata: annotations: kubernetes.io/ingress.class: nginx name: example namespace: foo spec: rules: - host: www.example.com http: paths: - backend: serviceName: exampleService servicePort: 80 path: / # This section is only required if TLS is to be enabled for the Ingress tls: - hosts: - www.example.com secretName: example-tls If TLS is enabled for the Ingress, a Secret containing the certificate and key must also be provided: apiVersion: v1 kind: Secret metadata: name: example-tls namespace: foo data: tls.crt: \u0026lt;base64 encoded cert\u0026gt; tls.key: \u0026lt;base64 encoded key\u0026gt; type: kubernetes.io/tls Check the status of the deployed ingress controller:\n$ kubectl --namespace soans get services | grep ingress-nginx-controller Sample output:\nnginx-ingress-ingress-nginx-controller NodePort 10.106.186.235 \u0026lt;none\u0026gt; 80:32125/TCP,443:31376/TCP 19m Configure NGINX to manage ingresses Create an ingress for the domain in the domain namespace by using the sample Helm chart. Here path-based routing is used for ingress. Sample values for default configuration are shown in the file ${WORKDIR}/charts/ingress-per-domain/values.yaml. 
By default, type is TRAEFIK , sslType is NONSSL, and domainType is soa. These values can be overridden by passing values through the command line or can be edited in the sample file values.yaml.\nIf needed, you can update the ingress YAML file to define more path rules (in section spec.rules.host.http.paths) based on the domain application URLs that need to be accessed. Update the template YAML file for the NGINX load balancer located at ${WORKDIR}/charts/ingress-per-domain/templates/nginx-ingress.yaml.\n Note: See here for all the configuration parameters.\n $ cd ${WORKDIR} $ helm install soa-nginx-ingress charts/ingress-per-domain \\ --namespace soans \\ --values charts/ingress-per-domain/values.yaml \\ --set \u0026#34;nginx.hostname=$(hostname -f)\u0026#34; \\ --set type=NGINX Sample output:\nNAME: soa-nginx-ingress LAST DEPLOYED: Fri Jul 24 09:34:03 2020 NAMESPACE: soans STATUS: deployed REVISION: 1 TEST SUITE: None Install ingress-per-domain using Helm for SSL termination configuration:\n$ cd ${WORKDIR} $ helm install soa-nginx-ingress charts/ingress-per-domain \\ --namespace soans \\ --values charts/ingress-per-domain/values.yaml \\ --set \u0026#34;nginx.hostname=$(hostname -f)\u0026#34; \\ --set type=NGINX --set sslType=SSL Sample output:\nNAME: soa-nginx-ingress LAST DEPLOYED: Fri Jul 24 09:34:03 2020 NAMESPACE: soans STATUS: deployed REVISION: 1 TEST SUITE: None Install ingress-per-domain using Helm for E2ESSL configuration.\n$ cd ${WORKDIR} $ helm install soa-nginx-ingress charts/ingress-per-domain \\ --namespace soans \\ --values charts/ingress-per-domain/values.yaml \\ --set type=NGINX --set sslType=E2ESSL Sample output:\nNAME: soa-nginx-ingress LAST DEPLOYED: Fri Jul 24 09:34:03 2020 NAMESPACE: soans STATUS: deployed REVISION: 1 TEST SUITE: None For NONSSL access to the Oracle SOA Suite application, get the details of the services by the ingress:\n$ kubectl describe ingress soainfra-nginx -n soans Click here to see the sample output of the services 
supported by the above deployed ingress. Name: soainfra-nginx Namespace: soans Address: 100.111.150.225 Default backend: default-http-backend:80 (\u0026lt;error: endpoints \u0026quot;default-http-backend\u0026quot; not found\u0026gt;) Rules: Host Path Backends ---- ---- -------- domain1.org /console soainfra-adminserver:7001 (10.244.0.45:7001) /em soainfra-adminserver:7001 (10.244.0.45:7001) /weblogic/ready soainfra-adminserver:7001 (10.244.0.45:7001) / soainfra-cluster-soa-cluster:8001 (10.244.0.46:8001,10.244.0.47:8001) /soa-infra soainfra-cluster-soa-cluster:8001 (10.244.0.46:8001,10.244.0.47:8001) /soa/composer soainfra-cluster-soa-cluster:8001 (10.244.0.46:8001,10.244.0.47:8001) /integration/worklistapp soainfra-cluster-soa-cluster:8001 (10.244.0.46:8001,10.244.0.47:8001) Annotations: \u0026lt;none\u0026gt; Events: Type Reason Age From Message ---- ------ ---- ---- ------- Normal CREATE 2m32s nginx-ingress-controller Ingress soans/soainfra-nginx Normal UPDATE 94s nginx-ingress-controller Ingress soans/soainfra-nginx For SSL access to the Oracle SOA Suite application, get the details of the services by the above deployed ingress:\n$ kubectl describe ingress soainfra-nginx -n soans Click here to see the sample output of the services supported by the above deployed ingress. 
Name: soainfra-nginx Namespace: soans Address: 100.111.150.225 Default backend: default-http-backend:80 (\u0026lt;error: endpoints \u0026quot;default-http-backend\u0026quot; not found\u0026gt;) TLS: domain1-tls-cert terminates domain1.org Rules: Host Path Backends ---- ---- -------- domain1.org /console soainfra-adminserver:7001 (10.244.0.45:7001) /em soainfra-adminserver:7001 (10.244.0.45:7001) /weblogic/ready soainfra-adminserver:7001 (10.244.0.45:7001) / soainfra-cluster-soa-cluster:8001 (10.244.0.46:8001,10.244.0.47:8001) /soa-infra soainfra-cluster-soa-cluster:8001 (10.244.0.46:8001,10.244.0.47:8001) /soa/composer soainfra-cluster-soa-cluster:8001 (10.244.0.46:8001,10.244.0.47:8001) /integration/worklistapp soainfra-cluster-soa-cluster:8001 (10.244.0.46:8001,10.244.0.47:8001) Annotations: kubernetes.io/ingress.class: nginx nginx.ingress.kubernetes.io/configuration-snippet: more_set_input_headers \u0026quot;X-Forwarded-Proto: https\u0026quot;; more_set_input_headers \u0026quot;WL-Proxy-SSL: true\u0026quot;; nginx.ingress.kubernetes.io/ingress.allow-http: false Events: Type Reason Age From Message ---- ------ ---- ---- ------- Normal CREATE 3m47s nginx-ingress-controller Ingress soans/soainfra-nginx Normal UPDATE 3m25s nginx-ingress-controller Ingress soans/soainfra-nginx For E2ESSL access to the Oracle SOA Suite application, get the details of the services by the above deployed ingress:\n$ kubectl describe ingress soainfra-nginx-e2essl -n soans Click here to see the sample output of the services supported by the above deployed ingress. 
Name: soainfra-nginx-e2essl-admin Namespace: soans Address: Default backend: default-http-backend:80 (\u0026lt;error: endpoints \u0026quot;default-http-backend\u0026quot; not found\u0026gt;) TLS: domain1-tls-cert terminates admin.org Rules: Host Path Backends ---- ---- -------- admin.org soainfra-adminserver-nginx-ssl:7002 (10.244.0.247:7002) Annotations: kubernetes.io/ingress.class: nginx meta.helm.sh/release-name: soa-nginx-ingress meta.helm.sh/release-namespace: soans nginx.ingress.kubernetes.io/ssl-passthrough: true Events: Type Reason Age From Message ---- ------ ---- ---- ------- Normal Sync 4s nginx-ingress-controller Scheduled for sync Name: soainfra-nginx-e2essl-soa Namespace: soans Address: Default backend: default-http-backend:80 (\u0026lt;error: endpoints \u0026quot;default-http-backend\u0026quot; not found\u0026gt;) TLS: domain1-tls-cert terminates soa.org Rules: Host Path Backends ---- ---- -------- soa.org / soainfra-cluster-soa-cluster:8002 (10.244.0.249:8002) Annotations: kubernetes.io/ingress.class: nginx meta.helm.sh/release-name: soa-nginx-ingress meta.helm.sh/release-namespace: soans nginx.ingress.kubernetes.io/ssl-passthrough: true Events: Type Reason Age From Message ---- ------ ---- ---- ------- Normal Sync 4s nginx-ingress-controller Scheduled for sync Verify domain application URL access NONSSL configuration Verify that the Oracle SOA Suite domain application URLs are accessible through the LOADBALANCER-Non-SSLPORT 32125:\nhttp://${LOADBALANCER-HOSTNAME}:${LOADBALANCER-Non-SSLPORT}/weblogic/ready http://${LOADBALANCER-HOSTNAME}:${LOADBALANCER-Non-SSLPORT}/console http://${LOADBALANCER-HOSTNAME}:${LOADBALANCER-Non-SSLPORT}/em http://${LOADBALANCER-HOSTNAME}:${LOADBALANCER-Non-SSLPORT}/soa-infra http://${LOADBALANCER-HOSTNAME}:${LOADBALANCER-Non-SSLPORT}/soa/composer http://${LOADBALANCER-HOSTNAME}:${LOADBALANCER-Non-SSLPORT}/integration/worklistapp SSL configuration Verify that the Oracle SOA Suite domain application URLs are accessible 
through the LOADBALANCER-SSLPORT 30233:\nhttps://${LOADBALANCER-HOSTNAME}:${LOADBALANCER-SSLPORT}/weblogic/ready https://${LOADBALANCER-HOSTNAME}:${LOADBALANCER-SSLPORT}/console https://${LOADBALANCER-HOSTNAME}:${LOADBALANCER-SSLPORT}/em https://${LOADBALANCER-HOSTNAME}:${LOADBALANCER-SSLPORT}/soa-infra https://${LOADBALANCER-HOSTNAME}:${LOADBALANCER-SSLPORT}/soa/composer https://${LOADBALANCER-HOSTNAME}:${LOADBALANCER-SSLPORT}/integration/worklistapp E2ESSL configuration Before accessing the SOA Suite domain application URLs, update the system host config file with the IP address of the host on which the ingress is deployed.\n To access the application URLs from the browser, update /etc/hosts on the browser host (in Windows, C:\\Windows\\System32\\Drivers\\etc\\hosts) with the entries below\nX.X.X.X admin.org X.X.X.X soa.org X.X.X.X osb.org Note: The value of X.X.X.X is the host IP address on which this ingress is deployed.\n Note: If you are behind any corporate proxy, make sure to update the browser proxy settings appropriately to access the host names updated /etc/hosts file.\n Verify that the Oracle SOA Suite domain application URLs are accessible through LOADBALANCER-E2ESSLPORT 30233:\nhttps://admin.org:${LOADBALANCER-SSLPORT}/weblogic/ready https://admin.org:${LOADBALANCER-SSLPORT}/console https://admin.org:${LOADBALANCER-SSLPORT}/em https://soa.org:${LOADBALANCER-SSLPORT}/soa-infra https://soa.org:${LOADBALANCER-SSLPORT}/soa/composer https://soa.org:${LOADBALANCER-SSLPORT}/integration/worklistapp Note: This is the default host name. 
If you have updated the host name in value.yaml, then use the updated values.\n Uninstall NGINX ingress Uninstall and delete the ingress-nginx deployment:\n$ helm delete soa-nginx-ingress -n soans Uninstall NGINX $ helm delete nginx-ingress -n soans " +}, +{ + "uri": "/fmw-kubernetes/21.4.2/soa-domains/patch_and_upgrade/upgrade-operator-release/", + "title": "Upgrade an operator release", + "tags": [], + "description": "Upgrade the WebLogic Kubernetes Operator release to a newer version.", + "content": "To upgrade the WebLogic Kubernetes operator, use the helm upgrade command with new Helm chart and operator image. See the steps here to pull the operator image and set up the Oracle SOA Suite repository that contains the operator chart. To upgrade the operator run the following command:\n$ cd ${WORKDIR} $ helm upgrade \\ --reuse-values \\ --set image=oracle/weblogic-kubernetes-operator:3.3.0 \\ --namespace weblogic-operator-namespace \\ --wait \\ weblogic-kubernetes-operator \\ charts/weblogic-operator Note: When the WebLogic Kubernetes Operator is upgraded from release version 3.2.1 to 3.3.0 or later, it may be expected that the Administration Server pod in the domain gets restarted.\n Post upgrade steps From operator 3.1.1, the T3 channel Kubernetes service name extension is changed from -external to -ext. If the Administration Server was configured to expose a T3 channel in your domain, then follow these steps to recreate the Kubernetes service (for T3 channel) with the new name -ext.\n Note: If these steps are not performed, then the domain restart using spec.serverStartPolicy, would fail to bring up the servers.\n Get the existing Kubernetes service name for T3 channel from the domain namespace. 
For example, if the domainUID is soainfra, and the Administration Server name is adminserver, then the service would be:\nsoainfra-adminserver-external Delete the existing Kubernetes service for T3 channel, so that operator 3.1.1 creates a new one:\n$ kubectl delete service \u0026lt;T3 channel service\u0026gt; --namespace \u0026lt;domain-namespace\u0026gt; For example, if the domainUID is soainfra, the Administration Server name is adminserver and domain namespace is soans, then the command would be:\n$ kubectl delete service soainfra-adminserver-external --namespace soans Then the operator automatically creates a new Kubernetes service with -ext instead of -external:\nsoainfra-adminserver-ext " +}, +{ + "uri": "/fmw-kubernetes/21.4.2/oig/configure-design-console/using-the-design-console-with-nginx-ssl/", + "title": "b. Using Design Console with NGINX(SSL)", + "tags": [], + "description": "Configure Design Console with NGINX(SSL).", + "content": "Configure an NGINX ingress (SSL) to allow Design Console to connect to your Kubernetes cluster.\n Prerequisites\n Setup routing rules for the Design Console ingress\n Create the ingress\n Update the T3 channel\n Restart the OIG domain\n Design Console client\na. Using an on-premises installed Design Console\nb. Using a container image for Design Console\n Login to the Design Console\n Prerequisites If you haven\u0026rsquo;t already configured an NGINX ingress controller (SSL) for OIG, follow Using an Ingress with NGINX (SSL).\nMake sure you know the master hostname and ingress port for NGINX before proceeding e.g https://${MASTERNODE-HOSTNAME}:${MASTERNODE-PORT}. Also make sure you know the Kubernetes secret for SSL that was generated e.g governancedomain-tls-cert.\nSetup routing rules for the Design Console ingress Setup routing rules by running the following commands:\n$ cd $WORKDIR/kubernetes/design-console-ingress Edit values.yaml and ensure that tls: SSL is set. 
Change domainUID: and secretName: to match the values for your \u0026lt;domain_uid\u0026gt; and your SSL Kubernetes secret, for example:\n# Load balancer type. Supported values are: NGINX type: NGINX # Type of Configuration Supported Values are : NONSSL,SSL # tls: NONSSL tls: SSL # TLS secret name if the mode is SSL secretName: governancedomain-tls-cert # WLS domain as backend to the load balancer wlsDomain: domainUID: governancedomain oimClusterName: oim_cluster oimServerT3Port: 14002 Create the ingress Run the following command to create the ingress:\n$ cd $WORKDIR $ helm install governancedomain-nginx-designconsole kubernetes/design-console-ingress --namespace oigns --values kubernetes/design-console-ingress/values.yaml The output will look similar to the following:\nNAME: governancedomain-nginx-designconsole Mon Nov 15 04:19:33 2021 NAMESPACE: oigns STATUS: deployed REVISION: 1 TEST SUITE: None Run the following command to show the ingress is created successfully:\n$ kubectl describe ing governancedomain-nginx-designconsole -n \u0026lt;domain_namespace\u0026gt; For example:\n$ kubectl describe ing governancedomain-nginx-designconsole -n oigns The output will look similar to the following:\nName: governancedomain-nginx-designconsole Namespace: oigns Address: Default backend: default-http-backend:80 (\u0026lt;error: endpoints \u0026quot;default-http-backend\u0026quot; not found\u0026gt;) Rules: Host Path Backends ---- ---- -------- * governancedomain-cluster-oim-cluster:14002 (10.244.2.103:14002) Annotations: kubernetes.io/ingress.class: nginx meta.helm.sh/release-name: governancedomain-nginx-designconsole meta.helm.sh/release-namespace: oigns nginx.ingress.kubernetes.io/affinity: cookie nginx.ingress.kubernetes.io/configuration-snippet: more_set_input_headers \u0026quot;X-Forwarded-Proto: https\u0026quot;; more_set_input_headers \u0026quot;WL-Proxy-SSL: true\u0026quot;; nginx.ingress.kubernetes.io/enable-access-log: false 
nginx.ingress.kubernetes.io/ingress.allow-http: false nginx.ingress.kubernetes.io/proxy-buffer-size: 2000k Events: Type Reason Age From Message ---- ------ ---- ---- ------- Normal Sync 6s nginx-ingress-controller Scheduled for sync Update the T3 channel Log in to the WebLogic Console using https://${MASTERNODE-HOSTNAME}:${MASTERNODE-PORT}/console.\n Navigate to Environment, click Servers, and then select oim_server1.\n Click Protocols, and then Channels.\n Click the default T3 channel called T3Channel.\n Click Lock and Edit.\n Set the External Listen Address to a worker node where oim_server1 is running.\nNote: Use kubectl get pods -n \u0026lt;domain_namespace\u0026gt; -o wide to see the worker node it is running on. For example, below the governancedomain-oim-server1 is running on worker-node2:\n$ kubectl get pods -n oigns -o wide NAME READY STATUS RESTARTS AGE IP NODE NOMINATED NODE READINESS GATES governancedomain-adminserver 1/1 Running 0 33m 10.244.2.96 worker-node2 \u0026lt;none\u0026gt; \u0026lt;none\u0026gt; governancedomain-create-fmw-infra-sample-domain-job-8cww8 0/1 Completed 0 11d 10.244.2.45 worker-node2 \u0026lt;none\u0026gt; \u0026lt;none\u0026gt; governancedomain-oim-server1 1/1 Running 0 31m 10.244.2.98 worker-node2 \u0026lt;none\u0026gt; \u0026lt;none\u0026gt; governancedomain-soa-server1 1/1 Running 0 31m 10.244.2.97 worker-node2 \u0026lt;none\u0026gt; \u0026lt;none\u0026gt; helper 1/1 Running 0 11d 10.244.2.30 worker-node2 \u0026lt;none\u0026gt; \u0026lt;none\u0026gt; logstash-wls-f448b44c8-92l27 1/1 Running 0 7d23h 10.244.1.27 worker-node1 \u0026lt;none\u0026gt; \u0026lt;none\u0026gt; Set the External Listen Port to the ingress controller port.\n Click Save.\n Click Activate Changes.\n Restart the OIG domain Restart the domain for the above changes to take effect by following Stopping and starting the administration server and managed servers.\nDesign Console Client It is possible to use Design Console from an on-premises install, or from a 
container image.\nUsing an on-premises installed Design Console The instructions below should be performed on the client where Design Console is installed.\n Import the CA certificate into the java keystore\nIf in Generate SSL Certificate you requested a certificate from a Certificate Authority (CA), then you must import the CA certificate (e.g cacert.crt) that signed your certificate, into the java truststore used by Design Console.\nIf in Generate SSL Certificate you generated a self-signed certificate (e.g tls.crt), you must import the self-signed certificate into the java truststore used by Design Console.\nImport the certificate using the following command:\n$ keytool -import -trustcacerts -alias dc -file \u0026lt;certificate\u0026gt; -keystore $JAVA_HOME/jre/lib/security/cacerts where \u0026lt;certificate\u0026gt; is the CA certificate, or self-signed certificate.\n Once complete follow Login to the Design Console.\n Using a container image for Design Console The Design Console can be run from a container using X windows emulation.\n On the parent machine where the Design Console is to be displayed, run xhost +.\n Execute the following command to start a container to run Design Console:\n$ docker run -u root --name oigdcbase -it \u0026lt;image\u0026gt; bash For example:\n$ docker run -u root -it --name oigdcbase oracle/oig:12.2.1.4.0-8-ol7-211022.0723 bash This will take you into a bash shell inside the container:\nbash-4.2# Inside the container set the proxy, for example:\nbash-4.2# export https_proxy=http://proxy.example.com:80 Install the relevant X windows packages in the container:\nbash-4.2# yum install libXext libXrender libXtst Execute the following outside the container to create a new Design Console image from the container:\n$ docker commit \u0026lt;container_name\u0026gt; \u0026lt;design_console_image_name\u0026gt; For example:\n$ docker commit oigdcbase oigdc Exit the container bash session:\nbash-4.2# exit Start a new container using the Design 
Console image:\n$ docker run --name oigdc -it oigdc /bin/bash This will take you into a bash shell for the container:\nbash-4.2# Copy the Ingress CA certificate into the container\nIf in Generate SSL Certificate you requested a certificate from a Certificate Authority (CA), then you must copy the CA certificate (e.g cacert.crt) that signed your certificate, into the container\nIf in Generate SSL Certificate you generated a self-signed certificate (e.g tls.crt), you must copy the self-signed certificate into the container\nRun the following command outside the container:\n$ cd \u0026lt;workdir\u0026gt;/ssl $ docker cp \u0026lt;certificate\u0026gt; \u0026lt;container_name\u0026gt;:/u01/jdk/jre/lib/security/\u0026lt;certificate\u0026gt; For example:\n$ cd /scratch/OIGK8S/ssl $ docker cp tls.crt oigdc:/u01/jdk/jre/lib/security/tls.crt Import the certificate using the following command:\nbash-4.2# /u01/jdk/bin/keytool -import -trustcacerts -alias dc -file /u01/jdk/jre/lib/security/\u0026lt;certificate\u0026gt; -keystore /u01/jdk/jre/lib/security/cacerts For example:\nbash-4.2# /u01/jdk/bin/keytool -import -trustcacerts -alias dc -file /u01/jdk/jre/lib/security/tls.crt -keystore /u01/jdk/jre/lib/security/cacerts In the container run the following to export the DISPLAY:\n$ export DISPLAY=\u0026lt;parent_machine_hostname:1\u0026gt; Start the Design Console from the container:\nbash-4.2# cd idm/designconsole bash-4.2# sh xlclient.sh The Design Console login should be displayed. Now follow Login to the Design Console.\n Login to the Design Console Launch the Design Console and in the Oracle Identity Manager Design Console login page enter the following details:\nEnter the following details and click Login:\n Server URL: \u0026lt;url\u0026gt; User ID: xelsysadm Password: \u0026lt;password\u0026gt;. 
where \u0026lt;url\u0026gt; is https://${MASTERNODE-HOSTNAME}:${MASTERNODE-PORT}.\n If successful the Design Console will be displayed.\n " +}, +{ + "uri": "/fmw-kubernetes/21.4.2/oam/prerequisites/", + "title": "Prerequisites", + "tags": [], + "description": "System requirements and limitations for deploying and running an OAM domain home", + "content": "Introduction This document provides information about the system requirements and limitations for deploying and running OAM domains with the WebLogic Kubernetes Operator 3.3.0.\nSystem requirements for OAM domains A running Kubernetes cluster with Helm and Docker installed. For the minimum version requirements refer to document ID 2723908.1 on My Oracle Support. You must have the cluster-admin role to install the operator. We do not currently support running OAM in non-Linux containers. A running Oracle Database 12.2.0.1 or later. The database must be a supported version for OAM as outlined in Oracle Fusion Middleware 12c certifications. It must meet the requirements as outlined in About Database Requirements for an Oracle Fusion Middleware Installation and in RCU Requirements for Oracle Databases. Limitations Compared to running a WebLogic Server domain in Kubernetes using the operator, the following limitations currently exist for OAM domains:\n In this release, OAM domains are supported using the “domain on a persistent volume” model only, where the domain home is located in a persistent volume (PV). The \u0026ldquo;domain in image\u0026rdquo; model is not supported. Only configured clusters are supported. Dynamic clusters are not supported for OAM domains. Note that you can still use all of the scaling features, you just need to define the maximum size of your cluster at domain creation time. The WebLogic Monitoring Exporter currently supports the WebLogic MBean trees only. Support for JRF MBeans has not been added yet. 
" +}, +{ + "uri": "/fmw-kubernetes/21.4.2/oig/prerequisites/", + "title": "Prerequisites", + "tags": [], + "description": "Sample for creating an OIG Suite domain home on an existing PV or PVC, and the domain resource YAML file for deploying the generated OIG domain.", + "content": "Introduction This document provides information about the system requirements and limitations for deploying and running OIG domains with the WebLogic Kubernetes Operator 3.3.0.\nSystem requirements for OIG domains A running Kubernetes cluster with Helm and Docker installed. For the minimum version requirements refer to document ID 2723908.1 on My Oracle Support. You must have the cluster-admin role to install the operator. We do not currently support running OIG in non-Linux containers. A running Oracle Database 12.2.0.1 or later. The database must be a supported version for OIG as outlined in Oracle Fusion Middleware 12c certifications. It must meet the requirements as outlined in About Database Requirements for an Oracle Fusion Middleware Installation and in RCU Requirements for Oracle Databases. Limitations Compared to running a WebLogic Server domain in Kubernetes using the operator, the following limitations currently exist for OIG domains:\n In this release, OIG domains are supported using the “domain on a persistent volume” model only, where the domain home is located in a persistent volume (PV). The \u0026ldquo;domain in image\u0026rdquo; model is not supported. Only configured clusters are supported. Dynamic clusters are not supported for OIG domains. Note that you can still use all of the scaling features, you just need to define the maximum size of your cluster at domain creation time. The WebLogic Monitoring Exporter currently supports the WebLogic MBean trees only. Support for JRF MBeans has not been added yet. 
" +}, +{ + "uri": "/fmw-kubernetes/21.4.2/oig/manage-oig-domains/wlst-admin-operations/", + "title": "WLST administration operations", + "tags": [], + "description": "Describes the steps for WLST administration using helper pod running in the same Kubernetes Cluster as OIG Domain.", + "content": "Invoke WLST and access Administration Server To use WLST to administer the OIG domain, use a helper pod in the same Kubernetes cluster as the OIG Domain.\n Run the following command to create a helper pod if one doesn\u0026rsquo;t already exist:\n$ kubectl run helper --image \u0026lt;image_name\u0026gt; -n \u0026lt;domain_namespace\u0026gt; -- sleep infinity For example:\n$ kubectl run helper --image 12.2.1.4.0-8-ol7-211022.0723 -n oigns -- sleep infinity The output will look similar to the following:\npod/helper created Run the following command to start a bash shell in the helper pod:\n$ kubectl exec -it helper -n \u0026lt;domain_namespace\u0026gt; -- /bin/bash For example:\n$ kubectl exec -it helper -n oigns -- /bin/bash This will take you into a bash shell in the running helper pod:\n[oracle@helper ~]$ Connect to WLST using the following commands:\n[oracle@helper ~]$ cd $ORACLE_HOME/oracle_common/common/bin [oracle@helper ~]$ ./wlst.sh The output will look similar to the following:\nInitializing WebLogic Scripting Tool (WLST) ... Jython scans all the jar files it can find at first startup. Depending on the system, this process may take a few minutes to complete, and WLST may not return a prompt right away. 
Welcome to WebLogic Server Administration Scripting Shell Type help() for help on available commands wls:/offline\u0026gt; To access t3 for the Administration Server connect as follows:\nwls:/offline\u0026gt; connect(\u0026#39;weblogic\u0026#39;,\u0026#39;\u0026lt;password\u0026gt;\u0026#39;,\u0026#39;t3://governancedomain-adminserver:7001\u0026#39;) The output will look similar to the following:\nConnecting to t3://governancedomain-adminserver:7001 with userid weblogic ... Successfully connected to Admin Server \u0026quot;AdminServer\u0026quot; that belongs to domain \u0026quot;governancedomain\u0026quot;. Warning: An insecure protocol was used to connect to the server. To ensure on-the-wire security, the SSL port or Admin port should be used instead. wls:/governancedomain/serverConfig/\u0026gt; Or to access t3 for the OIG Cluster service, connect as follows:\nwls:/offline\u0026gt; connect(\u0026#39;weblogic\u0026#39;,\u0026#39;\u0026lt;password\u0026gt;\u0026#39;,\u0026#39;t3://governancedomain-cluster-oim-cluster:14000\u0026#39;) The output will look similar to the following:\nConnecting to t3://governancedomain-cluster-oim-cluster:14000 with userid weblogic ... Successfully connected to managed Server \u0026quot;oim_server1\u0026quot; that belongs to domain \u0026quot;governancedomain\u0026quot;. Warning: An insecure protocol was used to connect to the server. To ensure on-the-wire security, the SSL port or Admin port should be used instead. 
wls:/governancedomain/serverConfig/\u0026gt; Sample operations For a full list of WLST operations refer to WebLogic Server WLST Online and Offline Command Reference.\nDisplay servers wls:/governancedomain/serverConfig/\u0026gt; cd('/Servers') wls:/governancedomain/serverConfig/Servers\u0026gt; ls () dr-- AdminServer dr-- oim_server1 dr-- oim_server2 dr-- oim_server3 dr-- oim_server4 dr-- oim_server5 dr-- soa_server1 dr-- soa_server2 dr-- soa_server3 dr-- soa_server4 dr-- soa_server5 wls:/governancedomain/serverConfig/Servers\u0026gt; Performing WLST administration via SSL By default the SSL port is not enabled for the Administration Server or OIG Managed Servers. To configure the SSL port for the Administration Server and Managed Servers login to WebLogic Administration console https://${MASTERNODE-HOSTNAME}:${MASTERNODE-PORT}/console and navigate to Lock \u0026amp; Edit -\u0026gt; Environment -\u0026gt;Servers -\u0026gt; server_name -\u0026gt;Configuration -\u0026gt; General -\u0026gt; SSL Listen Port Enabled -\u0026gt; Provide SSL Port ( For Administration Server: 7002 and for OIG Managed Server (oim_server1): 14101) - \u0026gt; Save -\u0026gt; Activate Changes.\nNote: If configuring the OIG Managed Servers for SSL you must enable SSL on the same port for all servers (oim_server1 through oim_server4)\n Create a myscripts directory as follows:\n$ cd $WORKDIR/kubernetes $ mkdir myscripts $ cd myscripts Create a sample yaml template file in the myscripts directory called \u0026lt;domain_uid\u0026gt;-adminserver-ssl.yaml to create a Kubernetes service for the Administration Server:\nNote: Update the domainName, domainUID and namespace based on your environment.\napiVersion: v1 kind: Service metadata: labels: serviceType: SERVER weblogic.domainName: governancedomain weblogic.domainUID: governancedomain weblogic.resourceVersion: domain-v2 weblogic.serverName: AdminServer name: governancedomain-adminserver-ssl namespace: oigns spec: clusterIP: None ports: - name: 
default port: 7002 protocol: TCP targetPort: 7002 selector: weblogic.createdByOperator: \u0026quot;true\u0026quot; weblogic.domainUID: governancedomain weblogic.serverName: AdminServer type: ClusterIP and create the following sample yaml template file \u0026lt;domain_uid\u0026gt;-oim-cluster-ssl.yaml for the OIG Managed Server:\napiVersion: v1 kind: Service metadata: labels: serviceType: SERVER weblogic.domainName: governancedomain weblogic.domainUID: governancedomain weblogic.resourceVersion: domain-v2 name: governancedomain-cluster-oim-cluster-ssl namespace: oigns spec: clusterIP: None ports: - name: default port: 14101 protocol: TCP targetPort: 14101 selector: weblogic.clusterName: oim_cluster weblogic.createdByOperator: \u0026quot;true\u0026quot; weblogic.domainUID: governancedomain type: ClusterIP Apply the template using the following command for the Administration Server:\n$ kubectl apply -f governancedomain-adminserver-ssl.yaml service/governancedomain-adminserver-ssl created or using the following command for the OIG Managed Server:\n$ kubectl apply -f governancedomain-oim-cluster-ssl.yaml service/governancedomain-cluster-oim-cluster-ssl created Validate that the Kubernetes Services to access SSL ports are created successfully:\n$ kubectl get svc -n \u0026lt;domain_namespace\u0026gt; |grep ssl For example:\n$ kubectl get svc -n oigns |grep ssl The output will look similar to the following:\ngovernancedomain-adminserver-ssl ClusterIP None \u0026lt;none\u0026gt; 7002/TCP 74s governancedomain-cluster-oim-cluster-ssl ClusterIP None \u0026lt;none\u0026gt; 14101/TCP 21s Connect to a bash shell of the helper pod:\n$ kubectl exec -it helper -n oigns -- /bin/bash In the bash shell run the following:\n[oracle@governancedomain-adminserver oracle]$ export WLST_PROPERTIES=\u0026#34;-Dweblogic.security.SSL.ignoreHostnameVerification=true -Dweblogic.security.TrustKeyStore=DemoTrust\u0026#34; [oracle@governancedomain-adminserver oracle]$ cd 
/u01/oracle/oracle_common/common/bin [oracle@governancedomain-adminserver oracle]$ ./wlst.sh Initializing WebLogic Scripting Tool (WLST) ... Welcome to WebLogic Server Administration Scripting Shell Type help() for help on available commands wls:/offline\u0026gt; Connect to the Administration Server t3s service:\nwls:/offline\u0026gt; connect(\u0026#39;weblogic\u0026#39;,\u0026#39;\u0026lt;password\u0026gt;\u0026#39;,\u0026#39;t3s://governancedomain-adminserver-ssl:7002\u0026#39;) Connecting to t3s://governancedomain-adminserver-ssl:7002 with userid weblogic ... \u0026lt;Nov 15, 2021 4:51:43 PM GMT\u0026gt; \u0026lt;Info\u0026gt; \u0026lt;Security\u0026gt; \u0026lt;BEA-090905\u0026gt; \u0026lt;Disabling the CryptoJ JCE Provider self-integrity check for better startup performance. To enable this check, specify -Dweblogic.security.allowCryptoJDefaultJCEVerification=true.\u0026gt; \u0026lt;Nov 15, 2021 4:51:43 PM GMT\u0026gt; \u0026lt;Info\u0026gt; \u0026lt;Security\u0026gt; \u0026lt;BEA-090906\u0026gt; \u0026lt;Changing the default Random Number Generator in RSA CryptoJ from ECDRBG128 to HMACDRBG. To disable this change, specify -Dweblogic.security.allowCryptoJDefaultPRNG=true.\u0026gt; \u0026lt;Nov 15, 2021 4:51:43 PM GMT\u0026gt; \u0026lt;Info\u0026gt; \u0026lt;Security\u0026gt; \u0026lt;BEA-090909\u0026gt; \u0026lt;Using the configured custom SSL Hostname Verifier implementation: weblogic.security.utils.SSLWLSHostnameVerifier$NullHostnameVerifier.\u0026gt; Successfully connected to Admin Server \u0026#34;AdminServer\u0026#34; that belongs to domain \u0026#34;governancedomain\u0026#34;. wls:/governancedomain/serverConfig/\u0026gt; To connect to the OIG Managed Server t3s service:\nwls:/offline\u0026gt; connect(\u0026#39;weblogic\u0026#39;,\u0026#39;\u0026lt;password\u0026gt;\u0026#39;,\u0026#39;t3s://governancedomain-cluster-oim-cluster-ssl:14101\u0026#39;) Connecting to t3s://governancedomain-cluster-oim-cluster-ssl:14101 with userid weblogic ... 
\u0026lt;Nov 15, 2021 4:53:06 PM GMT\u0026gt; \u0026lt;Info\u0026gt; \u0026lt;Security\u0026gt; \u0026lt;BEA-090905\u0026gt; \u0026lt;Disabling the CryptoJ JCE Provider self-integrity check for better startup performance. To enable this check, specify -Dweblogic.security.allowCryptoJDefaultJCEVerification=true.\u0026gt; \u0026lt;Nov 15, 2021 4:53:06 PM GMT\u0026gt; \u0026lt;Info\u0026gt; \u0026lt;Security\u0026gt; \u0026lt;BEA-090906\u0026gt; \u0026lt;Changing the default Random Number Generator in RSA CryptoJ from ECDRBG128 to HMACDRBG. To disable this change, specify -Dweblogic.security.allowCryptoJDefaultPRNG=true.\u0026gt; \u0026lt;Nov 15, 2021 4:53:06 PM GMT\u0026gt; \u0026lt;Info\u0026gt; \u0026lt;Security\u0026gt; \u0026lt;BEA-090909\u0026gt; \u0026lt;Using the configured custom SSL Hostname Verifier implementation: weblogic.security.utils.SSLWLSHostnameVerifier$NullHostnameVerifier.\u0026gt; Successfully connected to managed Server \u0026#34;oim_server1\u0026#34; that belongs to domain \u0026#34;governancedomain\u0026#34;. wls:/governancedomain/serverConfig/\u0026gt; " +}, +{ + "uri": "/fmw-kubernetes/21.4.2/soa-domains/adminguide/deploying-composites/deploy-artifacts/", + "title": "Deploy using composites in a persistent volume or image", + "tags": [], + "description": "Deploy Oracle SOA Suite and Oracle Service Bus composite applications artifacts in a persistent volume or in an image.", + "content": "Learn how to deploy Oracle SOA Suite and Oracle Service Bus composite applications artifacts in a Kubernetes persistent volume or in an image to an Oracle SOA Suite environment deployed using a WebLogic Kubernetes Operator.\nThe deployment methods described in Deploy using JDeveloper and Deploy using Maven and Ant are manual processes. 
If you have the deployment artifacts (archives) already built, then you can package them either into a Kubernetes persistent volume or in an image and use this automated process to deploy the artifacts to an Oracle SOA Suite domain.\nPrepare to use the deploy artifacts script The sample scripts for deploying artifacts are available at ${WORKDIR}/create-soa-domain/domain-home-on-pv/\nYou must edit deploy-artifacts-inputs.yaml (or a copy of it) to provide the details of your domain and artifacts. Refer to the configuration parameters below to understand the information that you must provide in this file.\nConfiguration parameters The following parameters can be provided in the inputs file.\n Parameter Definition Default adminPort Port number of the Administration Server inside the Kubernetes cluster. 7001 adminServerName Name of the Administration Server. AdminServer domainUID Unique ID that is used to identify the domain. This ID cannot contain any characters that are not valid in a Kubernetes service name. soainfra domainType Type of the domain. Mandatory input for Oracle SOA Suite domains. You must provide one of the supported domain type values: soa (deploys artifacts into an Oracle SOA Suite domain), osb (deploys artifacts into an Oracle Service Bus domain), or soaosb (deploys artifacts into both Oracle SOA Suite and Oracle Service Bus domains). soa soaClusterName Name of the SOA WebLogic Server cluster instance in the domain. By default, the cluster name is soa_cluster. This configuration parameter is applicable only for soa and soaosb domain types. soa_cluster image SOA Suite Docker image. The artifacts deployment process requires Oracle SOA Suite 12.2.1.4. Refer to Obtain the Oracle SOA Suite Docker image for details on how to obtain or create the image. soasuite:12.2.1.4 imagePullPolicy Oracle SOA Suite Docker image pull policy. Valid values are IfNotPresent, Always, Never. 
IfNotPresent imagePullSecretName Name of the Kubernetes secret to access the Docker Store to pull the Oracle SOA Suite Docker image. The presence of the secret will be validated when this parameter is specified. weblogicCredentialsSecretName Name of the Kubernetes secret for the Administration Server\u0026rsquo;s user name and password. If not specified, then the value is derived from the domainUID as \u0026lt;domainUID\u0026gt;-weblogic-credentials. soainfra-domain-credentials namespace Kubernetes namespace in which the domain was created. soans artifactsSourceType The deploy artifacts source type. Set to PersistentVolume for deploy artifacts available in a persistent volume and Image for deploy artifacts available as an image. Image persistentVolumeClaimName Name of the persistent volume claim created that hosts the deployment artifacts. If not specified, the value is derived from the domainUID as \u0026lt;domainUID\u0026gt;-deploy-artifacts-pvc. soainfra-deploy-artifacts-pvc artifactsImage Deploy artifacts image. Required if artifactsSourceType is Image. artifacts:12.2.1.4 artifactsImagePullPolicy Deploy artifacts image pull policy. Valid values are IfNotPresent, Always, Never. IfNotPresent artifactsImagePullSecretName Name of the Kubernetes secret to access the deploy artifacts image. The presence of the secret will be validated when this parameter is specified. deployScriptFilesDir Directory on the host machine to locate the required files to deploy artifacts to the Oracle SOA Suite domain, including the script that is specified in the deployScriptName parameter. By default, this directory is set to the relative path deploy. deploy deployScriptsMountPath Mount path where the deploy artifacts scripts are located inside a pod. The deploy-artifacts.sh script creates a Kubernetes job to run the script (specified by the deployScriptName parameter) in a Kubernetes pod to deploy the artifacts. 
Files in the deployScriptFilesDir directory are mounted to this location in the pod, so that the Kubernetes pod can use the scripts and supporting files to deploy artifacts. /u01/weblogic deployScriptName Script that the deploy artifacts script uses to deploy artifacts to the Oracle SOA Suite domain. For Oracle SOA Suite, the script placed in the soa directory is used. For Oracle Service Bus, the script placed in the osb directory is used. The deploy-artifacts.sh script creates a Kubernetes job to run this script to deploy artifacts. The script is located in the in-pod directory that is specified by the deployScriptsMountPath parameter. deploy.sh soaArtifactsArchivePath Directory inside container where Oracle SOA Suite archives are placed. /u01/sarchives osbArtifactsArchivePath Directory inside container where Oracle Service Bus archives are placed. /u01/sbarchives The sample demonstrates how to deploy Oracle SOA Suite composites or Oracle Service Bus applications to an Oracle SOA Suite domain home.\nRun the deploy artifacts script Run the deploy artifacts script, specifying your inputs file and an output directory to store the generated artifacts:\n$ ./deploy-artifacts.sh \\ -i deploy-artifacts-inputs.yaml \\ -o \u0026lt;path to output-directory\u0026gt; The script performs the following steps:\n Creates a directory for the generated Kubernetes YAML files for the artifacts deployment process if it does not already exist. The path name is \u0026lt;path to output-directory\u0026gt;/weblogic-domains/\u0026lt;domainUID\u0026gt;/\u0026lt;YYYYMMDD-hhmmss\u0026gt;. If the directory already exists, its contents must be removed before running this script. Creates a Kubernetes job that starts a utility Oracle SOA Suite container and run scripts to deploy artifacts provided either in an image or in a persistent volume. Deploy artifacts from an image Create an image with artifacts\na. 
A sample Dockerfile to create the artifacts in an image is available at $WORKDIR/create-soa-domain/domain-home-on-pv/deploy-docker-file. This expects the Oracle SOA Suite related archives to be available in the soa directory and Oracle Service Bus archives to be available in the osb directory.\nb. Create the soa directory and copy the Oracle SOA Suite archives to be deployed to the directory:\n$ cd $WORKDIR/create-soa-domain/domain-home-on-pv/deploy-docker-file $ mkdir soa $ cp /path/sca_sampleBPEL.jar soa c. Create the osb directory and copy the Oracle Service Bus archives to be deployed to the directory:\n$ cd $WORKDIR/create-soa-domain/domain-home-on-pv/deploy-docker-file $ mkdir osb $ cp /path/simple_sbconfig.jar osb d. Create the image using build.sh. This script creates the image with default tag 12.2.1.4 (artifacts:12.2.1.4):\n$ cd $WORKDIR/create-soa-domain/domain-home-on-pv/deploy-docker-file $ ./build.sh -h Usage: build.sh -t [tag] Builds a Docker Image with Oracle SOA/OSB artifacts Parameters: -h: view usage -t: tag for image, default is 12.2.1.4 Click here to see sample output of script with tag 12.2.1.4-v1 ``` $ ./build.sh -t 12.2.1.4-v1 Sending build context to Docker daemon 36.35kB Step 1/13 : FROM busybox ---\u0026gt; 16ea53ea7c65 Step 2/13 : ARG SOA_ARTIFACTS_ARCHIVE_PATH=/u01/sarchives ---\u0026gt; Using cache ---\u0026gt; 411edf07f267 Step 3/13 : ARG OSB_ARTIFACTS_ARCHIVE_PATH=/u01/sbarchives ---\u0026gt; Using cache ---\u0026gt; c4214b9cf0ae Step 4/13 : ARG USER=oracle ---\u0026gt; Using cache ---\u0026gt; c8ebcd5ee546 Step 5/13 : ARG USERID=1000 ---\u0026gt; Using cache ---\u0026gt; 5780beb0c3cf Step 6/13 : ARG GROUP=root ---\u0026gt; Using cache ---\u0026gt; 048e67c71f92 Step 7/13 : ENV SOA_ARTIFACTS_ARCHIVE_PATH=${SOA_ARTIFACTS_ARCHIVE_PATH} ---\u0026gt; Using cache ---\u0026gt; 31ae33cfd9bb Step 8/13 : ENV OSB_ARTIFACTS_ARCHIVE_PATH=${OSB_ARTIFACTS_ARCHIVE_PATH} ---\u0026gt; Using cache ---\u0026gt; 79602bf64dc0 Step 9/13 : RUN adduser -D -u 
${USERID} -G $GROUP $USER ---\u0026gt; Using cache ---\u0026gt; 07c12cea52f9 Step 10/13 : COPY soa/ ${SOA_ARTIFACTS_ARCHIVE_PATH}/ ---\u0026gt; bfeb138516d8 Step 11/13 : COPY osb/ ${OSB_ARTIFACTS_ARCHIVE_PATH}/ ---\u0026gt; 0359a11f8f76 Step 12/13 : RUN chown -R $USER:$GROUP ${SOA_ARTIFACTS_ARCHIVE_PATH}/ ${OSB_ARTIFACTS_ARCHIVE_PATH}/ ---\u0026gt; Running in 285fb2bd8434 Removing intermediate container 285fb2bd8434 ---\u0026gt; 2e8d8c337de0 Step 13/13 : USER $USER ---\u0026gt; Running in c9db494e46ab Removing intermediate container c9db494e46ab ---\u0026gt; 40295aa15317 Successfully built 40295aa15317 Successfully tagged artifacts:12.2.1.4-v1 INFO: Artifacts image for Oracle SOA suite is ready to be extended. --\u0026gt; artifacts:12.2.1.4-v1 INFO: Build completed in 4 seconds. ``` Update the image details in deploy-artifacts-inputs.yaml for parameter artifactsImage and invoke deploy-artifacts.sh to perform deployment of artifacts.\n Click here to see sample output of deployment for domainType of soaosb $ ./deploy-artifacts.sh -i deploy-artifacts-inputs.yaml -o out-deploy Input parameters being used export version=\u0026quot;deploy-artifacts-inputs-v1\u0026quot; export adminPort=\u0026quot;7001\u0026quot; export adminServerName=\u0026quot;AdminServer\u0026quot; export domainUID=\u0026quot;soainfra\u0026quot; export domainType=\u0026quot;soaosb\u0026quot; export soaClusterName=\u0026quot;soa_cluster\u0026quot; export soaManagedServerPort=\u0026quot;8001\u0026quot; export image=\u0026quot;soasuite:12.2.1.4\u0026quot; export imagePullPolicy=\u0026quot;IfNotPresent\u0026quot; export weblogicCredentialsSecretName=\u0026quot;soainfra-domain-credentials\u0026quot; export namespace=\u0026quot;soans\u0026quot; export artifactsSourceType=\u0026quot;Image\u0026quot; export artifactsImage=\u0026quot;artifacts:12.2.1.4-v1\u0026quot; export artifactsImagePullPolicy=\u0026quot;IfNotPresent\u0026quot; export deployScriptsMountPath=\u0026quot;/u01/weblogic\u0026quot; export 
deployScriptName=\u0026quot;deploy.sh\u0026quot; export deployScriptFilesDir=\u0026quot;deploy\u0026quot; export soaArtifactsArchivePath=\u0026quot;/u01/sarchives\u0026quot; export osbArtifactsArchivePath=\u0026quot;/u01/sbarchives\u0026quot; Generating out-deploy/deploy-artifacts/soainfra/20211022-152335/deploy-artifacts-job.yaml Checking to see if the secret soainfra-domain-credentials exists in namespace soans configmap/soainfra-deploy-scripts-soa-job-cm created Checking the configmap soainfra-deploy-scripts-soa-job-cm was created configmap/soainfra-deploy-scripts-soa-job-cm labeled configmap/soainfra-deploy-scripts-osb-job-cm created Checking the configmap soainfra-deploy-scripts-osb-job-cm was created configmap/soainfra-deploy-scripts-osb-job-cm labeled Checking if object type job with name soainfra-deploy-artifacts-job-20211022-152335 exists Deploying artifacts by creating the job out-deploy/deploy-artifacts/soainfra/20211022-152335/deploy-artifacts-job.yaml job.batch/soainfra-deploy-artifacts-job-20211022-152335 created Waiting for the job to complete... 
status on iteration 1 of 20 for soainfra pod soainfra-deploy-artifacts-job-20211022-152335-r7ffj status is NotReady status on iteration 2 of 20 for soainfra pod soainfra-deploy-artifacts-job-20211022-152335-r7ffj status is Completed configmap \u0026quot;soainfra-deploy-scripts-soa-job-cm\u0026quot; deleted configmap \u0026quot;soainfra-deploy-scripts-osb-job-cm\u0026quot; deleted The following files were generated: out-deploy/deploy-artifacts/soainfra/20211022-152335/deploy-artifacts-inputs.yaml out-deploy/deploy-artifacts/soainfra/20211022-152335/deploy-artifacts-job.yaml Completed $ kubectl get all -n soans|grep deploy pod/soainfra-deploy-artifacts-job-20211022-152335-r7ffj 0/2 Completed 0 15m job.batch/soainfra-deploy-artifacts-job-20211022-152335 1/1 43s 15m $ Note: When you are running the script for domainType soaosb, a deployment pod is created with two containers, one for Oracle SOA Suite artifacts deployments and another for Oracle Service Bus artifacts deployments. When the deployment completes for one container while other container is still running, the pod status will move from Ready to NotReady. Once both the deployments complete successfully, the status of the pod moves to Completed.\n Deploy artifacts from a persistent volume Copy the artifacts for Oracle SOA Suite to the soa directory and Oracle Service Bus to the osb directory at the share location. 
For example, with location /share, artifacts for Oracle SOA Suite are in /share/soa and Oracle Service Bus are in /share/osb.\n$ ls /share/soa sca_sampleBPEL.jar $ $ ls /share/osb/ simple_sbconfig.jar $ Create a PersistentVolume with the sample provided (artifacts-pv.yaml):\napiVersion: v1 kind: PersistentVolume metadata: name: soainfra-deploy-artifacts-pv spec: storageClassName: deploy-storage-class capacity: storage: 10Gi accessModes: - ReadOnlyMany persistentVolumeReclaimPolicy: Retain hostPath: path: \u0026quot;/share\u0026quot; $ kubectl apply -f artifacts-pv.yaml Create a PersistentVolumeClaim with the sample provided (artifacts-pvc.yaml):\napiVersion: v1 kind: PersistentVolumeClaim metadata: name: soainfra-deploy-artifacts-pvc namespace: soans spec: storageClassName: deploy-storage-class accessModes: - ReadOnlyMany resources: requests: storage: 10Gi $ kubectl apply -f artifacts-pvc.yaml Update the artifactsSourceType to PersistentVolume and provide the name for persistentVolumeClaimName in deploy-artifacts-inputs.yaml.\n Invoke deploy-artifacts.sh to deploy artifacts for artifacts present in persistentVolumeClaimName.\n Click here to see sample output of deployment for domainType of soaosb $ ./deploy-artifacts.sh -i deploy-artifacts-inputs.yaml -o out-deploy Input parameters being used export version=\u0026quot;deploy-artifacts-inputs-v1\u0026quot; export adminPort=\u0026quot;7001\u0026quot; export adminServerName=\u0026quot;AdminServer\u0026quot; export domainUID=\u0026quot;soainfra\u0026quot; export domainType=\u0026quot;soaosb\u0026quot; export soaClusterName=\u0026quot;soa_cluster\u0026quot; export soaManagedServerPort=\u0026quot;8001\u0026quot; export image=\u0026quot;soasuite:12.2.1.4\u0026quot; export imagePullPolicy=\u0026quot;IfNotPresent\u0026quot; export weblogicCredentialsSecretName=\u0026quot;soainfra-domain-credentials\u0026quot; export namespace=\u0026quot;soans\u0026quot; export artifactsSourceType=\u0026quot;PersistentVolume\u0026quot; 
export persistentVolumeClaimName=\u0026quot;soainfra-deploy-artifacts-pvc\u0026quot; export deployScriptsMountPath=\u0026quot;/u01/weblogic\u0026quot; export deployScriptName=\u0026quot;deploy.sh\u0026quot; export deployScriptFilesDir=\u0026quot;deploy\u0026quot; export soaArtifactsArchivePath=\u0026quot;/u01/sarchives\u0026quot; export osbArtifactsArchivePath=\u0026quot;/u01/sbarchives\u0026quot; Generating out-deploy/deploy-artifacts/soainfra/20211022-164735/deploy-artifacts-job.yaml Checking to see if the secret soainfra-domain-credentials exists in namespace soans configmap/soainfra-deploy-scripts-soa-job-cm created Checking the configmap soainfra-deploy-scripts-soa-job-cm was created configmap/soainfra-deploy-scripts-soa-job-cm labeled configmap/soainfra-deploy-scripts-osb-job-cm created Checking the configmap soainfra-deploy-scripts-osb-job-cm was created configmap/soainfra-deploy-scripts-osb-job-cm labeled Checking if object type job with name soainfra-deploy-artifacts-job-20211022-164735 exists Deploying artifacts by creating the job out-deploy/deploy-artifacts/soainfra/20211022-164735/deploy-artifacts-job.yaml job.batch/soainfra-deploy-artifacts-job-20211022-164735 created Waiting for the job to complete... 
status on iteration 1 of 20 for soainfra pod soainfra-deploy-artifacts-job-20211022-164735-66fvn status is NotReady status on iteration 2 of 20 for soainfra pod soainfra-deploy-artifacts-job-20211022-164735-66fvn status is Completed configmap \u0026quot;soainfra-deploy-scripts-soa-job-cm\u0026quot; deleted configmap \u0026quot;soainfra-deploy-scripts-osb-job-cm\u0026quot; deleted The following files were generated: out-deploy/deploy-artifacts/soainfra/20211022-164735/deploy-artifacts-inputs.yaml out-deploy/deploy-artifacts/soainfra/20211022-164735/deploy-artifacts-job.yaml Completed $ kubectl get all -n soans |grep deploy pod/soainfra-deploy-artifacts-job-20211022-164735-66fvn 0/2 Completed 0 3m1s job.batch/soainfra-deploy-artifacts-job-20211022-164735 1/1 37s 3m1s $ Note: When you are running the script for domainType of soaosb, a deployment pod is created with two containers, one for Oracle SOA Suite artifacts deployments and one for Oracle Service Bus artifacts deployments. When the deployment completes for one container while other container is still running, the pod status moves from Ready to NotReady. 
Once both the deployments successfully complete, the status of the pod moves to Completed.\n Verify the deployment logs To confirm the deployment of artifacts was successful, verify the output using the kubectl logs command:\n Note: Replace \u0026lt;YYYYMMDD-hhmmss\u0026gt;, \u0026lt;domainUID\u0026gt; and \u0026lt;namespace\u0026gt; with values for your environment.\n For Oracle SOA Suite artifacts:\n$ kubectl logs job.batch/\u0026lt;domainUID\u0026gt;-deploy-artifacts-job-\u0026lt;YYYYMMDD-hhmmss\u0026gt; -n \u0026lt;namespace\u0026gt; soa-deploy-artifacts-job For Oracle Service Bus artifacts:\n$ kubectl logs job.batch/\u0026lt;domainUID\u0026gt;-deploy-artifacts-job-\u0026lt;YYYYMMDD-hhmmss\u0026gt; -n \u0026lt;namespace\u0026gt; osb-deploy-artifacts-job " +}, +{ + "uri": "/fmw-kubernetes/21.4.2/soa-domains/installguide/create-soa-domains/", + "title": "Create Oracle SOA Suite domains", + "tags": [], + "description": "Create an Oracle SOA Suite domain home on an existing PV or PVC, and create the domain resource YAML file for deploying the generated Oracle SOA Suite domain.", + "content": "The SOA deployment scripts demonstrate the creation of an Oracle SOA Suite domain home on an existing Kubernetes persistent volume (PV) and persistent volume claim (PVC). The scripts also generate the domain YAML file, which can then be used to start the Kubernetes artifacts of the corresponding domain.\nPrerequisites Before you begin, complete the following steps:\n Review the Domain resource documentation. Review the requirements and limitations. Ensure that you have executed all the preliminary steps in Prepare your environment. Ensure that the database and the WebLogic Kubernetes Operator are running. Prepare to use the create domain script The sample scripts for Oracle SOA Suite domain deployment are available at ${WORKDIR}/create-soa-domain.\nYou must edit create-domain-inputs.yaml (or a copy of it) to provide the details for your domain. 
Refer to the configuration parameters below to understand the information that you must provide in this file.\nConfiguration parameters The following parameters can be provided in the inputs file.\n Parameter Definition Default sslEnabled Boolean value indicating whether to enable SSL for each WebLogic Server instance. false adminPort Port number for the Administration Server inside the Kubernetes cluster. 7001 adminServerSSLPort SSL port number of the Administration Server inside the Kubernetes cluster. 7002 adminNodePort Port number of the Administration Server outside the Kubernetes cluster. 30701 adminServerName Name of the Administration Server. AdminServer configuredManagedServerCount Number of Managed Server instances to generate for the domain. 5 soaClusterName Name of the SOA WebLogic Server cluster instance to generate for the domain. By default, the cluster name is soa_cluster. This configuration parameter is applicable only for soa and soaosb domain types. soa_cluster osbClusterName Name of the Oracle Service Bus WebLogic Server cluster instance to generate for the domain. By default, the cluster name is osb_cluster. This configuration parameter is applicable only for osb and soaosb domain types. osb_cluster createDomainFilesDir Directory on the host machine to locate all the files to create a WebLogic Server domain, including the script that is specified in the createDomainScriptName parameter. By default, this directory is set to the relative path wlst, and the create script will use the built-in WLST offline scripts in the wlst directory to create the WebLogic Server domain. An absolute path is also supported to point to an arbitrary directory in the file system. The built-in scripts can be replaced by the user-provided scripts as long as those files are in the specified directory. 
Files in this directory are put into a Kubernetes config map, which in turn is mounted to the createDomainScriptsMountPath, so that the Kubernetes pod can use the scripts and supporting files to create a domain home. wlst createDomainScriptsMountPath Mount path where the create domain scripts are located inside a pod. The create-domain.sh script creates a Kubernetes job to run the script (specified by the createDomainScriptName parameter) in a Kubernetes pod to create a domain home. Files in the createDomainFilesDir directory are mounted to this location in the pod, so that the Kubernetes pod can use the scripts and supporting files to create a domain home. /u01/weblogic createDomainScriptName Script that the create domain script uses to create a WebLogic Server domain. The create-domain.sh script creates a Kubernetes job to run this script to create a domain home. The script is located in the in-pod directory that is specified by the createDomainScriptsMountPath parameter. If you need to provide your own scripts to create the domain home, instead of using the built-in scripts, you must use this property to set the name of the script that you want the create domain job to run. create-domain-job.sh domainHome Home directory of the SOA domain. If not specified, the value is derived from the domainUID as /shared/domains/\u0026lt;domainUID\u0026gt;. /u01/oracle/user_projects/domains/soainfra domainPVMountPath Mount path of the domain persistent volume. /u01/oracle/user_projects domainUID Unique ID that will be used to identify this particular domain. Used as the name of the generated WebLogic Server domain as well as the name of the Kubernetes domain resource. This ID must be unique across all domains in a Kubernetes cluster. This ID cannot contain any character that is not valid in a Kubernetes service name. soainfra domainType Type of the domain. Mandatory input for Oracle SOA Suite domains. 
You must provide one of the supported domain type values: soa (deploys a SOA domain with Enterprise Scheduler (ESS)), osb (deploys an Oracle Service Bus domain), and soaosb (deploys a domain with SOA, Oracle Service Bus, and Enterprise Scheduler (ESS)). soa exposeAdminNodePort Boolean value indicating if the Administration Server is exposed outside of the Kubernetes cluster. false exposeAdminT3Channel Boolean value indicating if the T3 administrative channel is exposed outside the Kubernetes cluster. false httpAccessLogInLogHome Boolean value indicating if server HTTP access log files should be written to the same directory as logHome. If false, server HTTP access log files will be written to the directory specified in the WebLogic Server domain home configuration. true image SOA Suite Docker image. The operator requires Oracle SOA Suite 12.2.1.4. Refer to Obtain the Oracle SOA Suite Docker image for details on how to obtain or create the image. soasuite:12.2.1.4 imagePullPolicy Oracle SOA Suite Docker image pull policy. Valid values are IfNotPresent, Always, Never. IfNotPresent imagePullSecretName Name of the Kubernetes secret to access the Docker Store to pull the WebLogic Server Docker image. The presence of the secret will be validated when this parameter is specified. includeServerOutInPodLog Boolean value indicating whether to include the server .out to the pod\u0026rsquo;s stdout. true initialManagedServerReplicas Number of Managed Servers to initially start for the domain. 2 javaOptions Java options for starting the Administration Server and Managed Servers. A Java option can have references to one or more of the following predefined variables to obtain WebLogic Server domain information: $(DOMAIN_NAME), $(DOMAIN_HOME), $(ADMIN_NAME), $(ADMIN_PORT), and $(SERVER_NAME). 
If sslEnabled is set to true and the WebLogic Server demo certificate is used, add -Dweblogic.security.SSL.ignoreHostnameVerification=true to allow the Managed Servers to connect to the Administration Server while booting up. The WebLogic Server generated demo certificate in this environment typically contains a host name that is different from the runtime container\u0026rsquo;s host name. -Dweblogic.StdoutDebugEnabled=false logHome The in-pod location for the domain log, server logs, server out, and Node Manager log files. If not specified, the value is derived from the domainUID as /shared/logs/\u0026lt;domainUID\u0026gt;. /u01/oracle/user_projects/domains/logs/soainfra soaManagedServerNameBase Base string used to generate Managed Server names in the SOA cluster. The default value is soa_server. This configuration parameter is applicable only for soa and soaosb domain types. soa_server osbManagedServerNameBase Base string used to generate Managed Server names in the Oracle Service Bus cluster. The default value is osb_server. This configuration parameter is applicable only for osb and soaosb domain types. osb_server soaManagedServerPort Port number for each Managed Server in the SOA cluster. This configuration parameter is applicable only for soa and soaosb domain types. 8001 osbManagedServerPort Port number for each Managed Server in the Oracle Service Bus cluster. This configuration parameter is applicable only for osb and soaosb domain types. 9001 soaManagedServerSSLPort SSL port number for each Managed Server in the SOA cluster. This configuration parameter is applicable only for soa and soaosb domain types. 8002 osbManagedServerSSLPort SSL port number for each Managed Server in the Oracle Service Bus cluster. This configuration parameter is applicable only for osb and soaosb domain types. 9002 namespace Kubernetes namespace in which to create the domain. soans persistentVolumeClaimName Name of the persistent volume claim created to host the domain home. 
If not specified, the value is derived from the domainUID as \u0026lt;domainUID\u0026gt;-weblogic-sample-pvc. soainfra-domain-pvc productionModeEnabled Boolean value indicating if production mode is enabled for the domain. true serverStartPolicy Determines which WebLogic Server instances will be started. Valid values are NEVER, IF_NEEDED, ADMIN_ONLY. IF_NEEDED t3ChannelPort Port for the T3 channel of the NetworkAccessPoint. 30012 t3PublicAddress Public address for the T3 channel. This should be set to the public address of the Kubernetes cluster. This would typically be a load balancer address. For development environments only: In a single server (all-in-one) Kubernetes deployment, this may be set to the address of the master, or at the very least, it must be set to the address of one of the worker nodes. If not provided, the script will attempt to set it to the IP address of the Kubernetes cluster. weblogicCredentialsSecretName Name of the Kubernetes secret for the Administration Server\u0026rsquo;s user name and password. If not specified, then the value is derived from the domainUID as \u0026lt;domainUID\u0026gt;-weblogic-credentials. soainfra-domain-credentials weblogicImagePullSecretName Name of the Kubernetes secret for the Docker Store, used to pull the WebLogic Server image. serverPodCpuRequest, serverPodMemoryRequest, serverPodCpuCLimit, serverPodMemoryLimit The maximum amount of compute resources allowed, and minimum amount of compute resources required, for each server pod. Refer to the Kubernetes documentation on Managing Compute Resources for Containers for details. Resource requests and resource limits are not specified. rcuSchemaPrefix The schema prefix to use in the database. For example SOA1. You may wish to make this the same as the domainUID in order to simplify matching domains to their RCU schemas. SOA1 rcuDatabaseURL The database URL. 
oracle-db.default.svc.cluster.local:1521/devpdb.k8s rcuCredentialsSecret The Kubernetes secret containing the database credentials. soainfra-rcu-credentials persistentStore The persistent store for \u0026lsquo;JMS servers\u0026rsquo; and \u0026lsquo;Transaction log store\u0026rsquo; in the domain. Valid values are jdbc, file. jdbc Note that the names of the Kubernetes resources in the generated YAML files may be formed with the value of some of the properties specified in the create-domain-inputs.yaml file. Those properties include the adminServerName, soaClusterName, and soaManagedServerNameBase etc. If those values contain any characters that are invalid in a Kubernetes service name, those characters are converted to valid values in the generated YAML files. For example, an uppercase letter is converted to a lowercase letter and an underscore (\u0026quot;_\u0026quot;) is converted to a hyphen (\u0026quot;-\u0026quot;).\nThe sample demonstrates how to create an Oracle SOA Suite domain home and associated Kubernetes resources for the domain. In addition, the sample provides the capability for users to supply their own scripts to create the domain home for other use cases. The generated domain YAML file could also be modified to cover more use cases.\nRun the create domain script Run the create domain script, specifying your inputs file and an output directory to store the generated artifacts:\n$ ./create-domain.sh \\ -i create-domain-inputs.yaml \\ -o \u0026lt;path to output-directory\u0026gt; The script will perform the following steps:\n Create a directory for the generated Kubernetes YAML files for this domain if it does not already exist. The path name is \u0026lt;path to output-directory\u0026gt;/weblogic-domains/\u0026lt;domainUID\u0026gt;. 
If the directory already exists, its contents must be removed before using this script.\n Create a Kubernetes job that will start up a utility Oracle SOA Suite container and run offline WLST scripts to create the domain on the shared storage.\n Run and wait for the job to finish.\n Create a Kubernetes domain YAML file, domain.yaml, in the \u0026ldquo;output\u0026rdquo; directory that was created above. This YAML file can be used to create the Kubernetes resource using the kubectl create -f or kubectl apply -f command:\n$ kubectl apply -f \u0026lt;path to output-directory\u0026gt;/weblogic-domains/\u0026lt;domainUID\u0026gt;/domain.yaml Create a convenient utility script, delete-domain-job.yaml, to clean up the domain home created by the create script.\n The default domain created by the script has the following characteristics:\n An Administration Server named AdminServer listening on port 7001. A configured cluster named soa_cluster of size 5. Two Managed Servers, named soa_server1 and soa_server2, listening on port 8001. Log files that are located in /shared/logs/\u0026lt;domainUID\u0026gt;. SOA Infra, SOA Composer, and WorklistApp applications deployed. Refer to the troubleshooting page to troubleshoot issues during the domain creation.\n Verify the results The create domain script verifies that the domain was created, and reports failure if there is an error. However, it may be desirable to manually verify the domain, even if just to gain familiarity with the various Kubernetes objects that were created by the script.\nGenerated YAML files with the default inputs Click here to see sample content of the generated `domain.yaml` for `soaosb` domainType that creates SOA and Oracle Service Bus clusters. $ cat output/weblogic-domains/soainfra/domain.yaml # Copyright (c) 2020, 2021, Oracle and/or its affiliates. # Licensed under the Universal Permissive License v 1.0 as shown at https://oss.oracle.com/licenses/upl. 
# # This is an example of how to define a Domain resource. # apiVersion: \u0026quot;weblogic.oracle/v8\u0026quot; kind: Domain metadata: name: soainfra namespace: soans labels: weblogic.domainUID: soainfra spec: # The WebLogic Domain Home domainHome: /u01/oracle/user_projects/domains/soainfra # The domain home source type # Set to PersistentVolume for domain-in-pv, Image for domain-in-image, or FromModel for model-in-image domainHomeSourceType: PersistentVolume # The WebLogic Server image that the Operator uses to start the domain image: \u0026quot;soasuite:12.2.1.4\u0026quot; # imagePullPolicy defaults to \u0026quot;Always\u0026quot; if image version is :latest imagePullPolicy: \u0026quot;IfNotPresent\u0026quot; # Identify which Secret contains the credentials for pulling an image #imagePullSecrets: #- name: # Identify which Secret contains the WebLogic Admin credentials (note that there is an example of # how to create that Secret at the end of this file) webLogicCredentialsSecret: name: soainfra-domain-credentials # Whether to include the server out file into the pod's stdout, default is true includeServerOutInPodLog: true # Whether to enable log home logHomeEnabled: true # Whether to write HTTP access log file to log home httpAccessLogInLogHome: true # The in-pod location for domain log, server logs, server out, introspector out, and Node Manager log files logHome: /u01/oracle/user_projects/domains/logs/soainfra # An (optional) in-pod location for data storage of default and custom file stores. # If not specified or the value is either not set or empty (e.g. dataHome: \u0026quot;\u0026quot;) then the # data storage directories are determined from the WebLogic domain home configuration. 
dataHome: \u0026quot;\u0026quot; # serverStartPolicy legal values are \u0026quot;NEVER\u0026quot;, \u0026quot;IF_NEEDED\u0026quot;, or \u0026quot;ADMIN_ONLY\u0026quot; # This determines which WebLogic Servers the Operator will start up when it discovers this Domain # - \u0026quot;NEVER\u0026quot; will not start any server in the domain # - \u0026quot;ADMIN_ONLY\u0026quot; will start up only the administration server (no managed servers will be started) # - \u0026quot;IF_NEEDED\u0026quot; will start all non-clustered servers, including the administration server and clustered servers up to the replica count serverStartPolicy: \u0026quot;IF_NEEDED\u0026quot; serverPod: # an (optional) list of environment variable to be set on the servers env: - name: JAVA_OPTIONS value: \u0026quot;-Dweblogic.StdoutDebugEnabled=false -Dweblogic.ssl.Enabled=true -Dweblogic.security.SSL.ignoreHostnameVerification=true\u0026quot; - name: USER_MEM_ARGS value: \u0026quot;-Djava.security.egd=file:/dev/./urandom -Xms256m -Xmx1024m \u0026quot; volumes: - name: weblogic-domain-storage-volume persistentVolumeClaim: claimName: soainfra-domain-pvc volumeMounts: - mountPath: /u01/oracle/user_projects name: weblogic-domain-storage-volume # adminServer is used to configure the desired behavior for starting the administration server. 
adminServer: # serverStartState legal values are \u0026quot;RUNNING\u0026quot; or \u0026quot;ADMIN\u0026quot; # \u0026quot;RUNNING\u0026quot; means the listed server will be started up to \u0026quot;RUNNING\u0026quot; mode # \u0026quot;ADMIN\u0026quot; means the listed server will be start up to \u0026quot;ADMIN\u0026quot; mode serverStartState: \u0026quot;RUNNING\u0026quot; adminService: channels: # The Admin Server's NodePort # - channelName: default # nodePort: 30701 # Uncomment to export the T3Channel as a service - channelName: T3Channel serverPod: # an (optional) list of environment variable to be set on the admin servers env: - name: USER_MEM_ARGS value: \u0026quot;-Djava.security.egd=file:/dev/./urandom -Xms512m -Xmx1024m \u0026quot; # clusters is used to configure the desired behavior for starting member servers of a cluster. # If you use this entry, then the rules will be applied to ALL servers that are members of the named clusters. clusters: - clusterName: osb_cluster serverService: precreateService: true serverStartState: \u0026quot;RUNNING\u0026quot; serverPod: env: # This parameter can be used to pass in new system properties, use the space delimiter to append multiple values. # Do not change the below value, only append new values to it. - name: K8S_REFCONF_OVERRIDES value: \u0026quot;-Doracle.sb.tracking.resiliency.MemoryMetricEnabled=false \u0026quot; # Instructs Kubernetes scheduler to prefer nodes for new cluster members where there are not # already members of the same cluster. 
affinity: podAntiAffinity: preferredDuringSchedulingIgnoredDuringExecution: - weight: 100 podAffinityTerm: labelSelector: matchExpressions: - key: \u0026quot;weblogic.clusterName\u0026quot; operator: In values: - $(CLUSTER_NAME) topologyKey: \u0026quot;kubernetes.io/hostname\u0026quot; replicas: 2 # The number of managed servers to start for unlisted clusters # replicas: 1 # Istio # configuration: # istio: # enabled: # readinessPort: - clusterName: soa_cluster serverService: precreateService: true serverStartState: \u0026quot;RUNNING\u0026quot; serverPod: env: # This parameter can be used to pass in new system properties, use the space delimiter to append multiple values. # Do not change the below value, only append new values to it. - name: K8S_REFCONF_OVERRIDES value: \u0026quot;-Doracle.soa.tracking.resiliency.MemoryMetricEnabled=false \u0026quot; # Instructs Kubernetes scheduler to prefer nodes for new cluster members where there are not # already members of the same cluster. affinity: podAntiAffinity: preferredDuringSchedulingIgnoredDuringExecution: - weight: 100 podAffinityTerm: labelSelector: matchExpressions: - key: \u0026quot;weblogic.clusterName\u0026quot; operator: In values: - $(CLUSTER_NAME) topologyKey: \u0026quot;kubernetes.io/hostname\u0026quot; replicas: 2 # The number of managed servers to start for unlisted clusters # replicas: 1 Verify the domain To confirm that the domain was created, enter the following command:\n$ kubectl describe domain DOMAINUID -n NAMESPACE Replace DOMAINUID with the domainUID and NAMESPACE with the actual namespace.\n Click here to see a sample domain description. 
$ kubectl describe domain soainfra -n soans Name: soainfra Namespace: soans Labels: weblogic.domainUID=soainfra Annotations: \u0026lt;none\u0026gt; API Version: weblogic.oracle/v8 Kind: Domain Metadata: Creation Timestamp: 2021-03-01T05:27:38Z Generation: 1 Managed Fields: API Version: weblogic.oracle/v8 Fields Type: FieldsV1 fieldsV1: f:metadata: f:labels: .: f:weblogic.domainUID: Manager: kubectl Operation: Update Time: 2021-03-01T05:27:38Z API Version: weblogic.oracle/v8 Fields Type: FieldsV1 fieldsV1: f:status: .: f:clusters: f:conditions: f:introspectJobFailureCount: f:servers: f:startTime: Manager: Kubernetes Java Client Operation: Update Time: 2021-03-02T10:26:59Z Resource Version: 13351862 Self Link: /apis/weblogic.oracle/v8/namespaces/soans/domains/soainfra UID: 295dfc48-999e-45e3-b275-9d752587b8d9 Spec: Admin Server: Admin Service: Channels: Channel Name: T3Channel Server Pod: Env: Name: USER_MEM_ARGS Value: -Djava.security.egd=file:/dev/./urandom -Xms512m -Xmx1024m Server Start State: RUNNING Clusters: Cluster Name: osb_cluster Replicas: 2 Server Pod: Affinity: Pod Anti Affinity: Preferred During Scheduling Ignored During Execution: Pod Affinity Term: Label Selector: Match Expressions: Key: weblogic.clusterName Operator: In Values: $(CLUSTER_NAME) Topology Key: kubernetes.io/hostname Weight: 100 Env: Name: K8S_REFCONF_OVERRIDES Value: -Doracle.sb.tracking.resiliency.MemoryMetricEnabled=false Server Service: Precreate Service: true Server Start State: RUNNING Cluster Name: soa_cluster Replicas: 2 Server Pod: Affinity: Pod Anti Affinity: Preferred During Scheduling Ignored During Execution: Pod Affinity Term: Label Selector: Match Expressions: Key: weblogic.clusterName Operator: In Values: $(CLUSTER_NAME) Topology Key: kubernetes.io/hostname Weight: 100 Env: Name: K8S_REFCONF_OVERRIDES Value: -Doracle.soa.tracking.resiliency.MemoryMetricEnabled=false Server Service: Precreate Service: true Server Start State: RUNNING Data Home: Domain Home: 
/u01/oracle/user_projects/domains/soainfra Domain Home Source Type: PersistentVolume Http Access Log In Log Home: true Image: soasuite:12.2.1.4 Image Pull Policy: IfNotPresent Include Server Out In Pod Log: true Log Home: /u01/oracle/user_projects/domains/logs/soainfra Log Home Enabled: true Server Pod: Env: Name: JAVA_OPTIONS Value: -Dweblogic.StdoutDebugEnabled=false -Dweblogic.ssl.Enabled=true -Dweblogic.security.SSL.ignoreHostnameVerification=true Name: USER_MEM_ARGS Value: -Djava.security.egd=file:/dev/./urandom -Xms256m -Xmx1024m Volume Mounts: Mount Path: /u01/oracle/user_projects Name: weblogic-domain-storage-volume Volumes: Name: weblogic-domain-storage-volume Persistent Volume Claim: Claim Name: soainfra-domain-pvc Server Start Policy: IF_NEEDED Web Logic Credentials Secret: Name: soainfra-domain-credentials Status: Clusters: Cluster Name: osb_cluster Maximum Replicas: 5 Minimum Replicas: 0 Ready Replicas: 2 Replicas: 2 Replicas Goal: 2 Cluster Name: soa_cluster Maximum Replicas: 5 Minimum Replicas: 0 Ready Replicas: 2 Replicas: 2 Replicas Goal: 2 Conditions: Last Transition Time: 2021-03-02T10:26:59.683Z Reason: ManagedServersStarting Status: True Type: Progressing Introspect Job Failure Count: 0 Servers: Desired State: RUNNING Node Name: k8sdev Server Name: AdminServer State: UNKNOWN Cluster Name: osb_cluster Desired State: RUNNING Node Name: k8sdev Server Name: osb_server1 State: UNKNOWN Cluster Name: osb_cluster Desired State: RUNNING Node Name: k8sdev Server Name: osb_server2 State: UNKNOWN Cluster Name: osb_cluster Desired State: SHUTDOWN Server Name: osb_server3 Cluster Name: osb_cluster Desired State: SHUTDOWN Server Name: osb_server4 Cluster Name: osb_cluster Desired State: SHUTDOWN Server Name: osb_server5 Cluster Name: soa_cluster Desired State: RUNNING Node Name: k8sdev Server Name: soa_server1 State: UNKNOWN Cluster Name: soa_cluster Desired State: RUNNING Node Name: k8sdev Server Name: soa_server2 State: UNKNOWN Cluster Name: soa_cluster 
Desired State: SHUTDOWN Server Name: soa_server3 Cluster Name: soa_cluster Desired State: SHUTDOWN Server Name: soa_server4 Cluster Name: soa_cluster Desired State: SHUTDOWN Server Name: soa_server5 Start Time: 2021-03-01T05:27:38.844Z Events: \u0026lt;none\u0026gt; In the Status section of the output, the available servers and clusters are listed. Note that if this command is issued very soon after the script finishes, there may be no servers available yet, or perhaps only the Administration Server but no Managed Servers. The operator will start up the Administration Server first and wait for it to become ready before starting the Managed Servers.\nVerify the pods Enter the following command to see the pods running the servers:\n$ kubectl get pods -n NAMESPACE Here is an example of the output of this command. You can verify that an Administration Server and two Managed Servers for each cluster (SOA and Oracle Service Bus) are running for soaosb domain type.\n$ kubectl get pods -n soans NAME READY STATUS RESTARTS AGE soainfra-adminserver 1/1 Running 0 53m soainfra-osb-server1 1/1 Running 0 50m soainfra-osb-server2 1/1 Running 0 50m soainfra-soa-server1 1/1 Running 0 50m soainfra-soa-server2 1/1 Running 0 50m Verify the services Enter the following command to see the services for the domain:\n$ kubectl get services -n NAMESPACE Here is an example of the output of this command. You can verify that services for Administration Server and Managed Servers (for SOA and Oracle Service Bus clusters) are created for soaosb domain type.\n Click here to see a sample list of services. 
$ kubectl get services -n soans NAME TYPE CLUSTER-IP EXTERNAL-IP PORT(S) AGE soainfra-adminserver ClusterIP None \u0026lt;none\u0026gt; 30012/TCP,7001/TCP,7002/TCP 54m soainfra-cluster-osb-cluster ClusterIP 10.100.138.57 \u0026lt;none\u0026gt; 9001/TCP,9002/TCP 51m soainfra-cluster-soa-cluster ClusterIP 10.99.117.240 \u0026lt;none\u0026gt; 8001/TCP,8002/TCP 51m soainfra-osb-server1 ClusterIP None \u0026lt;none\u0026gt; 9001/TCP,9002/TCP 51m soainfra-osb-server2 ClusterIP None \u0026lt;none\u0026gt; 9001/TCP,9002/TCP 51m soainfra-osb-server3 ClusterIP 10.108.71.8 \u0026lt;none\u0026gt; 9001/TCP,9002/TCP 51m soainfra-osb-server4 ClusterIP 10.100.1.144 \u0026lt;none\u0026gt; 9001/TCP,9002/TCP 51m soainfra-osb-server5 ClusterIP 10.108.57.147 \u0026lt;none\u0026gt; 9001/TCP,9002/TCP 51m soainfra-soa-server1 ClusterIP None \u0026lt;none\u0026gt; 8001/TCP,8002/TCP 51m soainfra-soa-server2 ClusterIP None \u0026lt;none\u0026gt; 8001/TCP,8002/TCP 51m soainfra-soa-server3 ClusterIP 10.98.160.126 \u0026lt;none\u0026gt; 8001/TCP,8002/TCP 51m soainfra-soa-server4 ClusterIP 10.105.164.133 \u0026lt;none\u0026gt; 8001/TCP,8002/TCP 51m soainfra-soa-server5 ClusterIP 10.109.168.179 \u0026lt;none\u0026gt; 8001/TCP,8002/TCP 51m " +}, +{ + "uri": "/fmw-kubernetes/21.4.2/soa-domains/adminguide/", + "title": "Administration Guide", + "tags": [], + "description": "Describes how to use some of the common utility tools and configurations to administer Oracle SOA Suite domains.", + "content": "Administer Oracle SOA Suite domains in Kubernetes.\n Set up a load balancer Configure different load balancers for Oracle SOA Suite domains.\n Enable additional URL access Extend an existing ingress to enable additional application URL access for Oracle SOA Suite domains.\n Configure SSL certificates Create and configure custom SSL certificates for Oracle SOA Suite domains.\n Monitor a domain and publish logs Monitor an Oracle SOA Suite domain and publish the WebLogic Server logs to Elasticsearch.\n 
Expose the T3/T3S protocol Create a T3/T3S channel and the corresponding Kubernetes service to expose the T3/T3S protocol for the Administration Server and Managed Servers in an Oracle SOA Suite domain.\n Deploy composite applications Deploy composite applications for Oracle SOA Suite and Oracle Service Bus domains.\n Persist adapter customizations Persist the customizations done for Oracle SOA Suite adapters.\n Perform WLST operations Perform WLST administration operations using a helper pod running in the same Kubernetes cluster as the Oracle SOA Suite domain.\n " +}, +{ + "uri": "/fmw-kubernetes/21.4.2/oid/prepare-your-environment/", + "title": "Prepare Your Environment", + "tags": [], + "description": "Prepare your environment", + "content": " Set up your Kubernetes Cluster Check the Kubernetes Cluster is Ready Install the Oracle Internet Directory Image Setup the Code Repository To Deploy Oracle Internet Directory Set up your Kubernetes Cluster If you need help setting up a Kubernetes environment, check our cheat sheet.\nIt is recommended you have a master node and one or more worker nodes. The examples in this documentation assume one master and two worker nodes.\nAfter creating Kubernetes clusters, you can optionally:\n Configure an Ingress to direct traffic to backend instances. 
Check the Kubernetes Cluster is Ready Run the following command on the master node to check the cluster and worker nodes are running: $ kubectl get nodes,pods -n kube-system The output will look similar to the following:\nNAME STATUS ROLES AGE VERSION node/10.89.73.203 Ready \u0026lt;none\u0026gt; 66d v1.18.4 node/10.89.73.204 Ready \u0026lt;none\u0026gt; 66d v1.18.4 node/10.89.73.42 Ready master 67d v1.18.4 NAME READY STATUS RESTARTS AGE pod/coredns-66bff467f8-slxdq 1/1 Running 1 67d pod/coredns-66bff467f8-v77qt 1/1 Running 1 67d pod/etcd-10.89.73.42 1/1 Running 1 67d pod/kube-apiserver-10.89.73.42 1/1 Running 1 67d pod/kube-controller-manager-10.89.73.42 1/1 Running 27 67d pod/kube-flannel-ds-amd64-r2m8r 1/1 Running 2 48d pod/kube-flannel-ds-amd64-rdhrf 1/1 Running 2 6d1h pod/kube-flannel-ds-amd64-vpcbj 1/1 Running 3 66d pod/kube-proxy-jtcxm 1/1 Running 1 67d pod/kube-proxy-swfmm 1/1 Running 1 66d pod/kube-proxy-w6x6t 1/1 Running 1 66d pod/kube-scheduler-10.89.73.42 1/1 Running 29 67d Install the Oracle Internet Directory Image You can deploy Oracle Internet Directory images in the following ways:\n Download a pre-built Oracle Internet Directory image from My Oracle Support. by referring to the document ID 2723908.1. This image is prebuilt by Oracle and includes Oracle Internet Directory 12.2.1.4.0 and the latest PSU. Build your own Oracle Internet Directory container image either by using the WebLogic Image Tool or by using the dockerfile, scripts and base image from Oracle Container Registry (OCR). You can also build your own image by using only the dockerfile and scripts. For more information about the various ways in which you can build your own container image, see Installing the Oracle Internet Directory Image. Choose one of these options based on your requirements.\nThe Oracle Internet Directory image must be installed on the master node and each of the worker nodes in your Kubernetes cluster. 
Alternatively you can place the image in a docker registry that your cluster can access.\n After installing the Oracle Internet Directory image run the following command to make sure the image is installed correctly on the master and worker nodes:\n$ docker images The output will look similar to the following:\nREPOSITORY TAG IMAGE ID CREATED SIZE oracle/oid 12.2.1.4.0 8a937042bef3 3 weeks ago 992MB k8s.gcr.io/kube-proxy v1.18.4 718fa77019f2 3 months ago 117MB k8s.gcr.io/kube-scheduler v1.18.4 c663567f869e 3 months ago 95.3MB k8s.gcr.io/kube-controller-manager v1.18.4 e8f1690127c4 3 months ago 162MB k8s.gcr.io/kube-apiserver v1.18.4 408913fc18eb 3 months ago 173MB quay.io/coreos/flannel v0.12.0-amd64 4e9f801d2217 6 months ago 52.8MB k8s.gcr.io/pause 3.2 80d28bedfe5d 7 months ago 683kB k8s.gcr.io/coredns 1.6.7 67da37a9a360 8 months ago 43.8MB k8s.gcr.io/etcd 3.4.3-0 303ce5db0e90 11 months ago 288MB quay.io/prometheus/node-exporter v0.18.1 e5a616e4b9cf 16 months ago 22.9MB quay.io/coreos/kube-rbac-proxy v0.4.1 70eeaa7791f2 20 months ago 41.3MB ... Setup the Code Repository To Deploy Oracle Internet Directory Oracle Internet Directory deployment on Kubernetes leverages deployment scripts provided by Oracle for creating Oracle Internet Directory containers using samples or Helm charts provided. 
To deploy Oracle Internet Directory on Kubernetes you should set up the deployment scripts on the master node as below:\nCreate a working directory to setup the source code.\n$ mkdir \u0026lt;work directory\u0026gt; For example:\n$ mkdir /scratch/OIDContainer From the directory you created, download the Oracle Internet Directory deployment scripts from the Oracle Internet Directory repository.\n$ git clone https://github.com/oracle/fmw-kubernetes.git --branch release/21.4.2 You can now use the deployment scripts from \u0026lt;work directory\u0026gt;/fmw-kubernetes/OracleInternetDirectory/kubernetes/samples/ to set up the Oracle Internet Directory environments as further described in this document.\n" +}, +{ + "uri": "/fmw-kubernetes/21.4.2/oud/prepare-your-environment/", + "title": "Prepare Your Environment", + "tags": [], + "description": "Prepare your environment", + "content": " Set up your Kubernetes Cluster Check the Kubernetes Cluster is Ready Install the Oracle Unified Directory Image Setup the Code Repository To Deploy Oracle Unified Directory Set up your Kubernetes Cluster If you need help setting up a Kubernetes environment, check our cheat sheet.\nIt is recommended you have a master node and one or more worker nodes. The examples in this documentation assume one master and two worker nodes.\nVerify that the system clocks on each host computer are synchronized. You can do this by running the date command simultaneously on all the hosts in each cluster.\nAfter creating Kubernetes clusters, you can optionally:\n Configure an Ingress to direct traffic to backend instances. 
Check the Kubernetes Cluster is Ready Run the following command on the master node to check the cluster and worker nodes are running: $ kubectl get nodes,pods -n kube-system The output will look similar to the following:\nNAME STATUS ROLES AGE VERSION node/10.89.73.203 Ready \u0026lt;none\u0026gt; 66d v1.18.4 node/10.89.73.204 Ready \u0026lt;none\u0026gt; 66d v1.18.4 node/10.89.73.42 Ready master 67d v1.18.4 NAME READY STATUS RESTARTS AGE pod/coredns-66bff467f8-slxdq 1/1 Running 1 67d pod/coredns-66bff467f8-v77qt 1/1 Running 1 67d pod/etcd-10.89.73.42 1/1 Running 1 67d pod/kube-apiserver-10.89.73.42 1/1 Running 1 67d pod/kube-controller-manager-10.89.73.42 1/1 Running 27 67d pod/kube-flannel-ds-amd64-r2m8r 1/1 Running 2 48d pod/kube-flannel-ds-amd64-rdhrf 1/1 Running 2 6d1h pod/kube-flannel-ds-amd64-vpcbj 1/1 Running 3 66d pod/kube-proxy-jtcxm 1/1 Running 1 67d pod/kube-proxy-swfmm 1/1 Running 1 66d pod/kube-proxy-w6x6t 1/1 Running 1 66d pod/kube-scheduler-10.89.73.42 1/1 Running 29 67d Install the Oracle Unified Directory Image You can deploy Oracle Unified Directory images in the following ways:\n Download a pre-built Oracle Unified Directory image from My Oracle Support. by referring to the document ID 2723908.1. This image is prebuilt by Oracle and includes Oracle Unified Directory 12.2.1.4.0 and the latest PSU. Build your own Oracle Unified Directory container image either by using the WebLogic Image Tool or by using the dockerfile, scripts and base image from Oracle Container Registry (OCR). You can also build your own image by using only the dockerfile and scripts. For more information about the various ways in which you can build your own container image, see Installing the Oracle Unified Directory Image. Choose one of these options based on your requirements.\nThe Oracle Unified Directory image must be installed on the master node and each of the worker nodes in your Kubernetes cluster. 
Alternatively you can place the image in a docker registry that your cluster can access.\n After installing the Oracle Unified Directory image run the following command to make sure the image is installed correctly on the master and worker nodes:\n$ docker images The output will look similar to the following:\nREPOSITORY TAG IMAGE ID CREATED SIZE oracle/oud 12.2.1.4.0 8a937042bef3 3 weeks ago 992MB k8s.gcr.io/kube-proxy v1.18.4 718fa77019f2 3 months ago 117MB k8s.gcr.io/kube-scheduler v1.18.4 c663567f869e 3 months ago 95.3MB k8s.gcr.io/kube-controller-manager v1.18.4 e8f1690127c4 3 months ago 162MB k8s.gcr.io/kube-apiserver v1.18.4 408913fc18eb 3 months ago 173MB quay.io/coreos/flannel v0.12.0-amd64 4e9f801d2217 6 months ago 52.8MB k8s.gcr.io/pause 3.2 80d28bedfe5d 7 months ago 683kB k8s.gcr.io/coredns 1.6.7 67da37a9a360 8 months ago 43.8MB k8s.gcr.io/etcd 3.4.3-0 303ce5db0e90 11 months ago 288MB quay.io/prometheus/node-exporter v0.18.1 e5a616e4b9cf 16 months ago 22.9MB quay.io/coreos/kube-rbac-proxy v0.4.1 70eeaa7791f2 20 months ago 41.3MB ... Setup the Code Repository To Deploy Oracle Unified Directory Oracle Unified Directory deployment on Kubernetes leverages deployment scripts provided by Oracle for creating Oracle Unified Directory containers using samples or Helm charts provided. 
To deploy Oracle Unified Directory on Kubernetes you should set up the deployment scripts on the master node as below:\nCreate a working directory to setup the source code.\n$ mkdir \u0026lt;work directory\u0026gt; For example:\n$ mkdir /scratch/OUDContainer From the directory you created, download the Oracle Unified Directory deployment scripts from the Oracle Unified Directory repository.\n$ git clone https://github.com/oracle/fmw-kubernetes.git --branch release/21.4.2 You can now use the deployment scripts from \u0026lt;work directory\u0026gt;/fmw-kubernetes/OracleUnifiedDirectory/kubernetes/samples/ to set up the Oracle Unified Directory environments as further described in this document.\n" +}, +{ + "uri": "/fmw-kubernetes/21.4.2/oudsm/prepare-your-environment/", + "title": "Prepare Your Environment", + "tags": [], + "description": "Prepare your environment", + "content": " Set up your Kubernetes Cluster Check the Kubernetes Cluster is Ready Install the Oracle Unified Directory Services Manager Image Setup the Code Repository To Deploy Oracle Unified Directory Services Manager Set up your Kubernetes Cluster If you need help setting up a Kubernetes environment, check our cheat sheet.\nIt is recommended you have a master node and one or more worker nodes. The examples in this documentation assume one master and two worker nodes.\nVerify that the system clocks on each host computer are synchronized. You can do this by running the date command simultaneously on all the hosts in each cluster.\nAfter creating Kubernetes clusters, you can optionally:\n Configure an Ingress to direct traffic to backend instances. 
Check the Kubernetes Cluster is Ready Run the following command on the master node to check the cluster and worker nodes are running: $ kubectl get nodes,pods -n kube-system The output will look similar to the following:\nNAME STATUS ROLES AGE VERSION node/10.89.73.203 Ready \u0026lt;none\u0026gt; 66d v1.18.4 node/10.89.73.204 Ready \u0026lt;none\u0026gt; 66d v1.18.4 node/10.89.73.42 Ready master 67d v1.18.4 NAME READY STATUS RESTARTS AGE pod/coredns-66bff467f8-slxdq 1/1 Running 1 67d pod/coredns-66bff467f8-v77qt 1/1 Running 1 67d pod/etcd-10.89.73.42 1/1 Running 1 67d pod/kube-apiserver-10.89.73.42 1/1 Running 1 67d pod/kube-controller-manager-10.89.73.42 1/1 Running 27 67d pod/kube-flannel-ds-amd64-r2m8r 1/1 Running 2 48d pod/kube-flannel-ds-amd64-rdhrf 1/1 Running 2 6d1h pod/kube-flannel-ds-amd64-vpcbj 1/1 Running 3 66d pod/kube-proxy-jtcxm 1/1 Running 1 67d pod/kube-proxy-swfmm 1/1 Running 1 66d pod/kube-proxy-w6x6t 1/1 Running 1 66d pod/kube-scheduler-10.89.73.42 1/1 Running 29 67d Install the Oracle Unified Directory Services Manager Image You can deploy Oracle Unified Directory Services Manager images in the following ways:\n Download a pre-built Oracle Unified Directory Services Manager image from My Oracle Support. by referring to the document ID 2723908.1. This image is prebuilt by Oracle and includes Oracle Unified Directory 12.2.1.4.0 and the latest PSU. Build your own Oracle Unified Directory Services Manager container image either by using the WebLogic Image Tool or by using the dockerfile, scripts and base image from Oracle Container Registry (OCR). You can also build your own image by using only the dockerfile and scripts. For more information about the various ways in which you can build your own container image, see Installing the Oracle Unified Directory Services Manager Image. 
Choose one of these options based on your requirements.\nThe Oracle Unified Directory Services Manager image must be installed on the master node and each of the worker nodes in your Kubernetes cluster. Alternatively you can place the image in a docker registry that your cluster can access.\n After installing the Oracle Unified Directory Services Manager image run the following command to make sure the image is installed correctly on the master and worker nodes:\n$ docker images The output will look similar to the following:\nREPOSITORY TAG IMAGE ID CREATED SIZE oracle/oudsm 12.2.1.4.0 7157885054a2 2 weeks ago 2.74GB k8s.gcr.io/kube-proxy v1.18.4 718fa77019f2 3 months ago 117MB k8s.gcr.io/kube-scheduler v1.18.4 c663567f869e 3 months ago 95.3MB k8s.gcr.io/kube-controller-manager v1.18.4 e8f1690127c4 3 months ago 162MB k8s.gcr.io/kube-apiserver v1.18.4 408913fc18eb 3 months ago 173MB quay.io/coreos/flannel v0.12.0-amd64 4e9f801d2217 6 months ago 52.8MB k8s.gcr.io/pause 3.2 80d28bedfe5d 7 months ago 683kB k8s.gcr.io/coredns 1.6.7 67da37a9a360 8 months ago 43.8MB k8s.gcr.io/etcd 3.4.3-0 303ce5db0e90 11 months ago 288MB quay.io/prometheus/node-exporter v0.18.1 e5a616e4b9cf 16 months ago 22.9MB quay.io/coreos/kube-rbac-proxy v0.4.1 70eeaa7791f2 20 months ago 41.3MB ... Setup the Code Repository To Deploy Oracle Unified Directory Services Manager Oracle Unified Directory Services Manager deployment on Kubernetes leverages deployment scripts provided by Oracle for creating Oracle Unified Directory Services Manager containers using samples or Helm charts provided. 
To deploy Oracle Unified Directory Services Manager on Kubernetes you should set up the deployment scripts on the master node as below:\nCreate a working directory to setup the source code.\n$ mkdir \u0026lt;work directory\u0026gt; For example:\n$ mkdir /scratch/OUDSMContainer From the directory you created, download the Oracle Unified Directory Services Manager deployment scripts from the Oracle Unified Directory Services Manager repository.\n$ git clone https://github.com/oracle/fmw-kubernetes.git --branch release/21.4.2 You can now use the deployment scripts from \u0026lt;work directory\u0026gt;/fmw-kubernetes/OracleUnifiedDirectorySM/kubernetes/samples/scripts/ to set up the Oracle Unified Directory Services Manager environments as further described in this document.\n" +}, +{ + "uri": "/fmw-kubernetes/21.4.2/oig/", + "title": "Oracle Identity Governance", + "tags": [], + "description": "The WebLogic Kubernetes Operator supports deployment of Oracle Identity Governance. Follow the instructions in this guide to set up Oracle Identity Governance domains on Kubernetes.", + "content": "The WebLogic Kubernetes Operator supports deployment of Oracle Identity Governance (OIG).\nIn this release, OIG domains are supported using the “domain on a persistent volume” model only, where the domain home is located in a persistent volume (PV).\nThe operator has several key features to assist you with deploying and managing OIG domains in a Kubernetes environment. You can:\n Create OIG instances in a Kubernetes persistent volume. This persistent volume can reside in an NFS file system or other Kubernetes volume types. Start servers based on declarative startup parameters and desired states. Expose the OIG Services for external access. Scale OIG domains by starting and stopping Managed Servers on demand. Publish operator and WebLogic Server logs into Elasticsearch and interact with them in Kibana. Monitor the OIG instance using Prometheus and Grafana. 
Current production release The current production release for the Oracle Identity Governance domain deployment on Kubernetes is 21.4.2. This release uses the WebLogic Kubernetes Operator version 3.3.0.\nThis release of the documentation can also be used for 3.1.X and 3.2.0 WebLogic Kubernetes Operator. For 3.0.X WebLogic Kubernetes Operator refer to Version 21.4.1\nLimitations See here for limitations in this release.\nGetting started For detailed information about deploying Oracle Identity Governance domains, start at Prerequisites and follow this documentation sequentially.\nIf performing an Enterprise Deployment, refer to the Enterprise Deployment Guide for Oracle Identity and Access Management in a Kubernetes Cluster instead.\nDocumentation for earlier releases To view documentation for an earlier release, see:\n Version 21.4.1 " +}, +{ + "uri": "/fmw-kubernetes/21.4.2/soa-domains/adminguide/configure-load-balancer/apache/", + "title": "Apache web tier", + "tags": [], + "description": "Configure the Apache web tier load balancer for Oracle SOA Suite domains.", + "content": "This section provides information about how to install and configure the Apache web tier to load balance Oracle SOA Suite domain clusters. 
You can configure Apache web tier for non-SSL and SSL termination access of the application URL.\nFollow these steps to set up the Apache web tier as a load balancer for an Oracle SOA Suite domain in a Kubernetes cluster:\n Build the Apache web tier image Create the Apache plugin configuration file Prepare the certificate and private key Install the Apache web tier Helm chart Verify domain application URL access Uninstall Apache web tier Build the Apache web tier image Refer to the sample, to build the Apache web tier Docker image.\nCreate the Apache plugin configuration file The configuration file named custom_mod_wl_apache.conf should have all the URL routing rules for the Oracle SOA Suite applications deployed in the domain that needs to be accessible externally. Update this file with values based on your environment. The file content is similar to below.\n Click here to see the sample content of the configuration file custom_mod_wl_apache.conf for soa domain # Copyright (c) 2020 Oracle and/or its affiliates. # # Licensed under the Universal Permissive License v 1.0 as shown at https://oss.oracle.com/licenses/upl. 
# \u0026lt;IfModule mod_weblogic.c\u0026gt; WebLogicHost \u0026lt;WEBLOGIC_HOST\u0026gt; WebLogicPort 7001 \u0026lt;/IfModule\u0026gt; # Directive for weblogic admin Console deployed on WebLogic Admin Server \u0026lt;Location /console\u0026gt; SetHandler weblogic-handler WebLogicHost soainfra-adminserver WebLogicPort 7001 \u0026lt;/Location\u0026gt; \u0026lt;Location /em\u0026gt; SetHandler weblogic-handler WebLogicHost soainfra-adminserver WebLogicPort 7001 \u0026lt;/Location\u0026gt; \u0026lt;Location /servicebus\u0026gt; SetHandler weblogic-handler WebLogicHost soainfra-adminserver WebLogicPort 7001 \u0026lt;/Location\u0026gt; \u0026lt;Location /lwpfconsole\u0026gt; SetHandler weblogic-handler WebLogicHost soainfra-adminserver WebLogicPort 7001 \u0026lt;/Location\u0026gt; \u0026lt;Location /xbusrouting\u0026gt; SetHandler weblogic-handler WebLogicHost soainfra-adminserver WebLogicPort 7001 \u0026lt;/Location\u0026gt; \u0026lt;Location /xbustransform\u0026gt; SetHandler weblogic-handler WebLogicHost soainfra-adminserver WebLogicPort 7001 \u0026lt;/Location\u0026gt; \u0026lt;Location /weblogic/ready\u0026gt; SetHandler weblogic-handler WebLogicHost soainfra-adminserver WebLogicPort 7001 \u0026lt;/Location\u0026gt; # Directive for all applications deployed on weblogic cluster with a prepath defined by LOCATION variable. # For example, if the LOCATION is set to \u0026#39;/weblogic\u0026#39;, all applications deployed on the cluster can be accessed via # http://myhost:myport/weblogic/application_end_url # where \u0026#39;myhost\u0026#39; is the IP of the machine that runs the Apache web tier, and # \u0026#39;myport\u0026#39; is the port that the Apache web tier is publicly exposed to. # Note that LOCATION cannot be set to \u0026#39;/\u0026#39; unless this is the only Location module configured. 
\u0026lt;Location /soa-infra\u0026gt; WLSRequest On WebLogicCluster soainfra-cluster-soa-cluster:8001 PathTrim /weblogic1 \u0026lt;/Location\u0026gt; \u0026lt;Location /soa/composer\u0026gt; WLSRequest On WebLogicCluster soainfra-cluster-soa-cluster:8001 PathTrim /weblogic1 \u0026lt;/Location\u0026gt; \u0026lt;Location /integration/worklistapp\u0026gt; WLSRequest On WebLogicCluster soainfra-cluster-soa-cluster:8001 PathTrim /weblogic1 \u0026lt;/Location\u0026gt; \u0026lt;Location /ess\u0026gt; WLSRequest On WebLogicCluster soainfra-cluster-soa-cluster:8001 PathTrim /weblogic1 \u0026lt;/Location\u0026gt; \u0026lt;Location /EssHealthCheck\u0026gt; WLSRequest On WebLogicCluster soainfra-cluster-soa-cluster:8001 PathTrim /weblogic1 \u0026lt;/Location\u0026gt; # Directive for all application deployed on weblogic cluster with a prepath defined by LOCATION2 variable # For example, if the LOCATION2 is set to \u0026#39;/weblogic2\u0026#39;, all applications deployed on the cluster can be accessed via # http://myhost:myport/weblogic2/application_end_url # where \u0026#39;myhost\u0026#39; is the IP of the machine that runs the Apache web tier, and # \u0026#39;myport\u0026#39; is the port that the Apache webt ier is publicly exposed to. #\u0026lt;Location /weblogic2\u0026gt; #WLSRequest On #WebLogicCluster domain2-cluster-cluster-1:8021 #PathTrim /weblogic2 #\u0026lt;/Location\u0026gt; Create a PV and PVC (pv-claim-name) that can be used to store the custom_mod_wl_apache.conf. 
Refer to the Sample for creating a PV or PVC.\n Prepare the certificate and private key (For the SSL termination configuration only) Run the following commands to generate your own certificate and private key using openssl.\n$ cd ${WORKDIR} $ cd charts/apache-samples/custom-sample $ export VIRTUAL_HOST_NAME=WEBLOGIC_HOST $ export SSL_CERT_FILE=WEBLOGIC_HOST.crt $ export SSL_CERT_KEY_FILE=WEBLOGIC_HOST.key $ sh certgen.sh NOTE: Replace WEBLOGIC_HOST with the host name on which Apache web tier is to be installed.\n Click here to see the output of the certifcate generation $ls certgen.sh custom_mod_wl_apache.conf custom_mod_wl_apache.conf_orig input.yaml README.md $ sh certgen.sh Generating certs for WEBLOGIC_HOST Generating a 2048 bit RSA private key ........................+++ .......................................................................+++ unable to write \u0026#39;random state\u0026#39; writing new private key to \u0026#39;apache-sample.key\u0026#39; ----- $ ls certgen.sh custom_mod_wl_apache.conf_orig WEBLOGIC_HOST.info config.txt input.yaml WEBLOGIC_HOST.key custom_mod_wl_apache.conf WEBLOGIC_HOST.crt README.md Prepare input values for the Apache web tier Helm chart.\nRun the following commands to prepare the input value file for the Apache web tier Helm chart.\n$ base64 -i ${SSL_CERT_FILE} | tr -d \u0026#39;\\n\u0026#39; $ base64 -i ${SSL_CERT_KEY_FILE} | tr -d \u0026#39;\\n\u0026#39; $ touch input.yaml Update the input parameters file, charts/apache-samples/custom-sample/input.yaml.\n Click here to see the snapshot of the sample input.yaml file $ cat apache-samples/custom-sample/input.yaml # Use this to provide your own Apache web tier configuration as needed; simply define this # Persistence Volume which contains your own custom_mod_wl_apache.conf file. persistentVolumeClaimName: \u0026lt;pv-claim-name\u0026gt; # The VirtualHostName of the Apache HTTP server. It is used to enable custom SSL configuration. 
virtualHostName: \u0026lt;WEBLOGIC_HOST\u0026gt; # The customer-supplied certificate to use for Apache web tier SSL configuration. # The value must be a string containing a base64 encoded certificate. Run following command to get it. # base64 -i ${SSL_CERT_FILE} | tr -d \u0026#39;\\n\u0026#39; customCert: \u0026lt;cert_data\u0026gt; # The customer-supplied private key to use for Apache web tier SSL configuration. # The value must be a string containing a base64 encoded key. Run following command to get it. # base64 -i ${SSL_KEY_FILE} | tr -d \u0026#39;\\n\u0026#39; customKey: \u0026lt;key_data\u0026gt; Install the Apache web tier Helm chart Install the Apache web tier Helm chart to the domain soans namespace with the specified input parameters:\n$ cd ${WORKDIR}/charts $ kubectl create namespace apache-webtier $ helm install apache-webtier --values apache-samples/custom-sample/input.yaml --namespace soans apache-webtier --set image=oracle/apache:12.2.1.3 Check the status of the Apache web tier:\n$ kubectl get all -n soans | grep apache Sample output of the status of the Apache web tier:\npod/apache-webtier-apache-webtier-65f69dc6bc-zg5pj 1/1 Running 0 22h service/apache-webtier-apache-webtier NodePort 10.108.29.98 \u0026lt;none\u0026gt; 80:30305/TCP,4433:30443/TCP 22h deployment.apps/apache-webtier-apache-webtier 1/1 1 1 22h replicaset.apps/apache-webtier-apache-webtier-65f69dc6bc 1 1 1 22h Verify domain application URL access After the Apache web tier load balancer is running, verify that the domain applications are accessible through the load balancer port 30305/30443. 
The application URLs for domain of type soa are:\n Note: Port 30305 is the LOADBALANCER-Non-SSLPORT and port 30443 is LOADBALANCER-SSLPORT.\n NONSSL configuration http://${LOADBALANCER-HOSTNAME}:${LOADBALANCER-Non-SSLPORT}/weblogic/ready http://${LOADBALANCER-HOSTNAME}:${LOADBALANCER-Non-SSLPORT}/console http://${LOADBALANCER-HOSTNAME}:${LOADBALANCER-Non-SSLPORT}/em http://${LOADBALANCER-HOSTNAME}:${LOADBALANCER-Non-SSLPORT}/soa-infra http://${LOADBALANCER-HOSTNAME}:${LOADBALANCER-Non-SSLPORT}/soa/composer http://${LOADBALANCER-HOSTNAME}:${LOADBALANCER-Non-SSLPORT}/integration/worklistapp SSL configuration https://${LOADBALANCER-HOSTNAME}:${LOADBALANCER-SSLPORT}/weblogic/ready https://${LOADBALANCER-HOSTNAME}:${LOADBALANCER-SSLPORT}/console https://${LOADBALANCER-HOSTNAME}:${LOADBALANCER-SSLPORT}/em https://${LOADBALANCER-HOSTNAME}:${LOADBALANCER-SSLPORT}/soa-infra https://${LOADBALANCER-HOSTNAME}:${LOADBALANCER-SSLPORT}/soa/composer https://${LOADBALANCER-HOSTNAME}:${LOADBALANCER-SSLPORT}/integration/worklistapp Uninstall Apache web tier $ helm delete apache-webtier -n soans " +}, +{ + "uri": "/fmw-kubernetes/21.4.2/soa-domains/adminguide/configuring-custom-ssl-certificates/", + "title": "Configure SSL certificates", + "tags": [], + "description": "Create and configure custom SSL certificates for Oracle SOA Suite domains.", + "content": "Secure Socket Layer (SSL) provides a secured communication for data sent over unsecured networks. In an SSL termination scenario, you can configure SSL between the client browser and the load balancer in your Oracle SOA Suite instance to ensure that applications are accessed securely. In an SSL end-to-end scenario, an Oracle SOA Suite domain is configured to use a self-signed SSL certificate that was generated during domain creation. 
Clients will typically receive a message indicating that the signing CA for the certificate is unknown and not trusted.\nThis section provides details on how to create and configure custom (CA-issued) SSL certificates for Oracle SOA Suite domains in both SSL end-to-end and SSL termination scenarios.\n Create custom SSL certificates in an SSL end-to-end scenario Create custom SSL certificates in an SSL termination at a load balancer Create custom SSL certificates in an SSL end-to-end scenario These steps describe how to replace the identity and trust keystore of an Oracle SOA Suite domain with a custom identity and custom trust keystore and register with digital certificates procured from any third party authority.\nIn this documentation, the registered domain is mydomain.com and the CA signed certificates are taken from mydomain.\nCreate a custom identity and custom trust keystore and generate a certificate signing request (CSR) To create a custom identity and custom trust keystore and generate a CSR:\n Log in to the Enterprise Manager (EM) Console and access the Keystores page by opening WebLogic Domain \u0026gt; Security \u0026gt; Keystore.\n Under the system stripe, click Create Keystore to create a new keystore.\n Provide the following details for custom identity:\nKeystore Name: custIdentity Protection: Select the Password option. Keystore Password: Enter the password. Confirm Password: Confirm the password.\n Click Create Keystore to create another new keystore.\n Provide the following details for custom trust:\n Keystore Name: custTrust Protection: Select the Password option. Keystore Password: Enter the password. Confirm Password: Confirm the password. 
Click Manage on the custIdentity keystore name and provide the password that you specified previously.\n Click Generate Keypair to create a new key pair, and provide the following details for custIdentity with alias as custIdentity and password:\n Alias Name: custIdentity Common Name: Common name, for example, soak8s.mydomain.com (Registered domain name) Organizational Unit: Name of the organizational unit Organization: Organization name Enter City, State, and Country names Key Type: RSA Key Size: 2048 Password: Enter the password Click OK to generate the keypair.\n Select the newly created keypair and click Generate CSR.\n Export the created CSR, share it with Certificate Authority, such as digicert CA, and get root, intermediate, and signed certificates. The certificate is generated for the domain name you used in the Common Name field.\n It is not mandatory to create identity and trust keystore under the system stripe that comes with default provisioning. You can create a new custom stripe and create identity and trust keystores under it.\nShare the CSR with CA to get CA-signed certificates Select the new keypair under the custIdentity and click Generate CSR.\n Export the created CSR and share it with the Certificate Authority and get root, intermediate, and signed certificates. The certificate is generated for the domain name you used in the Common Name field.\n Download the certificates shared in the zip file from the CA. The zip file contains one of the following:\n the three certificates individually - root, intermediate, and signed certificates root and intermediate certificates in one chain and signed certificate separately Double-click the certificate chain for root and intermediate certificates. 
You can see the full chain when you click on the certification path.\n Extract the root and intermediate certificates individually by going to the certification path, select the certificate to be extracted (root or intermediate) and click View Certificate.\n On the View Certificates pop-up, select the Details tab and click Copy to File.\n In the Certificate Export wizard, click Next, then select Base 64 encoded X.509 (CER), and then click Next. Export the certificate.\n Name the exported certificate as root and intermediate certificates respectively.\n Import CA certificates Certificate Authority (CA) certificates must be imported in the following order: first the signed server certificate, then the intermediate certificate, and then the root certificate.\nTo import CA certificates:\n Use WLST commands to import the certificate chain in the identity keystore (custIdentity):\na. Combine the three certificates into a single text file called chain.pem in the following order: signed server certificate, followed by intermediate certificate, followed by root certificate:\n-----BEGIN CERTIFICATE----- \u0026lt;signed server certificate\u0026gt; -----END CERTIFICATE----- -----BEGIN CERTIFICATE----- \u0026lt;intermediate certificate\u0026gt; -----END CERTIFICATE----- -----BEGIN CERTIFICATE----- \u0026lt;root certificate\u0026gt; -----END CERTIFICATE----- b. Place the chain.pem in /tmp from where you will be executing the kubectl commands (for example, on the master node).\nc. Enter the following command to change the file ownership to 1000:1000 user/group:\n$ sudo chown 1000:1000 /tmp/chain.pem d. Copy /tmp/chain.pem into the Administration Server pod (for example, soainfra-adminserver):\n$ kubectl cp /tmp/chain.pem soans/soainfra-adminserver:/tmp/chain.pem e. Exec into the Administration Server pod to perform all operations:\n$ kubectl exec -it soainfra-adminserver -n soans -- bash f. 
Start WLST and access the Oracle Platform Security Services (OPSS) key store service:\n$ cd /u01/oracle/oracle_common/common/bin/ $ ./wlst.sh : : wls:/offline\u0026gt; connect(\u0026quot;weblogic\u0026quot;,\u0026quot;Welcome1\u0026quot;,\u0026quot;t3://soainfra-adminserver:7001\u0026quot;) : : wls:/soainfra/serverConfig/\u0026gt; svc = getOpssService(name='KeyStoreService') g. Use the WLST importKeyStoreCertificate command to import chain.pem:\nsvc.importKeyStoreCertificate(appStripe='stripe', name='keystore', password='password', alias='alias', keypassword='keypassword', type='entrytype',filepath='absolute_file_path') For example:\nwls:/soainfra/serverConfig/\u0026gt; svc.importKeyStoreCertificate(appStripe='system', name='custIdentity', password='welcome1', alias='custIdentity', keypassword='welcome1', type='CertificateChain', filepath='/tmp/chain.pem') h. Exit WLST:\nexit() Use Oracle Enterprise Manager to import the certificate chain into the trust keystore (custTrust):\na. Log in to the Enterprise Manager Console and access the Keystores page by opening WebLogic domain \u0026gt; Security \u0026gt; Keystore.\nb. Select the trust keystore (custTrust) and click Manage.\nc. 
Click Import Certificate and import the certificates in this order:\n the signed server certificate as a trusted certificate (alias mySignedCert)\n the intermediate certificate from CA as a trusted certificate (alias myInterCA)\n the root certificate from CA as a trusted certificate (alias myRootCA)\n Synchronize the local keystore with the security store Synchronize keystores to synchronize information between the domain home and the Oracle Platform Security Services (OPSS) store in the database.\nTo synchronize keystores:\n Exec into the Administration server pod (for example, soainfra-adminserver): $ kubectl exec -it soainfra-adminserver -n soans -- bash Start WLST and access the Oracle Platform Security Services (OPSS) keystore service: $ cd /u01/oracle/oracle_common/common/bin/ $ ./wlst.sh : : wls:/offline\u0026gt; connect(\u0026quot;weblogic\u0026quot;,\u0026quot;Welcome1\u0026quot;,\u0026quot;t3://soainfra-adminserver:7001\u0026quot;) : : wls:/soainfra/serverConfig/\u0026gt; svc = getOpssService(name='KeyStoreService') Enter the following commands to synchronize the custom identity and custom trust keystores: Note: This step is necessary only if you are using the system stripe. 
You do not need to synchronize the keystores if you are using a custom stripe.\n wls:/soainfra/serverConfig/\u0026gt; svc.listKeyStoreAliases(appStripe=\u0026quot;system\u0026quot;, name=\u0026quot;custIdentity\u0026quot;, password=\u0026quot;****\u0026quot;, type=\u0026quot;*\u0026quot;) wls:/soainfra/serverConfig/\u0026gt; syncKeyStores(appStripe='system',keystoreFormat='KSS') wls:/soainfra/serverConfig/\u0026gt; svc.listKeyStoreAliases(appStripe=\u0026quot;system\u0026quot;, name=\u0026quot;custTrust\u0026quot;, password=\u0026quot;****\u0026quot;, type=\u0026quot;*\u0026quot;) wls:/soainfra/serverConfig/\u0026gt; syncKeyStores(appStripe='system',keystoreFormat='KSS') Update the WebLogic keystores with custom identity and trust To update the WebLogic keystores with custom identity and custom trust:\n In the WebLogic Server Administration Console, open Servers \u0026gt; AdminServer \u0026gt; Configurations \u0026gt; Keystores tab.\n Change the Keystores to Custom Identity and Custom Trust and Save.\n Provide the values for Custom Identity:\n Custom Identity Keystore: kss://system/custIdentity Custom Identity KeyStore Type: KSS Custom Identity PassPhrase: enter password given while creating the custIdentity keystore. Confirm Custom Identity PassPhrase: reenter the password. Provide the values for Custom Trust:\n Custom Trust Keystore: kss://system/custTrust Custom Trust KeyStore Type: KSS Custom Trust PassPhrase: enter password given while creating the custTrust keystore. Confirm Custom Trust PassPhrase: reenter the password. Click Save and then Activate changes.\n Open the SSL tab and provide the following details:\n Private Key Alias: custIdentity (this is the alias given while creating the key pair in the custIdentity keystore.) Private Key PassPhrase: enter password given while creating the key pair under the custIdentity keystore. Confirm Private Key PassPhrase: reenter the password. In the Advanced section, change Hostname Verification to None. 
Click Save and Activate changes.\n Repeat steps 1 to 7 for all Managed Servers.\n Restart the domain.\n Once the servers are up and running, you can check if the SSL URLs show the updated certificates.\n For more details, refer to:\n Administering Oracle SOA Cloud Service Administering Oracle Fusion Middleware Create custom SSL certificates in an SSL termination at a load balancer This section provides references to configure a custom SSL certificate at a load balancer.\nThere are multiple CA vendors in the marketplace today, each offering different levels of service at varying price points. Research and choose a CA vendor that meets your service-level and budget requirements.\nFor a CA vendor to issue you a CA-issued SSL certificate, you must provide the following information:\n Your custom domain name. Public information associated with the domain confirming you as the owner. Email address associated with the custom domain for verification. Create a Certificate Signing Request (CSR) for your load balancer and submit the CSR to the CA vendor. After receiving the CA-issued certificate, refer to Administering Oracle SOA Cloud Service to import the CA-issued SSL certificate to the load balancer. If you are using openssl to create the certificates, you can refer to Manually Generate a Certificate Signing Request (CSR) Using OpenSSL to submit the CSR to the CA vendor.\n" +}, +{ + "uri": "/fmw-kubernetes/21.4.2/soa-domains/appendix/docker-k8s-hardening/", + "title": "Security hardening", + "tags": [], + "description": "Review resources for the Docker and Kubernetes cluster hardening.", + "content": "Securing a Kubernetes cluster involves hardening on multiple fronts - securing the API servers, etcd, nodes, container images, container run-time, and the cluster network. Apply principles of defense in depth, principle of least privilege, and minimize the attack surface. Use security tools such as Kube-Bench to verify the cluster\u0026rsquo;s security posture. 
Since Kubernetes is evolving rapidly, refer to Kubernetes Security Overview for the latest information on securing a Kubernetes cluster. Also ensure the deployed Docker containers follow the Docker Security guidance.\nThis section provides references on how to securely configure Docker and Kubernetes.\nReferences Docker hardening\n https://docs.docker.com/engine/security/security/ https://blog.aquasec.com/docker-security-best-practices Kubernetes hardening\n https://kubernetes.io/docs/concepts/security/overview/ https://kubernetes.io/docs/concepts/security/pod-security-standards/ https://blogs.oracle.com/developers/5-best-practices-for-kubernetes-security Security best practices for Oracle WebLogic Server Running in Docker and Kubernetes\n https://blogs.oracle.com/weblogicserver/security-best-practices-for-weblogic-server-running-in-docker-and-kubernetes " +}, +{ + "uri": "/fmw-kubernetes/21.4.2/soa-domains/patch_and_upgrade/upgrade-k8s-cluster/", + "title": "Upgrade a Kubernetes cluster", + "tags": [], + "description": "Upgrade the underlying Kubernetes cluster version in a running SOA Kubernetes environment.", + "content": "These instructions describe how to upgrade a Kubernetes cluster created using kubeadm on which an Oracle SOA Suite domain is deployed. A rolling upgrade approach is used to upgrade nodes (master and worker) of the Kubernetes cluster.\nIt is expected that there will be a down time during the upgrade of the Kubernetes cluster as the nodes need to be drained as part of the upgrade process.\n Prerequisites Review Prerequisites and ensure that your Kubernetes cluster is ready for upgrade. Make sure your environment meets all prerequisites. Make sure the database used for the SOA domain deployment is up and running during the upgrade process. Upgrade the Kubernetes version An upgrade of Kubernetes is supported from one MINOR version to the next MINOR version, or between PATCH versions of the same MINOR. 
For example, you can upgrade from 1.x to 1.x+1, but not from 1.x to 1.x+2. To upgrade a Kubernetes version, first all the master nodes of the Kubernetes cluster must be upgraded sequentially, followed by the sequential upgrade of each worker node.\n See here for Kubernetes official documentation to upgrade from v1.16.x to v1.17.x. See here for Kubernetes official documentation to upgrade from v1.17.x to v1.18.x. See here for Kubernetes official documentation to upgrade from v1.18.x to v1.19.x. " +}, +{ + "uri": "/fmw-kubernetes/21.4.2/oam/prepare-your-environment/", + "title": "Prepare your environment", + "tags": [], + "description": "Sample for creating an OAM domain home on an existing PV or PVC, and the domain resource YAML file for deploying the generated OAM domain.", + "content": "To prepare for Oracle Access Management deployment in a Kubernetes environment, complete the following steps:\n Set up your Kubernetes cluster\n Install Helm\n Check the Kubernetes cluster is ready\n Install the OAM Docker image\n Install the WebLogic Kubernetes Operator docker image\n Set up the code repository to deploy OAM domains\n Install the WebLogic Kubernetes Operator\n Create a namespace for Oracle Access Management\n RCU schema creation\n Preparing the environment for domain creation\na. Creating Kubernetes secrets for the domain and RCU\nb. Create a Kubernetes persistent volume and persistent volume claim\n Set up your Kubernetes cluster If you need help setting up a Kubernetes environment, refer to the official Kubernetes documentation to set up a production grade Kubernetes cluster.\nIt is recommended you have a master node and one or more worker nodes. The examples in this documentation assume one master and two worker nodes.\nVerify that the system clocks on each host computer are synchronized. 
You can do this by running the date command simultaneously on all the hosts in each cluster.\nAfter creating Kubernetes clusters, you can optionally:\n Configure an Ingress to direct traffic to backend domains. Configure Kibana and Elasticsearch for your operator logs. Install Helm As per the prerequisites an installation of Helm is required to create and deploy the necessary resources and then run the operator in a Kubernetes cluster. For Helm installation and usage information, refer to the README.\nCheck the Kubernetes cluster is ready Run the following command on the master node to check the cluster and worker nodes are running:\n$ kubectl get nodes,pods -n kube-system The output will look similar to the following:\nNAME STATUS ROLES AGE VERSION node/worker-node1 Ready \u0026lt;none\u0026gt; 17h v1.20.10 node/worker-node2 Ready \u0026lt;none\u0026gt; 17h v1.20.10 node/master-node Ready control-plane,master 23h v1.20.10 NAME READY STATUS RESTARTS AGE pod/coredns-66bff467f8-fnhbq 1/1 Running 0 23h pod/coredns-66bff467f8-xtc8k 1/1 Running 0 23h pod/etcd-master 1/1 Running 0 21h pod/kube-apiserver-master-node 1/1 Running 0 21h pod/kube-controller-manager-master-node 1/1 Running 0 21h pod/kube-flannel-ds-amd64-lxsfw 1/1 Running 0 17h pod/kube-flannel-ds-amd64-pqrqr 1/1 Running 0 17h pod/kube-flannel-ds-amd64-wj5nh 1/1 Running 0 17h pod/kube-proxy-2kxv2 1/1 Running 0 17h pod/kube-proxy-82vvj 1/1 Running 0 17h pod/kube-proxy-nrgw9 1/1 Running 0 23h pod/kube-scheduler-master 1/1 Running 0 21 Install the OAM Docker image You can deploy OAM Docker images in the following ways:\n Download the latest prebuilt OAM Docker image from My Oracle Support by referring to the document ID 2723908.1. This image is prebuilt by Oracle and includes Oracle Access Management 12.2.1.4.0 and the latest PSU.\n Build your own OAM image using the WebLogic Image Tool or by using the dockerfile, scripts and base images from Oracle Container Registry (OCR). 
You can also build your own image by using only the dockerfile and scripts. For more information about the various ways in which you can build your own container image, see Building the OAM Image.\n Choose one of these options based on your requirements.\nThe OAM Docker image must be installed on the master node and each of the worker nodes in your Kubernetes cluster. Alternatively you can place the image in a Docker registry that your cluster can access.\n After installing the OAM Docker image run the following command to make sure the image is installed correctly on the master and worker nodes:\n$ docker images The output will look similar to the following:\nREPOSITORY TAG IMAGE ID CREATED SIZE oracle/oam 12.2.1.4.0-8-ol7-210721.0755 720a172374e6 2 weeks ago 3.38GB quay.io/coreos/flannel v0.15.0 09b38f011a29 6 days ago 69.5MB rancher/mirrored-flannelcni-flannel-cni-plugin v1.2 98660e6e4c3a 13 days ago 8.98MB k8s.gcr.io/kube-proxy v1.20.10 945c9bce487a 2 months ago 99.7MB k8s.gcr.io/kube-controller-manager v1.20.10 2f450864515d 2 months ago 116MB k8s.gcr.io/kube-apiserver v1.20.10 644cadd07add 2 months ago 122MB k8s.gcr.io/kube-scheduler v1.20.10 4c9be8dc650b 2 months ago 47.3MB k8s.gcr.io/etcd 3.4.13-0 0369cf4303ff 14 months ago 253MB k8s.gcr.io/coredns 1.7.0 bfe3a36ebd25 16 months ago 45.2MB k8s.gcr.io/pause 3.2 80d28bedfe5d 20 months ago 683kB Install the WebLogic Kubernetes Operator Docker image The WebLogic Kubernetes Operator Docker image must be installed on the master node and each of the worker nodes in your Kubernetes cluster. Alternatively you can place the image in a Docker registry that your cluster can access.\n Pull the WebLogic Kubernetes Operator image by running the following command on the master node:\n$ docker pull ghcr.io/oracle/weblogic-kubernetes-operator:3.3.0 The output will look similar to the following:\nTrying to pull repository ghcr.io/oracle/weblogic-kubernetes-operator ... 
3.3.0: Pulling from ghcr.io/oracle/weblogic-kubernetes-operator c828c776e142: Pull complete 175676c54fa1: Pull complete b3231f480c32: Pull complete ea4423fa8daa: Pull complete f3ca38f7f95f: Pull complete effd851583ec: Pull complete 4f4fb700ef54: Pull complete Digest: sha256:3e93848ad2f5b272c88680e7b37a4ee428dd12e4c4c91af6977fd2fa9ec1f9dc Status: Downloaded newer image for ghcr.io/oracle/weblogic-kubernetes-operator:3.3.0 ghcr.io/oracle/weblogic-kubernetes-operator:3.3.0 Run the docker tag command as follows:\n$ docker tag ghcr.io/oracle/weblogic-kubernetes-operator:3.3.0 weblogic-kubernetes-operator:3.3.0 After installing the WebLogic Kubernetes Operator image, repeat the above on the worker nodes.\n Set up the code repository to deploy OAM domains OAM domain deployment on Kubernetes leverages the WebLogic Kubernetes Operator infrastructure. For deploying the OAM domains, you need to set up the deployment scripts on the master node as below:\n Create a working directory to setup the source code.\n$ mkdir \u0026lt;workdir\u0026gt; For example:\n$ mkdir /scratch/OAMK8S Download the latest OAM deployment scripts from the OAM repository.\n$ cd \u0026lt;workdir\u0026gt; $ git clone https://github.com/oracle/fmw-kubernetes.git --branch release/21.4.2 For example:\n$ cd /scratch/OAMK8S $ git clone https://github.com/oracle/fmw-kubernetes.git --branch release/21.4.2 Set the $WORKDIR environment variable as follows:\n$ export WORKDIR=\u0026lt;workdir\u0026gt;/fmw-kubernetes/OracleAccessManagement For example:\n$ export WORKDIR=/scratch/OAMK8S/fmw-kubernetes/OracleAccessManagement Run the following command and see if the WebLogic custom resource definition name already exists:\n$ kubectl get crd In the output you should see:\nNo resources found If you see the following:\nNAME AGE domains.weblogic.oracle 5d then run the following command to delete the existing crd:\n$ kubectl delete crd domains.weblogic.oracle customresourcedefinition.apiextensions.k8s.io 
\u0026#34;domains.weblogic.oracle\u0026#34; deleted Install the WebLogic Kubernetes Operator On the master node run the following command to create a namespace for the operator:\n$ kubectl create namespace \u0026lt;sample-kubernetes-operator-ns\u0026gt; For example:\n$ kubectl create namespace opns The output will look similar to the following:\nnamespace/opns created Create a service account for the operator in the operator\u0026rsquo;s namespace by running the following command:\n$ kubectl create serviceaccount -n \u0026lt;sample-kubernetes-operator-ns\u0026gt; \u0026lt;sample-kubernetes-operator-sa\u0026gt; For example:\n$ kubectl create serviceaccount -n opns op-sa The output will look similar to the following:\nserviceaccount/op-sa created If you want to set up logging and visualization with Elasticsearch and Kibana (post domain creation), edit the $WORKDIR/kubernetes/charts/weblogic-operator/values.yaml and set the parameter elkIntegrationEnabled to true and make sure the following parameters are set:\n# elkIntegrationEnabled specifies whether or not ELK integration is enabled. elkIntegrationEnabled: true # logStashImage specifies the docker image containing logstash. # This parameter is ignored if 'elkIntegrationEnabled' is false. logStashImage: \u0026quot;logstash:6.6.0\u0026quot; # elasticSearchHost specifies the hostname of where elasticsearch is running. # This parameter is ignored if 'elkIntegrationEnabled' is false. elasticSearchHost: \u0026quot;elasticsearch.default.svc.cluster.local\u0026quot; # elasticSearchPort specifies the port number of where elasticsearch is running. # This parameter is ignored if 'elkIntegrationEnabled' is false. 
elasticSearchPort: 9200 After the domain creation see Logging and Visualization in order to complete the setup of Elasticsearch and Kibana.\n Run the following helm command to install and start the operator:\n$ cd $WORKDIR $ helm install weblogic-kubernetes-operator kubernetes/charts/weblogic-operator \\ --namespace \u0026lt;sample-kubernetes-operator-ns\u0026gt; \\ --set image=weblogic-kubernetes-operator:3.3.0 \\ --set serviceAccount=\u0026lt;sample-kubernetes-operator-sa\u0026gt; \\ --set \u0026#34;enableClusterRoleBinding=true\u0026#34; \\ --set \u0026#34;domainNamespaceSelectionStrategy=LabelSelector\u0026#34; \\ --set \u0026#34;domainNamespaceLabelSelector=weblogic-operator\\=enabled\u0026#34; \\ --set \u0026#34;javaLoggingLevel=FINE\u0026#34; --wait For example:\n$ cd $WORKDIR $ helm install weblogic-kubernetes-operator kubernetes/charts/weblogic-operator \\ --namespace opns \\ --set image=weblogic-kubernetes-operator:3.3.0 \\ --set serviceAccount=op-sa \\ --set \u0026#34;enableClusterRoleBinding=true\u0026#34; \\ --set \u0026#34;domainNamespaceSelectionStrategy=LabelSelector\u0026#34; \\ --set \u0026#34;domainNamespaceLabelSelector=weblogic-operator\\=enabled\u0026#34; \\ --set \u0026#34;javaLoggingLevel=FINE\u0026#34; --wait The output will look similar to the following:\nNAME: weblogic-kubernetes-operator LAST DEPLOYED: Fri Oct 29 03:10:39 2021 NAMESPACE: opns STATUS: deployed REVISION: 1 TEST SUITE: None Verify that the operator\u0026rsquo;s pod and services are running by executing the following command:\n$ kubectl get all -n \u0026lt;sample-kubernetes-operator-ns\u0026gt; For example:\n$ kubectl get all -n opns The output will look similar to the following:\nNAME READY STATUS RESTARTS AGE pod/weblogic-operator-676d5cc6f4-wct7b 2/2 Running 0 40s NAME TYPE CLUSTER-IP EXTERNAL-IP PORT(S) AGE service/internal-weblogic-operator-svc ClusterIP 10.101.1.198 \u0026lt;none\u0026gt; 8082/TCP 40s NAME READY UP-TO-DATE AVAILABLE AGE deployment.apps/weblogic-operator 1/1 1 1 40s NAME 
DESIRED CURRENT READY AGE replicaset.apps/weblogic-operator-676d5cc6f4 1 1 1 40s Verify the operator pod\u0026rsquo;s log:\n$ kubectl logs -n \u0026lt;sample-kubernetes-operator-ns\u0026gt; -c weblogic-operator deployments/weblogic-operator For example:\n$ kubectl logs -n opns -c weblogic-operator deployments/weblogic-operator The output will look similar to the following:\n... {\u0026quot;timestamp\u0026quot;:\u0026quot;2021-11-01T10:26:10.917829423Z\u0026quot;,\u0026quot;thread\u0026quot;:13,\u0026quot;fiber\u0026quot;:\u0026quot;\u0026quot;,\u0026quot;namespace\u0026quot;:\u0026quot;\u0026quot;,\u0026quot;domainUID\u0026quot;:\u0026quot;\u0026quot;,\u0026quot;level\u0026quot;:\u0026quot;CONFIG\u0026quot;,\u0026quot;class\u0026quot;:\u0026quot;oracle.kubernetes.operator.TuningParametersImpl\u0026quot;,\u0026quot;method\u0026quot;:\u0026quot;update\u0026quot;,\u0026quot;timeInMillis\u0026quot;:1635762370917,\u0026quot;message\u0026quot;:\u0026quot;Reloading tuning parameters from Operator's config map\u0026quot;,\u0026quot;exception\u0026quot;:\u0026quot;\u0026quot;,\u0026quot;code\u0026quot;:\u0026quot;\u0026quot;,\u0026quot;headers\u0026quot;:{},\u0026quot;body\u0026quot;:\u0026quot;\u0026quot;} {\u0026quot;timestamp\u0026quot;:\u0026quot;2021-11-01T10:26:20.920145876Z\u0026quot;,\u0026quot;thread\u0026quot;:13,\u0026quot;fiber\u0026quot;:\u0026quot;\u0026quot;,\u0026quot;namespace\u0026quot;:\u0026quot;\u0026quot;,\u0026quot;domainUID\u0026quot;:\u0026quot;\u0026quot;,\u0026quot;level\u0026quot;:\u0026quot;CONFIG\u0026quot;,\u0026quot;class\u0026quot;:\u0026quot;oracle.kubernetes.operator.TuningParametersImpl\u0026quot;,\u0026quot;method\u0026quot;:\u0026quot;update\u0026quot;,\u0026quot;timeInMillis\u0026quot;:1635762380920,\u0026quot;message\u0026quot;:\u0026quot;Reloading tuning parameters from Operator's config 
map\u0026quot;,\u0026quot;exception\u0026quot;:\u0026quot;\u0026quot;,\u0026quot;code\u0026quot;:\u0026quot;\u0026quot;,\u0026quot;headers\u0026quot;:{},\u0026quot;body\u0026quot;:\u0026quot;\u0026quot;} {\u0026quot;timestamp\u0026quot;:\u0026quot;2021-11-01T10:26:30.922360564Z\u0026quot;,\u0026quot;thread\u0026quot;:19,\u0026quot;fiber\u0026quot;:\u0026quot;\u0026quot;,\u0026quot;namespace\u0026quot;:\u0026quot;\u0026quot;,\u0026quot;domainUID\u0026quot;:\u0026quot;\u0026quot;,\u0026quot;level\u0026quot;:\u0026quot;CONFIG\u0026quot;,\u0026quot;class\u0026quot;:\u0026quot;oracle.kubernetes.operator.TuningParametersImpl\u0026quot;,\u0026quot;method\u0026quot;:\u0026quot;update\u0026quot;,\u0026quot;timeInMillis\u0026quot;:1635762390922,\u0026quot;message\u0026quot;:\u0026quot;Reloading tuning parameters from Operator's config map\u0026quot;,\u0026quot;exception\u0026quot;:\u0026quot;\u0026quot;,\u0026quot;code\u0026quot;:\u0026quot;\u0026quot;,\u0026quot;headers\u0026quot;:{},\u0026quot;body\u0026quot;:\u0026quot;\u0026quot;} {\u0026quot;timestamp\u0026quot;:\u0026quot;2021-11-01T10:26:40.924847211Z\u0026quot;,\u0026quot;thread\u0026quot;:29,\u0026quot;fiber\u0026quot;:\u0026quot;\u0026quot;,\u0026quot;namespace\u0026quot;:\u0026quot;\u0026quot;,\u0026quot;domainUID\u0026quot;:\u0026quot;\u0026quot;,\u0026quot;level\u0026quot;:\u0026quot;CONFIG\u0026quot;,\u0026quot;class\u0026quot;:\u0026quot;oracle.kubernetes.operator.TuningParametersImpl\u0026quot;,\u0026quot;method\u0026quot;:\u0026quot;update\u0026quot;,\u0026quot;timeInMillis\u0026quot;:1635762400924,\u0026quot;message\u0026quot;:\u0026quot;Reloading tuning parameters from Operator's config map\u0026quot;,\u0026quot;exception\u0026quot;:\u0026quot;\u0026quot;,\u0026quot;code\u0026quot;:\u0026quot;\u0026quot;,\u0026quot;headers\u0026quot;:{},\u0026quot;body\u0026quot;:\u0026quot;\u0026quot;} Create a namespace for Oracle Access Management Run the following command to create a namespace for the domain:\n$ 
kubectl create namespace \u0026lt;domain_namespace\u0026gt; For example:\n$ kubectl create namespace oamns The output will look similar to the following:\nnamespace/oamns created Run the following command to tag the namespace so the WebLogic Kubernetes Operator can manage it:\n$ kubectl label namespaces \u0026lt;domain_namespace\u0026gt; weblogic-operator=enabled For example:\n$ kubectl label namespaces oamns weblogic-operator=enabled The output will look similar to the following:\nnamespace/oamns labeled Run the following command to check the label was created:\n$ kubectl describe namespace \u0026lt;domain_namespace\u0026gt; For example:\n$ kubectl describe namespace oamns The output will look similar to the following:\nName: oamns Labels: weblogic-operator=enabled Annotations: \u0026lt;none\u0026gt; Status: Active No resource quota. No LimitRange resource. RCU schema creation In this section you create the RCU schemas in the Oracle Database.\nBefore following the steps in this section, make sure that the database and listener are up and running and you can connect to the database via SQL*Plus or other client tool.\n Run the following command to create a helper pod to run RCU:\n$ kubectl run helper --image \u0026lt;image_name\u0026gt; -n \u0026lt;domain_namespace\u0026gt; -- sleep infinity For example:\n$ kubectl run helper --image oracle/oam:12.2.1.4.0-8-ol7-210721.0755 -n oamns -- sleep infinity The output will look similar to the following:\npod/helper created Run the following command to check the pod is running:\n$ kubectl get pods -n \u0026lt;domain_namespace\u0026gt; For example:\n$ kubectl get pods -n oamns The output will look similar to the following:\nNAME READY STATUS RESTARTS AGE helper 1/1 Running 0 8s Run the following command to start a bash shell in the helper pod:\n$ kubectl exec -it helper -n \u0026lt;domain_namespace\u0026gt; -- /bin/bash For example:\n$ kubectl exec -it helper -n oamns -- /bin/bash This will take you into a bash shell in the 
running helper pod:\n[oracle@helper ~]$ In the helper bash shell run the following commands to set the environment:\n[oracle@helper ~]$ export CONNECTION_STRING=\u0026lt;db_host.domain\u0026gt;:\u0026lt;db_port\u0026gt;/\u0026lt;service_name\u0026gt; [oracle@helper ~]$ export RCUPREFIX=\u0026lt;rcu_schema_prefix\u0026gt; [oracle@helper ~]$ echo -e \u0026lt;db_pwd\u0026gt;\u0026#34;\\n\u0026#34;\u0026lt;rcu_schema_pwd\u0026gt; \u0026gt; /tmp/pwd.txt [oracle@helper ~]$ cat /tmp/pwd.txt where:\n\u0026lt;db_host.domain\u0026gt;:\u0026lt;db_port\u0026gt;/\u0026lt;service_name\u0026gt;\tis your database connect string\n\u0026lt;rcu_schema_prefix\u0026gt; is the RCU schema prefix you want to set\n\u0026lt;db_pwd\u0026gt; is the SYS password for the database\n\u0026lt;rcu_schema_pwd\u0026gt; is the password you want to set for the \u0026lt;rcu_schema_prefix\u0026gt;\nFor example:\n[oracle@helper ~]$ export CONNECTION_STRING=mydatabasehost.example.com:1521/orcl.example.com [oracle@helper ~]$ export RCUPREFIX=OAMK8S [oracle@helper ~]$ echo -e \u0026lt;password\u0026gt;\u0026#34;\\n\u0026#34;\u0026lt;password\u0026gt; \u0026gt; /tmp/pwd.txt [oracle@helper ~]$ cat /tmp/pwd.txt \u0026lt;password\u0026gt; \u0026lt;password\u0026gt; In the helper bash shell run the following command to create the RCU schemas in the database:\n$ [oracle@helper ~]$ /u01/oracle/oracle_common/bin/rcu -silent -createRepository -databaseType ORACLE -connectString \\ $CONNECTION_STRING -dbUser sys -dbRole sysdba -useSamePasswordForAllSchemaUsers true \\ -selectDependentsForComponents true -schemaPrefix $RCUPREFIX -component MDS -component IAU \\ -component IAU_APPEND -component IAU_VIEWER -component OPSS -component WLS -component STB -component OAM -f \u0026lt; /tmp/pwd.txt The output will look similar to the following:\nRCU Logfile: /tmp/RCU2021-11-01_10-29_561898106/logs/rcu.log Processing command line .... 
Repository Creation Utility - Checking Prerequisites Checking Global Prerequisites Repository Creation Utility - Checking Prerequisites Checking Component Prerequisites Repository Creation Utility - Creating Tablespaces Validating and Creating Tablespaces Create tablespaces in the repository database Repository Creation Utility - Create Repository Create in progress. Executing pre create operations Percent Complete: 18 Percent Complete: 18 Percent Complete: 19 Percent Complete: 20 Percent Complete: 21 Percent Complete: 21 Percent Complete: 22 Percent Complete: 22 Creating Common Infrastructure Services(STB) Percent Complete: 30 Percent Complete: 30 Percent Complete: 39 Percent Complete: 39 Percent Complete: 39 Creating Audit Services Append(IAU_APPEND) Percent Complete: 46 Percent Complete: 46 Percent Complete: 55 Percent Complete: 55 Percent Complete: 55 Creating Audit Services Viewer(IAU_VIEWER) Percent Complete: 62 Percent Complete: 62 Percent Complete: 63 Percent Complete: 63 Percent Complete: 64 Percent Complete: 64 Creating Metadata Services(MDS) Percent Complete: 73 Percent Complete: 73 Percent Complete: 73 Percent Complete: 74 Percent Complete: 74 Percent Complete: 75 Percent Complete: 75 Percent Complete: 75 Creating Weblogic Services(WLS) Percent Complete: 80 Percent Complete: 80 Percent Complete: 83 Percent Complete: 83 Percent Complete: 91 Percent Complete: 98 Percent Complete: 98 Creating Audit Services(IAU) Percent Complete: 100 Creating Oracle Platform Security Services(OPSS) Creating Oracle Access Manager(OAM) Executing post create operations Repository Creation Utility: Create - Completion Summary Database details: ----------------------------- Host Name : mydatabasehost.example.com Port : 1521 Service Name : ORCL.EXAMPLE.COM Connected As : sys Prefix for (prefixable) Schema Owners : OAMK8S RCU Logfile : /tmp/RCU2021-11-01_10-29_561898106/logs/rcu.log Component schemas created: ----------------------------- Component Status Logfile Common 
Infrastructure Services Success /tmp/RCU2021-11-01_10-29_561898106/logs/stb.log Oracle Platform Security Services Success /tmp/RCU2021-11-01_10-29_561898106/logs/opss.log Oracle Access Manager Success /tmp/RCU2021-11-01_10-29_561898106/logs/oam.log Audit Services Success /tmp/RCU2021-11-01_10-29_561898106/logs/iau.log Audit Services Append Success /tmp/RCU2021-11-01_10-29_561898106/logs/iau_append.log Audit Services Viewer Success /tmp/RCU2021-11-01_10-29_561898106/logs/iau_viewer.log Metadata Services Success /tmp/RCU2021-11-01_10-29_561898106/logs/mds.log WebLogic Services Success /tmp/RCU2021-11-01_10-29_561898106/logs/wls.log Repository Creation Utility - Create : Operation Completed [oracle@helper ~]$ Exit the helper bash shell by issuing the command exit.\n Preparing the environment for domain creation In this section you prepare the environment for the OAM domain creation. This involves the following steps:\na. Creating Kubernetes secrets for the domain and RCU\nb. Create a Kubernetes persistent volume and persistent volume claim\nCreating Kubernetes secrets for the domain and RCU Create a Kubernetes secret for the domain using the create-weblogic-credentials script in the same Kubernetes namespace as the domain:\n$ cd $WORKDIR/kubernetes/create-weblogic-domain-credentials $ ./create-weblogic-credentials.sh -u weblogic -p \u0026lt;pwd\u0026gt; -n \u0026lt;domain_namespace\u0026gt; -d \u0026lt;domain_uid\u0026gt; -s \u0026lt;kubernetes_domain_secret\u0026gt; where:\n-u weblogic is the WebLogic username\n-p \u0026lt;pwd\u0026gt; is the password for the weblogic user\n-n \u0026lt;domain_namespace\u0026gt; is the domain namespace\n-d \u0026lt;domain_uid\u0026gt; is the domain UID to be created. The default is domain1 if not specified\n-s \u0026lt;kubernetes_domain_secret\u0026gt; is the name you want to create for the secret for this namespace. 
The default is to use the domainUID if not specified\nFor example:\n$ cd $WORKDIR/kubernetes/create-weblogic-domain-credentials $ ./create-weblogic-credentials.sh -u weblogic -p \u0026lt;password\u0026gt; -n oamns -d accessdomain -s accessdomain-credentials The output will look similar to the following:\nsecret/accessdomain-credentials created secret/accessdomain-credentials labeled The secret accessdomain-credentials has been successfully created in the oamns namespace. Verify the secret is created using the following command:\n$ kubectl get secret \u0026lt;kubernetes_domain_secret\u0026gt; -o yaml -n \u0026lt;domain_namespace\u0026gt; For example:\n$ kubectl get secret accessdomain-credentials -o yaml -n oamns The output will look similar to the following:\napiVersion: v1 data: password: V2VsY29tZTE= username: d2VibG9naWM= kind: Secret metadata: creationTimestamp: \u0026quot;2021-11-01T10:32:35Z\u0026quot; labels: weblogic.domainName: accessdomain weblogic.domainUID: accessdomain managedFields: - apiVersion: v1 fieldsType: FieldsV1 fieldsV1: f:data: .: {} f:password: {} f:username: {} f:type: {} manager: kubectl-create operation: Update time: \u0026quot;2021-11-01T10:32:35Z\u0026quot; - apiVersion: v1 fieldsType: FieldsV1 fieldsV1: f:metadata: f:labels: .: {} f:weblogic.domainName: {} f:weblogic.domainUID: {} manager: kubectl-label operation: Update time: \u0026quot;2021-11-01T10:32:35Z\u0026quot; name: accessdomain-credentials namespace: oamns resourceVersion: \u0026quot;990770\u0026quot; uid: b2ffcd87-8c61-4fb1-805e-3768295982e2 type: Opaque Create a Kubernetes secret for RCU using the create-weblogic-credentials script in the same Kubernetes namespace as the domain:\n$ cd $WORKDIR/kubernetes/create-rcu-credentials $ ./create-rcu-credentials.sh -u \u0026lt;rcu_prefix\u0026gt; -p \u0026lt;rcu_schema_pwd\u0026gt; -a sys -q \u0026lt;sys_db_pwd\u0026gt; -d \u0026lt;domain_uid\u0026gt; -n \u0026lt;domain_namespace\u0026gt; -s \u0026lt;kubernetes_rcu_secret\u0026gt; 
where:\n-u \u0026lt;rcu_prefix\u0026gt; is the name of the RCU schema prefix created previously\n-p \u0026lt;rcu_schema_pwd\u0026gt; is the password for the RCU schema prefix\n-q \u0026lt;sys_db_pwd\u0026gt; is the sys database password\n-d \u0026lt;domain_uid\u0026gt; is the domain_uid that you created earlier\n-n \u0026lt;domain_namespace\u0026gt; is the domain namespace\n-s \u0026lt;kubernetes_rcu_secret\u0026gt; is the name of the rcu secret to create\nFor example:\n$ cd $WORKDIR/kubernetes/create-rcu-credentials $ ./create-rcu-credentials.sh -u OAMK8S -p \u0026lt;password\u0026gt; -a sys -q \u0026lt;password\u0026gt; -d accessdomain -n oamns -s accessdomain-rcu-credentials The output will look similar to the following:\nsecret/accessdomain-rcu-credentials created secret/accessdomain-rcu-credentials labeled The secret accessdomain-rcu-credentials has been successfully created in the oamns namespace. Verify the secret is created using the following command:\n$ kubectl get secret \u0026lt;kubernetes_rcu_secret\u0026gt; -o yaml -n \u0026lt;domain_namespace\u0026gt; For example:\n$ kubectl get secret accessdomain-rcu-credentials -o yaml -n oamns The output will look similar to the following:\napiVersion: v1 data: password: V2VsY29tZTE= sys_password: V2VsY29tZTE= sys_username: c3lz username: T0FNSzhT kind: Secret metadata: creationTimestamp: \u0026quot;2021-11-01T10:33:37Z\u0026quot; labels: weblogic.domainName: accessdomain weblogic.domainUID: accessdomain managedFields: - apiVersion: v1 fieldsType: FieldsV1 fieldsV1: f:data: .: {} f:password: {} f:sys_password: {} f:sys_username: {} f:username: {} f:type: {} manager: kubectl-create operation: Update time: \u0026quot;2021-11-01T10:33:37Z\u0026quot; - apiVersion: v1 fieldsType: FieldsV1 fieldsV1: f:metadata: f:labels: .: {} f:weblogic.domainName: {} f:weblogic.domainUID: {} manager: kubectl-label operation: Update time: \u0026quot;2021-11-01T10:33:37Z\u0026quot; name: accessdomain-rcu-credentials namespace: oamns 
resourceVersion: \u0026quot;992205\u0026quot; uid: ee283fbd-6211-4172-9c28-a65c84ecd794 type: Opaque Create a Kubernetes persistent volume and persistent volume claim A persistent volume is the same as a disk mount but is inside a container. A Kubernetes persistent volume is an arbitrary name (determined in this case, by Oracle) that is mapped to a physical volume on a disk.\nWhen a container is started, it needs to mount that volume. The physical volume should be on a shared disk accessible by all the Kubernetes worker nodes because it is not known on which worker node the container will be started. In the case of Identity and Access Management, the persistent volume does not get erased when a container stops. This enables persistent configurations.\nThe example below uses an NFS mounted volume (/accessdomainpv). Other volume types can also be used. See the official Kubernetes documentation for Volumes.\nTo create a Kubernetes persistent volume, perform the following steps:\n Make a backup copy of the create-pv-pvc-inputs.yaml file and create required directories:\n$ cd $WORKDIR/kubernetes/create-weblogic-domain-pv-pvc $ cp create-pv-pvc-inputs.yaml create-pv-pvc-inputs.yaml.orig $ mkdir output $ mkdir -p /\u0026lt;workdir\u0026gt;/accessdomainpv $ chmod -R 777 /\u0026lt;workdir\u0026gt;/accessdomainpv For example:\n$ cd $WORKDIR/kubernetes/create-weblogic-domain-pv-pvc $ cp create-pv-pvc-inputs.yaml create-pv-pvc-inputs.yaml.orig $ mkdir output $ mkdir -p /scratch/OAMK8S/accessdomainpv $ chmod -R 777 /scratch/OAMK8S/accessdomainpv Note: The persistent volume directory needs to be accessible to both the master and worker node(s) via NFS. Make sure this path has full access permissions, and that the folder is empty. 
In this example /scratch/OAMK8S/accessdomainpv is accessible from all nodes via NFS.\n On the master node run the following command to ensure it is possible to read and write to the persistent volume:\ncd \u0026lt;workdir\u0026gt;/accessdomainpv touch filemaster.txt ls filemaster.txt For example:\ncd /scratch/OAMK8S/accessdomainpv touch filemaster.txt ls filemaster.txt On the first worker node run the following to ensure it is possible to read and write to the persistent volume:\ncd /scratch/OAMK8S/accessdomainpv ls filemaster.txt touch fileworker1.txt ls fileworker1.txt Repeat the above for any other worker nodes e.g fileworker2.txt etc. Once proven that it\u0026rsquo;s possible to read and write from each node to the persistent volume, delete the files created.\n Edit the create-pv-pvc-inputs.yaml file and update the following parameters to reflect your settings. Save the file when complete:\nbaseName: \u0026lt;domain\u0026gt; domainUID: \u0026lt;domain_uid\u0026gt; namespace: \u0026lt;domain_namespace\u0026gt; weblogicDomainStorageType: NFS weblogicDomainStorageNFSServer: \u0026lt;nfs_server\u0026gt; weblogicDomainStoragePath: \u0026lt;physical_path_of_persistent_storage\u0026gt; weblogicDomainStorageSize: 10Gi For example:\n # The base name of the pv and pvc baseName: domain # Unique ID identifying a domain. # If left empty, the generated pv can be shared by multiple domains # This ID must not contain an underscope (\u0026quot;_\u0026quot;), and must be lowercase and unique across all domains in a Kubernetes cluster. domainUID: accessdomain # Name of the namespace for the persistent volume claim namespace: oamns ... # Persistent volume type for the persistent storage. # The value must be 'HOST_PATH' or 'NFS'. # If using 'NFS', weblogicDomainStorageNFSServer must be specified. weblogicDomainStorageType: NFS # The server name or ip address of the NFS server to use for the persistent storage. 
# The following line must be uncomment and customized if weblogicDomainStorateType is NFS: weblogicDomainStorageNFSServer: mynfsserver # Physical path of the persistent storage. # When weblogicDomainStorageType is set to HOST_PATH, this value should be set the to path to the # domain storage on the Kubernetes host. # When weblogicDomainStorageType is set to NFS, then weblogicDomainStorageNFSServer should be set # to the IP address or name of the DNS server, and this value should be set to the exported path # on that server. # Note that the path where the domain is mounted in the WebLogic containers is not affected by this # setting, that is determined when you create your domain. # The following line must be uncomment and customized: weblogicDomainStoragePath: /scratch/OAMK8S/accessdomainpv # Reclaim policy of the persistent storage # The valid values are: 'Retain', 'Delete', and 'Recycle' weblogicDomainStorageReclaimPolicy: Retain # Total storage allocated to the persistent storage. weblogicDomainStorageSize: 10Gi Execute the create-pv-pvc.sh script to create the PV and PVC configuration files:\n$ ./create-pv-pvc.sh -i create-pv-pvc-inputs.yaml -o output The output will be similar to the following:\nInput parameters being used export version=\u0026quot;create-weblogic-sample-domain-pv-pvc-inputs-v1\u0026quot; export baseName=\u0026quot;domain\u0026quot; export domainUID=\u0026quot;accessdomain\u0026quot; export namespace=\u0026quot;oamns\u0026quot; export weblogicDomainStorageType=\u0026quot;NFS\u0026quot; export weblogicDomainStorageNFSServer=\u0026quot;mynfsserver\u0026quot; export weblogicDomainStoragePath=\u0026quot;/scratch/OAMK8S/accessdomainpv\u0026quot; export weblogicDomainStorageReclaimPolicy=\u0026quot;Retain\u0026quot; export weblogicDomainStorageSize=\u0026quot;10Gi\u0026quot; Generating output/pv-pvcs/accessdomain-domain-pv.yaml Generating output/pv-pvcs/accessdomain-domain-pvc.yaml The following files were generated: 
output/pv-pvcs/accessdomain-domain-pv.yaml output/pv-pvcs/accessdomain-domain-pvc.yaml Run the following to show the files are created:\n$ ls output/pv-pvcs accessdomain-domain-pv.yaml accessdomain-domain-pvc.yaml create-pv-pvc-inputs.yaml Run the following kubectl command to create the PV and PVC in the domain namespace:\n$ kubectl create -f output/pv-pvcs/accessdomain-domain-pv.yaml -n \u0026lt;domain_namespace\u0026gt; $ kubectl create -f output/pv-pvcs/accessdomain-domain-pvc.yaml -n \u0026lt;domain_namespace\u0026gt; For example:\n$ kubectl create -f output/pv-pvcs/accessdomain-domain-pv.yaml -n oamns $ kubectl create -f output/pv-pvcs/accessdomain-domain-pvc.yaml -n oamns The output will look similar to the following:\npersistentvolume/accessdomain-domain-pv created persistentvolumeclaim/accessdomain-domain-pvc created Run the following commands to verify the PV and PVC were created successfully:\n$ kubectl describe pv \u0026lt;pv_name\u0026gt; $ kubectl describe pvc \u0026lt;pvc_name\u0026gt; -n \u0026lt;domain_namespace\u0026gt; For example:\n$ kubectl describe pv accessdomain-domain-pv $ kubectl describe pvc accessdomain-domain-pvc -n oamns The output will look similar to the following:\n$ kubectl describe pv accessdomain-domain-pv Name: accessdomain-domain-pv Labels: weblogic.domainUID=accessdomain Annotations: pv.kubernetes.io/bound-by-controller: yes Finalizers: [kubernetes.io/pv-protection] StorageClass: accessdomain-domain-storage-class Status: Bound Claim: oamns/accessdomain-domain-pvc Reclaim Policy: Retain Access Modes: RWX VolumeMode: Filesystem Capacity: 10Gi Node Affinity: \u0026lt;none\u0026gt; Message: Source: Type: NFS (an NFS mount that lasts the lifetime of a pod) Server: mynfsserver Path: /scratch/OAMK8S/accessdomainpv ReadOnly: false Events: \u0026lt;none\u0026gt; $ kubectl describe pvc accessdomain-domain-pvc -n oamns Name: accessdomain-domain-pvc Namespace: oamns StorageClass: accessdomain-domain-storage-class Status: 
Bound Volume: accessdomain-domain-pv Labels: weblogic.domainUID=accessdomain Annotations: pv.kubernetes.io/bind-completed: yes pv.kubernetes.io/bound-by-controller: yes Finalizers: [kubernetes.io/pvc-protection] Capacity: 10Gi Access Modes: RWX VolumeMode: Filesystem Events: \u0026lt;none\u0026gt; Mounted By: \u0026lt;none\u0026gt; You are now ready to create the OAM domain as per Create OAM Domains\n " +}, +{ + "uri": "/fmw-kubernetes/21.4.2/oig/prepare-your-environment/", + "title": "Prepare your environment", + "tags": [], + "description": "Preparation to deploy OIG on Kubernetes", + "content": " Set up your Kubernetes cluster\n Install Helm\n Check the Kubernetes cluster is ready\n Install the OIG Docker image\n Install the WebLogic Kubernetes Operator Docker Image\n Setup the code repository to deploy OIG domains\n Install the WebLogic Kubernetes Operator\n Create a namespace for Oracle Identity Governance\n RCU schema creation\n Preparing the environment for domain creation\na. Creating Kubernetes secrets for the domain and RCU\nb. Create a Kubernetes persistent volume and persistent volume claim\n Set up your Kubernetes cluster If you need help setting up a Kubernetes environment, refer to the official Kubernetes documentation to set up a production grade Kubernetes cluster.\nIt is recommended you have a master node and one or more worker nodes. The examples in this documentation assume one master and two worker nodes.\nVerify that the system clocks on each host computer are synchronized. You can do this by running the date command simultaneously on all the hosts in each cluster.\nAfter creating Kubernetes clusters, you can optionally:\n Configure an Ingress to direct traffic to backend domains. Configure Kibana and Elasticsearch for your operator logs. Install Helm As per the prerequisites an installation of Helm is required to create and deploy the necessary resources and then run the operator in a Kubernetes cluster. 
For Helm installation and usage information, refer to the README.\nCheck the Kubernetes cluster is ready Run the following command on the master node to check the cluster and worker nodes are running:\n$ kubectl get nodes,pods -n kube-system The output will look similar to the following:\nNAME STATUS ROLES AGE VERSION node/worker-node1 Ready \u0026lt;none\u0026gt; 17h v1.20.10 node/worker-node2 Ready \u0026lt;none\u0026gt; 17h v1.20.10 node/master-node Ready master 23h v1.20.10 NAME READY STATUS RESTARTS AGE pod/coredns-66bff467f8-fnhbq 1/1 Running 0 23h pod/coredns-66bff467f8-xtc8k 1/1 Running 0 23h pod/etcd-master 1/1 Running 0 21h pod/kube-apiserver-master-node 1/1 Running 0 21h pod/kube-controller-manager-master-node 1/1 Running 0 21h pod/kube-flannel-ds-amd64-lxsfw 1/1 Running 0 17h pod/kube-flannel-ds-amd64-pqrqr 1/1 Running 0 17h pod/kube-flannel-ds-amd64-wj5nh 1/1 Running 0 17h pod/kube-proxy-2kxv2 1/1 Running 0 17h pod/kube-proxy-82vvj 1/1 Running 0 17h pod/kube-proxy-nrgw9 1/1 Running 0 23h pod/kube-scheduler-master 1/1 Running 0 21$ Install the OIG Docker Image You can deploy OIG Docker images in the following ways:\n Download a prebuilt OIG Docker image from My Oracle Support by referring to the document ID 2723908.1. This image is prebuilt by Oracle and includes Oracle Identity Governance 12.2.1.4.0 and the latest PSU.\n Build your own OIG image using the WebLogic Image Tool or by using the dockerfile, scripts and base images from Oracle Container Registry (OCR). You can also build your own image by using only the dockerfile and scripts. For more information about the various ways in which you can build your own container image, see Building the OIG Docker Image.\n Choose one of these options based on your requirements.\nThe OIG Docker image must be installed on the master node and each of the worker nodes in your Kubernetes cluster. 
Alternatively you can place the image in a Docker registry that your cluster can access.\n After installing the OIG Docker image run the following command to make sure the image is installed correctly on the master and worker nodes:\n$ docker images The output will look similar to the following:\nREPOSITORY TAG IMAGE ID CREATED SIZE oracle/oig 12.2.1.4.0-8-ol7-211022.0723 f05f3b63c9e8 2 weeks ago 4.43GB quay.io/coreos/flannel v0.15.0 09b38f011a29 6 days ago 69.5MB rancher/mirrored-flannelcni-flannel-cni-plugin v1.2 98660e6e4c3a 13 days ago 8.98MB k8s.gcr.io/kube-proxy v1.20.10 945c9bce487a 2 months ago 99.7MB k8s.gcr.io/kube-controller-manager v1.20.10 2f450864515d 2 months ago 116MB k8s.gcr.io/kube-apiserver v1.20.10 644cadd07add 2 months ago 122MB k8s.gcr.io/kube-scheduler v1.20.10 4c9be8dc650b 2 months ago 47.3MB k8s.gcr.io/etcd 3.4.13-0 0369cf4303ff 14 months ago 253MB k8s.gcr.io/coredns 1.7.0 bfe3a36ebd25 16 months ago 45.2MB k8s.gcr.io/pause 3.2 80d28bedfe5d 20 months ago Install the WebLogic Kubernetes Operator Docker Image The WebLogic Kubernetes Operator Docker image must be installed on the master node and each of the worker nodes in your Kubernetes cluster. Alternatively you can place the image in a Docker registry that your cluster can access.\n Pull the Oracle WebLogic Server Kubernetes Operator image by running the following command on the master node:\n$ docker pull ghcr.io/oracle/weblogic-kubernetes-operator:3.3.0 The output will look similar to the following:\nTrying to pull repository ghcr.io/oracle/weblogic-kubernetes-operator ... 
3.3.0: Pulling from ghcr.io/oracle/weblogic-kubernetes-operator c828c776e142: Pull complete 175676c54fa1: Pull complete b3231f480c32: Pull complete ea4423fa8daa: Pull complete f3ca38f7f95f: Pull complete effd851583ec: Pull complete 4f4fb700ef54: Pull complete Digest: sha256:3e93848ad2f5b272c88680e7b37a4ee428dd12e4c4c91af6977fd2fa9ec1f9dc Status: Downloaded newer image for ghcr.io/oracle/weblogic-kubernetes-operator:3.3.0 ghcr.io/oracle/weblogic-kubernetes-operator:3.3.0 Run the docker tag command as follows:\n$ docker tag ghcr.io/oracle/weblogic-kubernetes-operator:3.3.0 weblogic-kubernetes-operator:3.3.0 After installing the Oracle WebLogic Kubernetes Operator image, repeat the above on the worker nodes.\n Setup the Code Repository to Deploy OIG Domains Oracle Identity Governance domain deployment on Kubernetes leverages the WebLogic Kubernetes Operator infrastructure. For deploying the OIG domains, you need to set up the deployment scripts on the master node as below:\n Create a working directory to setup the source code.\n$ mkdir \u0026lt;workdir\u0026gt; For example:\n$ mkdir /scratch/OIGK8S Download the latest OIG deployment scripts from the OIG repository.\n$ cd \u0026lt;workdir\u0026gt; $ git clone https://github.com/oracle/fmw-kubernetes.git --branch release/21.4.2 For example:\n$ cd /scratch/OIGK8S $ git clone https://github.com/oracle/fmw-kubernetes.git --branch release/21.4.2 Set the $WORKDIR environment variable as follows:\n$ export WORKDIR=\u0026lt;workdir\u0026gt;/fmw-kubernetes/OracleIdentityGovernance For example:\n$ export WORKDIR=/scratch/OIGK8S/fmw-kubernetes/OracleIdentityGovernance Run the following command and see if the WebLogic custom resource definition name already exists:\n$ kubectl get crd In the output you should see:\nNo resources found in default namespace. 
If you see the following:\nNAME AGE domains.weblogic.oracle 5d then run the following command to delete the existing crd:\n$ kubectl delete crd domains.weblogic.oracle customresourcedefinition.apiextensions.k8s.io \u0026#34;domains.weblogic.oracle\u0026#34; deleted Install the WebLogic Kubernetes Operator On the master node run the following command to create a namespace for the operator:\n$ kubectl create namespace \u0026lt;sample-kubernetes-operator-ns\u0026gt; For example:\n$ kubectl create namespace opns The output will look similar to the following:\nnamespace/opns created Create a service account for the operator in the operator\u0026rsquo;s namespace by running the following command:\n$ kubectl create serviceaccount -n \u0026lt;sample-kubernetes-operator-ns\u0026gt; \u0026lt;sample-kubernetes-operator-sa\u0026gt; For example:\n$ kubectl create serviceaccount -n opns op-sa The output will look similar to the following:\nserviceaccount/op-sa created If you want to setup logging and visualisation with Elasticsearch and Kibana (post domain creation) edit the $WORKDIR/kubernetes/charts/weblogic-operator/values.yaml and set the parameter elkIntegrationEnabled to true and make sure the following parameters are set:\n# elkIntegrationEnabled specifies whether or not ELK integration is enabled. elkIntegrationEnabled: true # logStashImage specifies the docker image containing logstash. # This parameter is ignored if 'elkIntegrationEnabled' is false. logStashImage: \u0026quot;logstash:6.6.0\u0026quot; # elasticSearchHost specifies the hostname of where elasticsearch is running. # This parameter is ignored if 'elkIntegrationEnabled' is false. elasticSearchHost: \u0026quot;elasticsearch.default.svc.cluster.local\u0026quot; # elasticSearchPort specifies the port number of where elasticsearch is running. # This parameter is ignored if 'elkIntegrationEnabled' is false. 
elasticSearchPort: 9200 After the domain creation see Logging and Visualization in order to complete the setup of Elasticsearch and Kibana.\n Run the following helm command to install and start the operator:\n$ cd $WORKDIR $ helm install weblogic-kubernetes-operator kubernetes/charts/weblogic-operator \\ --namespace \u0026lt;sample-kubernetes-operator-ns\u0026gt; \\ --set image=weblogic-kubernetes-operator:3.3.0 \\ --set serviceAccount=\u0026lt;sample-kubernetes-operator-sa\u0026gt; \\ --set \u0026#34;enableClusterRoleBinding=true\u0026#34; \\ --set \u0026#34;domainNamespaceSelectionStrategy=LabelSelector\u0026#34; \\ --set \u0026#34;domainNamespaceLabelSelector=weblogic-operator\\=enabled\u0026#34; \\ --set \u0026#34;javaLoggingLevel=FINE\u0026#34; --wait For example:\n$ cd $WORKDIR $ helm install weblogic-kubernetes-operator kubernetes/charts/weblogic-operator \\ --namespace opns \\ --set image=weblogic-kubernetes-operator:3.3.0 \\ --set serviceAccount=op-sa \\ --set \u0026#34;enableClusterRoleBinding=true\u0026#34; \\ --set \u0026#34;domainNamespaceSelectionStrategy=LabelSelector\u0026#34; \\ --set \u0026#34;domainNamespaceLabelSelector=weblogic-operator\\=enabled\u0026#34; \\ --set \u0026#34;javaLoggingLevel=FINE\u0026#34; --wait The output will look similar to the following:\nNAME: weblogic-kubernetes-operator LAST DEPLOYED: Thu Nov 11 09:02:50 2021 NAMESPACE: opns STATUS: deployed REVISION: 1 TEST SUITE: None Verify that the operator\u0026rsquo;s pod and services are running by executing the following command:\n$ kubectl get all -n \u0026lt;sample-kubernetes-operator-ns\u0026gt; For example:\n$ kubectl get all -n opns The output will look similar to the following:\nNAME READY STATUS RESTARTS AGE pod/weblogic-operator-676d5cc6f4-rwzxf 2/2 Running 0 59s NAME TYPE CLUSTER-IP EXTERNAL-IP PORT(S) AGE service/internal-weblogic-operator-svc ClusterIP 10.102.7.232 \u0026lt;none\u0026gt; 8082/TCP 59s NAME READY UP-TO-DATE AVAILABLE AGE deployment.apps/weblogic-operator 1/1 1 1 59s NAME 
DESIRED CURRENT READY AGE replicaset.apps/weblogic-operator-676d5cc6f4 1 1 1 59s Verify the operator pod\u0026rsquo;s log:\n$ kubectl logs -n \u0026lt;sample-kubernetes-operator-ns\u0026gt; -c weblogic-operator deployments/weblogic-operator For example:\n$ kubectl logs -n opns -c weblogic-operator deployments/weblogic-operator The output will look similar to the following:\n{\u0026quot;timestamp\u0026quot;:\u0026quot;2021-11-11T17:04:53.167756673Z\u0026quot;,\u0026quot;thread\u0026quot;:23,\u0026quot;fiber\u0026quot;:\u0026quot;\u0026quot;,\u0026quot;namespace\u0026quot;:\u0026quot;\u0026quot;,\u0026quot;domainUID\u0026quot;:\u0026quot;\u0026quot;,\u0026quot;level\u0026quot;:\u0026quot;CONFIG\u0026quot;,\u0026quot;class\u0026quot;:\u0026quot;oracle.kubernetes.operator.TuningParametersImpl\u0026quot;,\u0026quot;method\u0026quot;:\u0026quot;update\u0026quot;,\u0026quot;timeInMillis\u0026quot;:1636650293167,\u0026quot;message\u0026quot;:\u0026quot;Reloading tuning parameters from Operator's config map\u0026quot;,\u0026quot;exception\u0026quot;:\u0026quot;\u0026quot;,\u0026quot;code\u0026quot;:\u0026quot;\u0026quot;,\u0026quot;headers\u0026quot;:{},\u0026quot;body\u0026quot;:\u0026quot;\u0026quot;} {\u0026quot;timestamp\u0026quot;:\u0026quot;2021-11-11T17:05:03.170083172Z\u0026quot;,\u0026quot;thread\u0026quot;:30,\u0026quot;fiber\u0026quot;:\u0026quot;\u0026quot;,\u0026quot;namespace\u0026quot;:\u0026quot;\u0026quot;,\u0026quot;domainUID\u0026quot;:\u0026quot;\u0026quot;,\u0026quot;level\u0026quot;:\u0026quot;CONFIG\u0026quot;,\u0026quot;class\u0026quot;:\u0026quot;oracle.kubernetes.operator.TuningParametersImpl\u0026quot;,\u0026quot;method\u0026quot;:\u0026quot;update\u0026quot;,\u0026quot;timeInMillis\u0026quot;:1636650303170,\u0026quot;message\u0026quot;:\u0026quot;Reloading tuning parameters from Operator's config 
map\u0026quot;,\u0026quot;exception\u0026quot;:\u0026quot;\u0026quot;,\u0026quot;code\u0026quot;:\u0026quot;\u0026quot;,\u0026quot;headers\u0026quot;:{},\u0026quot;body\u0026quot;:\u0026quot;\u0026quot;} {\u0026quot;timestamp\u0026quot;:\u0026quot;2021-11-11T17:05:13.172302644Z\u0026quot;,\u0026quot;thread\u0026quot;:29,\u0026quot;fiber\u0026quot;:\u0026quot;\u0026quot;,\u0026quot;namespace\u0026quot;:\u0026quot;\u0026quot;,\u0026quot;domainUID\u0026quot;:\u0026quot;\u0026quot;,\u0026quot;level\u0026quot;:\u0026quot;CONFIG\u0026quot;,\u0026quot;class\u0026quot;:\u0026quot;oracle.kubernetes.operator.TuningParametersImpl\u0026quot;,\u0026quot;method\u0026quot;:\u0026quot;update\u0026quot;,\u0026quot;timeInMillis\u0026quot;:1636650313172,\u0026quot;message\u0026quot;:\u0026quot;Reloading tuning parameters from Operator's config map\u0026quot;,\u0026quot;exception\u0026quot;:\u0026quot;\u0026quot;,\u0026quot;code\u0026quot;:\u0026quot;\u0026quot;,\u0026quot;headers\u0026quot;:{},\u0026quot;body\u0026quot;:\u0026quot;\u0026quot;} Create a namespace for Oracle Identity Governance Run the following command to create a namespace for the domain:\n$ kubectl create namespace \u0026lt;domain_namespace\u0026gt; For example:\n$ kubectl create namespace oigns The output will look similar to the following:\nnamespace/oigns created Run the following command to tag the namespace so the WebLogic Kubernetes Operator can manage it:\n$ kubectl label namespaces \u0026lt;domain_namespace\u0026gt; weblogic-operator=enabled For example:\n$ kubectl label namespaces oigns weblogic-operator=enabled The output will look similar to the following:\nnamespace/oigns labeled Run the following command to check the label was created:\n$ kubectl describe namespace \u0026lt;domain_namespace\u0026gt; For example:\n$ kubectl describe namespace oigns The output will look similar to the following:\nName: oigns Labels: weblogic-operator=enabled Annotations: \u0026lt;none\u0026gt; Status: Active No resource 
quota. No LimitRange resource. RCU schema creation In this section you create the RCU schemas in the Oracle Database.\nBefore following the steps in this section, make sure that the database and listener are up and running and you can connect to the database via SQL*Plus or other client tool.\n Run the following command to create a helper pod:\n$ kubectl run helper --image \u0026lt;image_name\u0026gt; -n \u0026lt;domain_namespace\u0026gt; -- sleep infinity For example:\n$ kubectl run helper --image oracle/oig:12.2.1.4.0-8-ol7-211022.0723 -n oigns -- sleep infinity The output will look similar to the following:\npod/helper created Run the following command to start a bash shell in the helper pod:\n$ kubectl exec -it helper -n \u0026lt;domain_namespace\u0026gt; -- /bin/bash For example:\n$ kubectl exec -it helper -n oigns -- /bin/bash This will take you into a bash shell in the running helper pod:\n[oracle@helper oracle]$ In the helper bash shell run the following commands to set the environment:\n[oracle@helper oracle]$ export DB_HOST=\u0026lt;db_host.domain\u0026gt; [oracle@helper oracle]$ export DB_PORT=\u0026lt;db_port\u0026gt; [oracle@helper oracle]$ export DB_SERVICE=\u0026lt;service_name\u0026gt; [oracle@helper oracle]$ export RCUPREFIX=\u0026lt;rcu_schema_prefix\u0026gt; [oracle@helper oracle]$ export RCU_SCHEMA_PWD=\u0026lt;rcu_schema_pwd\u0026gt; [oracle@helper oracle]$ echo -e \u0026lt;db_pwd\u0026gt;\u0026#34;\\n\u0026#34;\u0026lt;rcu_schema_pwd\u0026gt; \u0026gt; /tmp/pwd.txt [oracle@helper oracle]$ cat /tmp/pwd.txt where:\n\u0026lt;db_host.domain\u0026gt; is the database server hostname\n\u0026lt;db_port\u0026gt; is the database listener port\n\u0026lt;service_name\u0026gt; is the database service name\n\u0026lt;rcu_schema_prefix\u0026gt; is the RCU schema prefix you want to set\n\u0026lt;rcu_schema_pwd\u0026gt; is the password you want to set for the \u0026lt;rcu_schema_prefix\u0026gt;\n\u0026lt;db_pwd\u0026gt; is the SYS password for the database\nFor 
example:\n[oracle@helper oracle]$ export DB_HOST=mydatabasehost.example.com [oracle@helper oracle]$ export DB_PORT=1521 [oracle@helper oracle]$ export DB_SERVICE=orcl.example.com [oracle@helper oracle]$ export RCUPREFIX=OIGK8S [oracle@helper oracle]$ export RCU_SCHEMA_PWD=\u0026lt;password\u0026gt; [oracle@helper oracle]$ echo -e \u0026lt;password\u0026gt;\u0026#34;\\n\u0026#34;\u0026lt;password\u0026gt; \u0026gt; /tmp/pwd.txt [oracle@helper oracle]$ cat /tmp/pwd.txt \u0026lt;password\u0026gt; \u0026lt;password\u0026gt; In the helper bash shell run the following commands to create the RCU schemas in the database:\n[oracle@helper oracle]$ /u01/oracle/oracle_common/bin/rcu -silent -createRepository -databaseType ORACLE -connectString \\ $DB_HOST:$DB_PORT/$DB_SERVICE -dbUser sys -dbRole sysdba -useSamePasswordForAllSchemaUsers true \\ -selectDependentsForComponents true -schemaPrefix $RCUPREFIX -component OIM -component MDS -component SOAINFRA -component OPSS \\ -f \u0026lt; /tmp/pwd.txt The output will look similar to the following:\nRCU Logfile: /tmp/RCU2020-09-29_10-51_508080961/logs/rcu.log Processing command line .... Repository Creation Utility - Checking Prerequisites Checking Global Prerequisites Repository Creation Utility - Checking Prerequisites Checking Component Prerequisites Repository Creation Utility - Creating Tablespaces Validating and Creating Tablespaces Create tablespaces in the repository database Repository Creation Utility - Create Repository Create in progress. 
Percent Complete: 10 Executing pre create operations Percent Complete: 25 Percent Complete: 25 Percent Complete: 26 Percent Complete: 27 Percent Complete: 28 Percent Complete: 28 Percent Complete: 29 Percent Complete: 29 Creating Common Infrastructure Services(STB) Percent Complete: 36 Percent Complete: 36 Percent Complete: 44 Percent Complete: 44 Percent Complete: 44 Creating Audit Services Append(IAU_APPEND) Percent Complete: 51 Percent Complete: 51 Percent Complete: 59 Percent Complete: 59 Percent Complete: 59 Creating Audit Services Viewer(IAU_VIEWER) Percent Complete: 66 Percent Complete: 66 Percent Complete: 67 Percent Complete: 67 Percent Complete: 68 Percent Complete: 68 Creating Metadata Services(MDS) Percent Complete: 76 Percent Complete: 76 Percent Complete: 76 Percent Complete: 77 Percent Complete: 77 Percent Complete: 78 Percent Complete: 78 Percent Complete: 78 Creating Weblogic Services(WLS) Percent Complete: 82 Percent Complete: 82 Percent Complete: 83 Percent Complete: 84 Percent Complete: 86 Percent Complete: 88 Percent Complete: 88 Percent Complete: 88 Creating User Messaging Service(UCSUMS) Percent Complete: 92 Percent Complete: 92 Percent Complete: 95 Percent Complete: 95 Percent Complete: 100 Creating Audit Services(IAU) Creating Oracle Platform Security Services(OPSS) Creating SOA Infrastructure(SOAINFRA) Creating Oracle Identity Manager(OIM) Executing post create operations Repository Creation Utility: Create - Completion Summary Database details: ----------------------------- Host Name : mydatabasehost.example.com Port : 1521 Service Name : ORCL.EXAMPLE.COM Connected As : sys Prefix for (prefixable) Schema Owners : OIGK8S RCU Logfile : /tmp/RCU2021-11-11_17-16_464189537/logs/rcu.log Component schemas created: ----------------------------- Component Status Logfile Common Infrastructure Services Success /tmp/RCU2021-11-11_17-16_464189537/logs/stb.log Oracle Platform Security Services Success /tmp/RCU2021-11-11_17-16_464189537/logs/opss.log 
SOA Infrastructure Success /tmp/RCU2021-11-11_17-16_464189537/logs/soainfra.log Oracle Identity Manager Success /tmp/RCU2021-11-11_17-16_464189537/logs/oim.log User Messaging Service Success /tmp/RCU2021-11-11_17-16_464189537/logs/ucsums.log Audit Services Success /tmp/RCU2021-11-11_17-16_464189537/logs/iau.log Audit Services Append Success /tmp/RCU2021-11-11_17-16_464189537/logs/iau_append.log Audit Services Viewer Success /tmp/RCU2021-11-11_17-16_464189537/logs/iau_viewer.log Metadata Services Success /tmp/RCU2021-11-11_17-16_464189537/logs/mds.log WebLogic Services Success /tmp/RCU2021-11-11_17-16_464189537/logs/wls.log Repository Creation Utility - Create : Operation Completed [oracle@helper oracle]$ Run the following command to patch schemas in the database:\nThis command should be run if you are using an OIG image that contains OIG bundle patches. If using an OIG image without OIG bundle patches, then you can skip this step.\n [oracle@helper oracle]$ /u01/oracle/oracle_common/modules/thirdparty/org.apache.ant/1.10.5.0.0/apache-ant-1.10.5/bin/ant \\ -f /u01/oracle/idm/server/setup/deploy-files/automation.xml \\ run-patched-sql-files \\ -logger org.apache.tools.ant.NoBannerLogger \\ -logfile /u01/oracle/idm/server/bin/patch_oim_wls.log \\ -DoperationsDB.host=$DB_HOST \\ -DoperationsDB.port=$DB_PORT \\ -DoperationsDB.serviceName=$DB_SERVICE \\ -DoperationsDB.user=${RCUPREFIX}_OIM \\ -DOIM.DBPassword=$RCU_SCHEMA_PWD \\ -Dojdbc=/u01/oracle/oracle_common/modules/oracle.jdbc/ojdbc8.jar The output will look similar to the following:\nBuildfile: /u01/oracle/idm/server/setup/deploy-files/automation.xml Verify the database was patched successfully by viewing the patch_oim_wls.log:\n[oracle@helper oracle]$ cat /u01/oracle/idm/server/bin/patch_oim_wls.log The output should look similar to below:\n... 
run-patched-sql-files: [sql] Executing resource: /u01/oracle/idm/server/db/oim/oracle/StoredProcedures/API/oim_role_mgmt_pkg_body.sql [sql] Executing resource: /u01/oracle/idm/server/db/oim/oracle/Upgrade/oim12cps4/list/oim12cps4_dml_pty_insert_sysprop_ssointg_grprecon_matching_rolename.sql [sql] Executing resource: /u01/oracle/idm/server/db/oim/oracle/Upgrade/oim12cps4/list/oim12cps4_dml_pty_insert_sysprop_oimadpswdpolicy.sql etc... [sql] 34 of 34 SQL statements executed successfully BUILD SUCCESSFUL Total time: 5 second Exit the helper bash shell by issuing the command exit.\n Preparing the environment for domain creation In this section you prepare the environment for the OIG domain creation. This involves the following steps:\na. Creating Kubernetes secrets for the domain and RCU\nb. Create a Kubernetes persistent volume and persistent volume claim\nCreating Kubernetes secrets for the domain and RCU Create a Kubernetes secret for the domain using the create-weblogic-credentials script in the same Kubernetes namespace as the domain:\n$ cd $WORKDIR/kubernetes/create-weblogic-domain-credentials $ ./create-weblogic-credentials.sh -u weblogic -p \u0026lt;pwd\u0026gt; -n \u0026lt;domain_namespace\u0026gt; -d \u0026lt;domain_uid\u0026gt; -s \u0026lt;kubernetes_domain_secret\u0026gt; where:\n-u weblogic is the WebLogic username\n-p \u0026lt;pwd\u0026gt; is the password for the WebLogic user\n-n \u0026lt;domain_namespace\u0026gt; is the domain namespace\n-d \u0026lt;domain_uid\u0026gt; is the domain UID to be created. The default is domain1 if not specified\n-s \u0026lt;kubernetes_domain_secret\u0026gt; is the name you want to create for the secret for this namespace. 
The default is to use the domainUID if not specified\nFor example:\n$ cd $WORKDIR/kubernetes/create-weblogic-domain-credentials $ ./create-weblogic-credentials.sh -u weblogic -p \u0026lt;password\u0026gt; -n oigns -d governancedomain -s oig-domain-credentials The output will look similar to the following:\nsecret/oig-domain-credentials created secret/oig-domain-credentials labeled The secret oig-domain-credentials has been successfully created in the oigns namespace. Verify the secret is created using the following command:\n$ kubectl get secret \u0026lt;kubernetes_domain_secret\u0026gt; -o yaml -n \u0026lt;domain_namespace\u0026gt; For example:\n$ kubectl get secret oig-domain-credentials -o yaml -n oigns The output will look similar to the following:\n$ kubectl get secret oig-domain-credentials -o yaml -n oigns apiVersion: v1 data: password: V2VsY29tZTE= username: d2VibG9naWM= kind: Secret metadata: creationTimestamp: \u0026quot;2021-11-12T10:37:43Z\u0026quot; labels: weblogic.domainName: governancedomain weblogic.domainUID: governancedomain managedFields: - apiVersion: v1 fieldsType: FieldsV1 fieldsV1: f:data: .: {} f:password: {} f:username: {} f:metadata: f:labels: .: {} f:weblogic.domainName: {} f:weblogic.domainUID: {} f:type: {} manager: kubectl operation: Update time: \u0026quot;2021-11-12T10:37:43Z\u0026quot; name: oig-domain-credentials namespace: oigns resourceVersion: \u0026quot;1249007\u0026quot; selfLink: /api/v1/namespaces/oigns/secrets/oig-domain-credentials uid: 4ade08f3-7b11-4bb0-9340-7304a2ef9b64 type: Opaque Create a Kubernetes secret for RCU in the same Kubernetes namespace as the domain, using the create-weblogic-credentials.sh script:\n$ cd $WORKDIR/kubernetes/create-rcu-credentials $ ./create-rcu-credentials.sh -u \u0026lt;rcu_prefix\u0026gt; -p \u0026lt;rcu_schema_pwd\u0026gt; -a sys -q \u0026lt;sys_db_pwd\u0026gt; -d \u0026lt;domain_uid\u0026gt; -n \u0026lt;domain_namespace\u0026gt; -s \u0026lt;kubernetes_rcu_secret\u0026gt; where:\n-u 
\u0026lt;rcu_prefix\u0026gt; is the name of the RCU schema prefix created previously\n-p \u0026lt;rcu_schema_pwd\u0026gt; is the password for the RCU schema prefix\n-q \u0026lt;sys_db_pwd\u0026gt; is the sys database password\n-d \u0026lt;domain_uid\u0026gt; is the domain_uid that you created earlier\n-n \u0026lt;domain_namespace\u0026gt; is the domain namespace\n-s \u0026lt;kubernetes_rcu_secret\u0026gt; is the name of the rcu secret to create\nFor example:\n$ cd $WORKDIR/kubernetes/create-rcu-credentials $ ./create-rcu-credentials.sh -u OIGK8S -p \u0026lt;password\u0026gt; -a sys -q \u0026lt;password\u0026gt; -d governancedomain -n oigns -s oig-rcu-credentials The output will look similar to the following:\nsecret/oig-rcu-credentials created secret/oig-rcu-credentials labeled The secret oig-rcu-credentials has been successfully created in the oigns namespace. Verify the secret is created using the following command:\n$ kubectl get secret \u0026lt;kubernetes_rcu_secret\u0026gt; -o yaml -n \u0026lt;domain_namespace\u0026gt; For example:\n$ kubectl get secret oig-rcu-credentials -o yaml -n oigns The output will look similar to the following:\napiVersion: v1 data: password: V2VsY29tZTE= sys_password: V2VsY29tZTE= sys_username: c3lz username: T0lHSzhT kind: Secret metadata: creationTimestamp: \u0026quot;2021-11-12T10:39:24Z\u0026quot; labels: weblogic.domainName: governancedomain weblogic.domainUID: governancedomain managedFields: - apiVersion: v1 fieldsType: FieldsV1 fieldsV1: f:data: .: {} f:password: {} f:sys_password: {} f:sys_username: {} f:username: {} f:metadata: f:labels: .: {} f:weblogic.domainName: {} f:weblogic.domainUID: {} f:type: {} manager: kubectl operation: Update time: \u0026quot;2021-11-12T10:39:24Z\u0026quot; name: oig-rcu-credentials namespace: oigns resourceVersion: \u0026quot;1251020\u0026quot; selfLink: /api/v1/namespaces/oigns/secrets/oig-rcu-credentials uid: aee4213e-ffe2-45a6-9b96-11c4e88d12f2 type: Opaque Create a Kubernetes persistent 
volume and persistent volume claim In the Kubernetes domain namespace created above, create the persistent volume (PV) and persistent volume claim (PVC) by running the create-pv-pvc.sh script.\n Make a backup copy of the create-pv-pvc-inputs.yaml file and create required directories:\n$ cd $WORKDIR/kubernetes/create-weblogic-domain-pv-pvc $ cp create-pv-pvc-inputs.yaml create-pv-pvc-inputs.yaml.orig $ mkdir output $ mkdir -p \u0026lt;workdir\u0026gt;/governancedomainpv $ chmod -R 777 \u0026lt;workdir\u0026gt;/governancedomainpv For example:\n$ cd $WORKDIR/kubernetes/create-weblogic-domain-pv-pvc $ cp create-pv-pvc-inputs.yaml create-pv-pvc-inputs.yaml.orig $ mkdir output $ mkdir -p /scratch/OIGK8S/governancedomainpv $ chmod -R 777 /scratch/OIGK8S/governancedomainpv Note: The persistent volume directory needs to be accessible to both the master and worker node(s) via NFS. Make sure this path has full access permissions, and that the folder is empty. In this example /scratch/OIGK8S/governancedomainpv is accessible from all nodes via NFS.\n On the master node run the following command to ensure it is possible to read and write to the persistent volume:\ncd \u0026lt;workdir\u0026gt;/governancedomainpv touch filemaster.txt ls filemaster.txt For example:\ncd /scratch/OIGK8S/governancedomainpv touch filemaster.txt ls filemaster.txt On the first worker node run the following to ensure it is possible to read and write to the persistent volume:\ncd /scratch/OIGK8S/governancedomainpv ls filemaster.txt touch fileworker1.txt ls fileworker1.txt Repeat the above for any other worker nodes e.g. fileworker2.txt etc. Once proven that it\u0026rsquo;s possible to read and write from each node to the persistent volume, delete the files created.\n Edit the create-pv-pvc-inputs.yaml file and update the following parameters to reflect your settings. 
Save the file when complete:\nbaseName: \u0026lt;domain\u0026gt; domainUID: \u0026lt;domain_uid\u0026gt; namespace: \u0026lt;domain_namespace\u0026gt; weblogicDomainStorageType: NFS weblogicDomainStorageNFSServer: \u0026lt;nfs_server\u0026gt; weblogicDomainStoragePath: \u0026lt;physical_path_of_persistent_storage\u0026gt; weblogicDomainStorageSize: 10Gi For example:\n# The base name of the pv and pvc baseName: domain # Unique ID identifying a domain. # If left empty, the generated pv can be shared by multiple domains # This ID must not contain an underscore (\u0026quot;_\u0026quot;), and must be lowercase and unique across all domains in a Kubernetes cluster. domainUID: governancedomain # Name of the namespace for the persistent volume claim namespace: oigns # Persistent volume type for the persistent storage. # The value must be 'HOST_PATH' or 'NFS'. # If using 'NFS', weblogicDomainStorageNFSServer must be specified. weblogicDomainStorageType: NFS # The server name or ip address of the NFS server to use for the persistent storage. # The following line must be uncommented and customized if weblogicDomainStorageType is NFS: weblogicDomainStorageNFSServer: mynfsserver # Physical path of the persistent storage. # When weblogicDomainStorageType is set to HOST_PATH, this value should be set to the path to the # domain storage on the Kubernetes host. # When weblogicDomainStorageType is set to NFS, then weblogicDomainStorageNFSServer should be set # to the IP address or name of the DNS server, and this value should be set to the exported path # on that server. # Note that the path where the domain is mounted in the WebLogic containers is not affected by this # setting, that is determined when you create your domain. 
# The following line must be uncommented and customized: weblogicDomainStoragePath: /scratch/OIGK8S/governancedomainpv # Reclaim policy of the persistent storage # The valid values are: 'Retain', 'Delete', and 'Recycle' weblogicDomainStorageReclaimPolicy: Retain # Total storage allocated to the persistent storage. weblogicDomainStorageSize: 10Gi Execute the create-pv-pvc.sh script to create the PV and PVC configuration files:\n$ ./create-pv-pvc.sh -i create-pv-pvc-inputs.yaml -o output The output will be similar to the following:\nInput parameters being used export version=\u0026quot;create-weblogic-sample-domain-pv-pvc-inputs-v1\u0026quot; export baseName=\u0026quot;domain\u0026quot; export domainUID=\u0026quot;governancedomain\u0026quot; export namespace=\u0026quot;oigns\u0026quot; export weblogicDomainStorageType=\u0026quot;NFS\u0026quot; export weblogicDomainStorageNFSServer=\u0026quot;mynfsserver\u0026quot; export weblogicDomainStoragePath=\u0026quot;/scratch/OIGK8S/governancedomainpv\u0026quot; export weblogicDomainStorageReclaimPolicy=\u0026quot;Retain\u0026quot; export weblogicDomainStorageSize=\u0026quot;10Gi\u0026quot; Generating output/pv-pvcs/governancedomain-domain-pv.yaml Generating output/pv-pvcs/governancedomain-domain-pvc.yaml The following files were generated: output/pv-pvcs/governancedomain-domain-pv.yaml output/pv-pvcs/governancedomain-domain-pvc.yaml Completed Run the following to show the files are created:\n$ ls output/pv-pvcs create-pv-pvc-inputs.yaml governancedomain-domain-pv.yaml governancedomain-domain-pvc.yaml Run the following kubectl command to create the PV and PVC in the domain namespace:\n$ kubectl create -f output/pv-pvcs/governancedomain-domain-pv.yaml -n \u0026lt;domain_namespace\u0026gt; $ kubectl create -f output/pv-pvcs/governancedomain-domain-pvc.yaml -n \u0026lt;domain_namespace\u0026gt; For example:\n$ kubectl create -f output/pv-pvcs/governancedomain-domain-pv.yaml -n oigns $ kubectl create -f 
output/pv-pvcs/governancedomain-domain-pvc.yaml -n oigns The output will look similar to the following:\npersistentvolume/governancedomain-domain-pv created persistentvolumeclaim/governancedomain-domain-pvc created Run the following commands to verify the PV and PVC were created successfully:\n$ kubectl describe pv \u0026lt;pv_name\u0026gt; $ kubectl describe pvc \u0026lt;pvc_name\u0026gt; -n \u0026lt;domain_namespace\u0026gt; For example:\n$ kubectl describe pv governancedomain-domain-pv $ kubectl describe pvc governancedomain-domain-pvc -n oigns The output will look similar to the following:\n$ kubectl describe pv governancedomain-domain-pv Name: governancedomain-domain-pv Labels: weblogic.domainUID=governancedomain Annotations: pv.kubernetes.io/bound-by-controller: yes Finalizers: [kubernetes.io/pv-protection] StorageClass: governancedomain-domain-storage-class Status: Bound Claim: oigns/governancedomain-domain-pvc Reclaim Policy: Retain Access Modes: RWX VolumeMode: Filesystem Capacity: 10Gi Node Affinity: \u0026lt;none\u0026gt; Message: Source: Type: NFS (an NFS mount that lasts the lifetime of a pod) Server: mynfsserver Path: /scratch/OIGK8S/governancedomainpv ReadOnly: false Events: \u0026lt;none\u0026gt; $ kubectl describe pvc governancedomain-domain-pvc -n oigns Name: governancedomain-domain-pvc Namespace: oigns StorageClass: governancedomain-domain-storage-class Status: Bound Volume: governancedomain-domain-pv Labels: weblogic.domainUID=governancedomain Annotations: pv.kubernetes.io/bind-completed: yes pv.kubernetes.io/bound-by-controller: yes Finalizers: [kubernetes.io/pvc-protection] Capacity: 10Gi Access Modes: RWX VolumeMode: Filesystem Mounted By: \u0026lt;none\u0026gt; Events: \u0026lt;none\u0026gt; You are now ready to create the OIG domain as per Create OIG Domains\n " +}, +{ + "uri": "/fmw-kubernetes/21.4.2/oig/manage-oig-domains/running-oig-utilities/", + "title": "Running OIG utilities", + "tags": [], + "description": "Describes the steps for 
running OIG utilities in Kubernetes.", + "content": "Run OIG utilities inside the OIG Kubernetes cluster.\nRun utilities in an interactive bash shell Access a bash shell inside the governancedomain-oim-server1 pod:\n$ kubectl -n oigns exec -it governancedomain-oim-server1 -- bash This will take you into a bash shell in the running governancedomain-oim-server1 pod:\n[oracle@governancedomain-oim-server1 oracle]$ Navigate to the /u01/oracle/idm/server/bin directory and execute the utility as required. For example:\n[oracle@governancedomain-oim-server1 oracle] cd /u01/oracle/idm/server/bin [oracle@governancedomain-oim-server1 bin]$ ./\u0026lt;filename\u0026gt;.sh Passing inputs as a jar/xml file Copy the input file to pass to a directory of your choice.\n Run the following command to copy the input file to the running governancedomain-oim-server1 pod.\n$ kubectl -n oigns cp /\u0026lt;path\u0026gt;/\u0026lt;inputFile\u0026gt; governancedomain-oim-server1:/u01/oracle/idm/server/bin/ Access a bash shell inside the governancedomain-oim-server1 pod:\n$ kubectl -n oigns exec -it governancedomain-oim-server1 -- bash This will take you into a bash shell in the running governancedomain-oim-server1 pod:\n[oracle@governancedomain-oim-server1 oracle]$ Navigate to the /u01/oracle/idm/server/bin directory and execute the utility as required, passing the input file. 
For example:\n[oracle@governancedomain-oim-server1 oracle] cd /u01/oracle/idm/server/bin [oracle@governancedomain-oim-server1 bin]$ ./\u0026lt;filename\u0026gt;.sh -inputFile \u0026lt;inputFile\u0026gt; Note: As pods are stateless the copied input file will remain until the pod restarts.\n Editing property/profile files To edit a property/profile file in the Kubernetes cluster:\n Copy the input file from the pod to a directory on the local system, for example:\n$ kubectl -n oigns cp governancedomain-oim-server1:/u01/oracle/idm/server/bin/\u0026lt;file.properties_profile\u0026gt; /\u0026lt;path\u0026gt;/\u0026lt;file.properties_profile\u0026gt; Note: If you see the message tar: Removing leading '/' from member names this can be ignored.\n Edit the \u0026lt;/path\u0026gt;/\u0026lt;file.properties_profile\u0026gt; in an editor of your choice.\n Copy the file back to the pod:\n$ kubectl -n oigns cp /\u0026lt;path\u0026gt;/\u0026lt;file.properties_profile\u0026gt; governancedomain-oim-server1:/u01/oracle/idm/server/bin/ Note: As pods are stateless the copied input file will remain until the pod restarts. 
Preserve a local copy in case you need to copy files back after pod restart.\n " +}, +{ + "uri": "/fmw-kubernetes/21.4.2/soa-domains/patch_and_upgrade/", + "title": "Patch and upgrade", + "tags": [], + "description": "", + "content": "Patch an existing Oracle SOA Suite image or upgrade the infrastructure, such as upgrading the underlying Kubernetes cluster to a new release and upgrading the WebLogic Kubernetes Operator release.\n Patch an image Create a patched Oracle SOA Suite image using the WebLogic Image Tool.\n Upgrade an operator release Upgrade the WebLogic Kubernetes Operator release to a newer version.\n Upgrade a Kubernetes cluster Upgrade the underlying Kubernetes cluster version in a running SOA Kubernetes environment.\n " +}, +{ + "uri": "/fmw-kubernetes/21.4.2/oud/create-oud-instances/", + "title": "Create Oracle Unified Directory Instances", + "tags": [], + "description": "Deploying Oracle Unified Directory instances to a Kubernetes POD.", + "content": "Choose one of the following supported methods to create Oracle Unified Directory instances.\n a. Create Oracle Unified Directory Instances Using Samples Samples for deploying Oracle Unified Directory instances to a Kubernetes POD.\n b. Create Oracle Unified Directory Instances Using Helm This document provides steps to create Oracle Unified Directory instances using Helm Charts.\n " +}, +{ + "uri": "/fmw-kubernetes/21.4.2/oudsm/create-oudsm-instances/", + "title": "Create Oracle Unified Directory Instances", + "tags": [], + "description": "Deploying Oracle Unified Directory Services Manager instances to a Kubernetes POD.", + "content": "Choose one of the following supported methods to create Oracle Unified Directory Services Manager instances.\n a. Create Oracle Unified Directory Services Manager Instances Using Samples Samples for deploying Oracle Unified Directory Services Manager instances to a Kubernetes POD.\n b. 
Create Oracle Unified Directory Services Manager Instances Using Helm This document provides steps to create OUDSM instances using Helm Charts.\n " +}, +{ + "uri": "/fmw-kubernetes/21.4.2/oid/create-oid-instances-helm/", + "title": "Create Oracle Internet Directory Instances Using Helm", + "tags": [], + "description": "This document provides steps to create Oracle Internet Directory instances using Helm Charts.", + "content": " Introduction Install Helm Deploy an Application using the Helm Chart Undeploy an Application using the Helm Chart Helm Chart(s) for Oracle Internet Directory Introduction This chapter demonstrates how to deploy Oracle Internet Directory 12c instance(s) using the Helm package manager for Kubernetes. Helm Chart(s) described here can be used to facilitate installation, configuration, and environment setup within a Kubernetes environment.\nInstall Helm Helm can be used to create and deploy the Oracle Internet Directory resources in a Kubernetes cluster. For Helm installation and usage information, refer to the README.\nDeploy an Application using the Helm Chart The helm install command is used to deploy applications to a Kubernetes environment, using the Helm Chart supplied.\n$ helm install [Deployment NAME] [CHART Reference] [flags] For example:\n$ helm install oid oid --namespace oidns Undeploy an Application using the Helm Chart To uninstall an application deployed using a Helm chart you need to identify the release name and then issue a delete command:\nTo get the release name:\n$ helm --namespace \u0026lt;namespace\u0026gt; list For example:\n$ helm --namespace oidns list NAME NAMESPACE REVISION UPDATED STATUS CHART APP VERSION oid oidns 1 2020-03-31 10:37:30.616927678 -0700 PDT deployed oid-12.2.1.4.0 12.2.1.4.0 To delete the chart:\n$ helm uninstall --namespace \u0026lt;namespace\u0026gt; \u0026lt;release\u0026gt; For example:\n$ helm uninstall --namespace oidns oid release \u0026quot;oid\u0026quot; uninstalled Helm Chart(s) for Oracle 
Internet Directory The following list provides Helm charts for deploying Oracle Internet Directory in a Kubernetes environment. Helm charts provided can be found in the project at the following location:\nhttps://github.com/oracle/fmw-kubernetes/tree/master/OracleInternetDirectory/kubernetes/helm\nDetails about each Helm Chart can be found in the relevant README listed below:\n oid : A Helm chart for deployment of Oracle Internet Directory instances on Kubernetes. " +}, +{ + "uri": "/fmw-kubernetes/21.4.2/soa-domains/", + "title": "Oracle SOA Suite", + "tags": [], + "description": "The Oracle WebLogic Kubernetes Operator (the “operator”) supports deployment of Oracle SOA Suite components such as Oracle Service-Oriented Architecture (SOA), Oracle Service Bus, and Oracle Enterprise Scheduler (ESS). Follow the instructions in this guide to set up these Oracle SOA Suite domains on Kubernetes.", + "content": "The WebLogic Kubernetes Operator (the “operator”) supports deployment of Oracle SOA Suite components such as Oracle Service-Oriented Architecture (SOA), Oracle Service Bus, and Oracle Enterprise Scheduler (ESS). Currently the operator supports these domain types:\n soa : Deploys a SOA domain with Oracle Enterprise Scheduler (ESS) osb : Deploys an Oracle Service Bus domain soaosb : Deploys a domain with SOA, Oracle Service Bus, and Oracle Enterprise Scheduler (ESS) In this release, Oracle SOA Suite domains are supported using the “domain on a persistent volume” model only, where the domain home is located in a persistent volume (PV).\nThe operator has several key features to assist you with deploying and managing Oracle SOA Suite domains in a Kubernetes environment. You can:\n Create Oracle SOA Suite instances in a Kubernetes persistent volume (PV). This PV can reside in an NFS file system or other Kubernetes volume types. Start servers based on declarative startup parameters and desired states. 
Expose the Oracle SOA Suite services and composites for external access. Scale Oracle SOA Suite domains by starting and stopping Managed Servers on demand, or by integrating with a REST API. Publish operator and WebLogic Server logs to Elasticsearch and interact with them in Kibana. Monitor the Oracle SOA Suite instance using Prometheus and Grafana. Current production release The current production release for the Oracle SOA Suite domains deployment on Kubernetes is 21.4.2. This release uses the WebLogic Kubernetes Operator version 3.3.0.\nRecent changes and known issues See the Release Notes for recent changes and known issues for Oracle SOA Suite domains deployment on Kubernetes.\nLimitations See here for limitations in this release.\nAbout this documentation This documentation includes sections targeted to different audiences. To help you find what you are looking for more easily, please consult this table of contents:\n Quick Start explains how to quickly get an Oracle SOA Suite domain instance running using default settings. Note that this is only for development and test purposes.\n Install Guide and Administration Guide provide detailed information about all aspects of using the Kubernetes operator including:\n Installing and configuring the operator. Using the operator to create and manage Oracle SOA Suite domains. Configuring Kubernetes load balancers. Configuring custom SSL certificates. Configuring Elasticsearch and Kibana to access the operator and WebLogic Server log files. Deploying composite applications for Oracle SOA Suite and Oracle Service Bus. Patching an Oracle SOA Suite Docker image. Removing domains. And much more! 
Documentation for earlier releases To view documentation for an earlier release, see:\n Version 21.3.2 Version 21.2.2 Version 21.1.2 Version 20.4.2 Version 20.3.3 Additional reading Oracle SOA Suite domains deployment on Kubernetes leverages the WebLogic Kubernetes Operator framework.\n To develop an understanding of the operator, including design, architecture, domain life cycle management, and configuration overrides, review the operator documentation. To learn more about the Oracle SOA Suite architecture and components, see Understanding Oracle SOA Suite. To review the known issues and common questions for Oracle SOA Suite domains deployment on Kubernetes, see the frequently asked questions. " +}, +{ + "uri": "/fmw-kubernetes/21.4.2/soa-domains/adminguide/monitoring-soa-domains/", + "title": "Monitor a domain and publish logs", + "tags": [], + "description": "Monitor an Oracle SOA Suite domain and publish the WebLogic Server logs to Elasticsearch.", + "content": "After the Oracle SOA Suite domain is set up, you can:\n Monitor the Oracle SOA Suite instance using Prometheus and Grafana Publish WebLogic Server logs into Elasticsearch Publish SOA server diagnostics logs into Elasticsearch Monitor the Oracle SOA Suite instance using Prometheus and Grafana Using the WebLogic Monitoring Exporter you can scrape runtime information from a running Oracle SOA Suite instance and monitor them using Prometheus and Grafana.\nPrerequisite: Before setting up monitoring, make sure that Prometheus and Grafana are deployed on the Kubernetes cluster.\nDeploy Prometheus and Grafana Refer to the compatibility matrix of Kube Prometheus and clone the release version of the kube-prometheus repository according to the Kubernetes version of your cluster.\n Clone the kube-prometheus repository:\n$ git clone https://github.com/coreos/kube-prometheus.git Change to folder kube-prometheus and enter the following commands to create the namespace and CRDs, and then wait for their availability 
before creating the remaining resources:\n$ cd kube-prometheus $ kubectl create -f manifests/setup $ until kubectl get servicemonitors --all-namespaces ; do date; sleep 1; echo \u0026quot;\u0026quot;; done $ kubectl create -f manifests/ kube-prometheus requires all nodes in the Kubernetes cluster to be labeled with kubernetes.io/os=linux. If any node is not labeled with this, then you need to label it using the following command:\n$ kubectl label nodes --all kubernetes.io/os=linux Enter the following commands to provide external access for Grafana, Prometheus, and Alertmanager:\n$ kubectl patch svc grafana -n monitoring --type=json -p '[{\u0026quot;op\u0026quot;: \u0026quot;replace\u0026quot;, \u0026quot;path\u0026quot;: \u0026quot;/spec/type\u0026quot;, \u0026quot;value\u0026quot;: \u0026quot;NodePort\u0026quot; },{\u0026quot;op\u0026quot;: \u0026quot;replace\u0026quot;, \u0026quot;path\u0026quot;: \u0026quot;/spec/ports/0/nodePort\u0026quot;, \u0026quot;value\u0026quot;: 32100 }]' $ kubectl patch svc prometheus-k8s -n monitoring --type=json -p '[{\u0026quot;op\u0026quot;: \u0026quot;replace\u0026quot;, \u0026quot;path\u0026quot;: \u0026quot;/spec/type\u0026quot;, \u0026quot;value\u0026quot;: \u0026quot;NodePort\u0026quot; },{\u0026quot;op\u0026quot;: \u0026quot;replace\u0026quot;, \u0026quot;path\u0026quot;: \u0026quot;/spec/ports/0/nodePort\u0026quot;, \u0026quot;value\u0026quot;: 32101 }]' $ kubectl patch svc alertmanager-main -n monitoring --type=json -p '[{\u0026quot;op\u0026quot;: \u0026quot;replace\u0026quot;, \u0026quot;path\u0026quot;: \u0026quot;/spec/type\u0026quot;, \u0026quot;value\u0026quot;: \u0026quot;NodePort\u0026quot; },{\u0026quot;op\u0026quot;: \u0026quot;replace\u0026quot;, \u0026quot;path\u0026quot;: \u0026quot;/spec/ports/0/nodePort\u0026quot;, \u0026quot;value\u0026quot;: 32102 }]' Note:\n 32100 is the external port for Grafana 32101 is the external port for Prometheus 32102 is the external port for Alertmanager Set up monitoring Follow 
these steps to set up monitoring for an Oracle SOA Suite instance. For more details on WebLogic Monitoring Exporter, see here.\nPublish WebLogic Server logs into Elasticsearch You can publish the WebLogic Server logs to Elasticsearch using the WebLogic Logging exporter and interact with them in Kibana. See Publish logs to Elasticsearch.\nWebLogic Server logs can also be published to Elasticsearch using Fluentd. See Fluentd configuration steps.\nPublish SOA server diagnostics logs into Elasticsearch This section shows you how to publish diagnostics logs to Elasticsearch and view them in Kibana. For publishing operator logs, see this sample.\nPrerequisites If you have not already set up Elasticsearch and Kibana for logs collection, refer this document and complete the setup.\nPublish to Elasticsearch The Diagnostics or other logs can be pushed to Elasticsearch server using logstash pod. The logstash pod should have access to the shared domain home or the log location. In case of the Oracle SOA Suite domain, the persistent volume of the domain home can be used in the logstash pod. The steps to create the logstash pod are,\n Get Domain home persistence volume claim details of the domain home of the Oracle SOA Suite domain. The following command will list the persistent volume claim details in the namespace - soans. In the example below the persistent volume claim is soainfra-domain-pvc:\n$ kubectl get pvc -n soans Sample output:\nNAME STATUS VOLUME CAPACITY ACCESS MODES STORAGECLASS AGE soainfra-domain-pvc Bound soainfra-domain-pv 10Gi RWX soainfra-domain-storage-class xxd Create logstash configuration file (logstash.conf). 
Below is a sample logstash configuration to push diagnostic logs of all servers available at DOMAIN_HOME/servers/\u0026lt;server_name\u0026gt;/logs/-diagnostic.log:\ninput { file { path =\u0026gt; \u0026quot;/u01/oracle/user_projects/domains/soainfra/servers/**/logs/*-diagnostic.log\u0026quot; start_position =\u0026gt; beginning } } filter { grok { match =\u0026gt; [ \u0026quot;message\u0026quot;, \u0026quot;\u0026lt;%{DATA:log_timestamp}\u0026gt; \u0026lt;%{WORD:log_level}\u0026gt; \u0026lt;%{WORD:thread}\u0026gt; \u0026lt;%{HOSTNAME:hostname}\u0026gt; \u0026lt;%{HOSTNAME:servername}\u0026gt; \u0026lt;%{DATA:timer}\u0026gt; \u0026lt;\u0026lt;%{DATA:kernel}\u0026gt;\u0026gt; \u0026lt;\u0026gt; \u0026lt;%{DATA:uuid}\u0026gt; \u0026lt;%{NUMBER:timestamp}\u0026gt; \u0026lt;%{DATA:misc}\u0026gt; \u0026lt;%{DATA:log_number}\u0026gt; \u0026lt;%{DATA:log_message}\u0026gt;\u0026quot; ] } } output { elasticsearch { hosts =\u0026gt; [\u0026quot;elasticsearch.default.svc.cluster.local:9200\u0026quot;] } } Copy the logstash.conf into say /u01/oracle/user_projects/domains so that it can be used for logstash deployment, using Administration Server pod ( For example soainfra-adminserver pod in namespace soans):\n$ kubectl cp logstash.conf soans/soainfra-adminserver:/u01/oracle/user_projects/domains --namespace soans Create deployment YAML (logstash.yaml) for logstash pod using the domain home persistence volume claim. Make sure to point the logstash configuration file to correct location ( For example: we copied logstash.conf to /u01/oracle/user_projects/domains/logstash.conf) and also correct domain home persistence volume claim. 
Below is a sample logstash deployment YAML:\napiVersion: apps/v1 kind: Deployment metadata: name: logstash-soa namespace: soans spec: selector: matchLabels: app: logstash-soa template: # create pods using pod definition in this template metadata: labels: app: logstash-soa spec: volumes: - name: soainfra-domain-storage-volume persistentVolumeClaim: claimName: soainfra-domain-pvc - name: shared-logs emptyDir: {} containers: - name: logstash image: logstash:6.6.0 command: [\u0026quot;/bin/sh\u0026quot;] args: [\u0026quot;/usr/share/logstash/bin/logstash\u0026quot;, \u0026quot;-f\u0026quot;, \u0026quot;/u01/oracle/user_projects/domains/logstash.conf\u0026quot;] imagePullPolicy: IfNotPresent volumeMounts: - mountPath: /u01/oracle/user_projects name: soainfra-domain-storage-volume - name: shared-logs mountPath: /shared-logs ports: - containerPort: 5044 name: logstash Deploy logstash to start publishing logs to Elasticsearch:\n$ kubectl create -f logstash.yaml Now, you can view the diagnostics logs using Kibana with index pattern \u0026ldquo;logstash-*\u0026rdquo;.\n " +}, +{ + "uri": "/fmw-kubernetes/21.4.2/oam/create-oam-domains/", + "title": "Create OAM domains", + "tags": [], + "description": "Sample for creating an OAM domain home on an existing PV or PVC, and the domain resource YAML file for deploying the generated OAM domain.", + "content": "The OAM deployment scripts demonstrate the creation of an OAM domain home on an existing Kubernetes persistent volume (PV) and persistent volume claim (PVC). The scripts also generate the domain YAML file, which can then be used to start the Kubernetes artifacts of the corresponding domain.\nPrerequisites Before you begin, perform the following steps:\n Review the Domain resource documentation. Ensure that you have executed all the preliminary steps documented in Prepare your environment. Ensure that the database is up and running. 
Prepare to use the create domain script The sample scripts for Oracle Access Management domain deployment are available at $WORKDIR/kubernetes/create-access-domain.\n Make a copy of the create-domain-inputs.yaml file:\n$ cd $WORKDIR/kubernetes/create-access-domain/domain-home-on-pv $ cp create-domain-inputs.yaml create-domain-inputs.yaml.orig You must edit create-domain-inputs.yaml (or a copy of it) to provide the details for your domain. Please refer to the configuration parameters below to understand the information that you must provide in this file.\nEdit configuration parameters Edit the create-domain-inputs.yaml and modify the following parameters. Save the file when complete:\ndomainUID: \u0026lt;domain_uid\u0026gt; domainHome: /u01/oracle/user_projects/domains/\u0026lt;domain_uid\u0026gt; image: \u0026lt;image_name\u0026gt; weblogicCredentialsSecretName: \u0026lt;kubernetes_domain_secret\u0026gt; logHome: /u01/oracle/user_projects/domains/logs/\u0026lt;domain_uid\u0026gt; namespace: \u0026lt;domain_namespace\u0026gt; persistentVolumeClaimName: \u0026lt;pvc_name\u0026gt; rcuSchemaPrefix: \u0026lt;rcu_prefix\u0026gt; rcuDatabaseURL: \u0026lt;rcu_db_host\u0026gt;:\u0026lt;rcu_db_port\u0026gt;/\u0026lt;rcu_db_service_name\u0026gt; rcuCredentialsSecret: \u0026lt;kubernetes_rcu_secret\u0026gt; For example:\ndomainUID: accessdomain domainHome: /u01/oracle/user_projects/domains/accessdomain image: oracle/oam:12.2.1.4.0-8-ol7-210721.0755 weblogicCredentialsSecretName: accessdomain-credentials logHome: /u01/oracle/user_projects/domains/logs/accessdomain namespace: oamns persistentVolumeClaimName: accessdomain-domain-pvc rcuSchemaPrefix: OAMK8S rcuDatabaseURL: mydatabasehost.example.com:1521/orcl.example.com rcuCredentialsSecret: accessdomain-rcu-credentials A full list of parameters in the create-domain-inputs.yaml file are shown below:\n Parameter Definition Default adminPort Port number for the Administration Server inside the Kubernetes cluster. 
7001 adminNodePort Port number of the Administration Server outside the Kubernetes cluster. 30701 adminServerName Name of the Administration Server. AdminServer clusterName Name of the WebLogic cluster instance to generate for the domain. By default the cluster name is oam_cluster for the OAM domain. oam_cluster configuredManagedServerCount Number of Managed Server instances to generate for the domain. 5 createDomainFilesDir Directory on the host machine to locate all the files to create a WebLogic domain, including the script that is specified in the createDomainScriptName property. By default, this directory is set to the relative path wlst, and the create script will use the built-in WLST offline scripts in the wlst directory to create the WebLogic domain. It can also be set to the relative path wdt, and then the built-in WDT scripts will be used instead. An absolute path is also supported to point to an arbitrary directory in the file system. The built-in scripts can be replaced by the user-provided scripts or model files as long as those files are in the specified directory. Files in this directory are put into a Kubernetes config map, which in turn is mounted to the createDomainScriptsMountPath, so that the Kubernetes pod can use the scripts and supporting files to create a domain home. wlst createDomainScriptsMountPath Mount path where the create domain scripts are located inside a pod. The create-domain.sh script creates a Kubernetes job to run the script (specified in the createDomainScriptName property) in a Kubernetes pod to create a domain home. Files in the createDomainFilesDir directory are mounted to this location in the pod, so that the Kubernetes pod can use the scripts and supporting files to create a domain home. /u01/weblogic createDomainScriptName Script that the create domain script uses to create a WebLogic domain. The create-domain.sh script creates a Kubernetes job to run this script to create a domain home. 
The script is located in the in-pod directory that is specified in the createDomainScriptsMountPath property. If you need to provide your own scripts to create the domain home, instead of using the built-in scripts, you must use this property to set the name of the script that you want the create domain job to run. create-domain-job.sh domainHome Home directory of the OAM domain. If not specified, the value is derived from the domainUID as /shared/domains/\u0026lt;domainUID\u0026gt;. /u01/oracle/user_projects/domains/accessinfra domainPVMountPath Mount path of the domain persistent volume. /u01/oracle/user_projects domainUID Unique ID that will be used to identify this particular domain. Used as the name of the generated WebLogic domain as well as the name of the Kubernetes domain resource. This ID must be unique across all domains in a Kubernetes cluster. This ID cannot contain any character that is not valid in a Kubernetes service name. accessinfra domainType Type of the domain. Mandatory input for OAM domains. You must provide one of the supported domain type values: oam (deploys an OAM domain) oam exposeAdminNodePort Boolean indicating if the Administration Server is exposed outside of the Kubernetes cluster. false exposeAdminT3Channel Boolean indicating if the T3 administrative channel is exposed outside the Kubernetes cluster. true image OAM Docker image. The operator requires OAM 12.2.1.4. Refer to OAM domains for details on how to obtain or create the image. oracle/oam:12.2.1.4.0 imagePullPolicy WebLogic Docker image pull policy. Legal values are IfNotPresent, Always, or Never IfNotPresent imagePullSecretName Name of the Kubernetes secret to access the Docker Store to pull the WebLogic Server Docker image. The presence of the secret will be validated when this parameter is specified. includeServerOutInPodLog Boolean indicating whether to include the server .out to the pod\u0026rsquo;s stdout. 
true initialManagedServerReplicas Number of Managed Servers to initially start for the domain. 2 javaOptions Java options for starting the Administration Server and Managed Servers. A Java option can have references to one or more of the following pre-defined variables to obtain WebLogic domain information: $(DOMAIN_NAME), $(DOMAIN_HOME), $(ADMIN_NAME), $(ADMIN_PORT), and $(SERVER_NAME). -Dweblogic.StdoutDebugEnabled=false logHome The in-pod location for the domain log, server logs, server out, and Node Manager log files. If not specified, the value is derived from the domainUID as /shared/logs/\u0026lt;domainUID\u0026gt;. /u01/oracle/user_projects/domains/logs/accessinfra managedServerNameBase Base string used to generate Managed Server names. oam_server managedServerPort Port number for each Managed Server. 8001 namespace Kubernetes namespace in which to create the domain. accessns persistentVolumeClaimName Name of the persistent volume claim created to host the domain home. If not specified, the value is derived from the domainUID as \u0026lt;domainUID\u0026gt;-weblogic-sample-pvc. accessinfra-domain-pvc productionModeEnabled Boolean indicating if production mode is enabled for the domain. true serverStartPolicy Determines which WebLogic Server instances will be started. Legal values are NEVER, IF_NEEDED, ADMIN_ONLY. IF_NEEDED t3ChannelPort Port for the T3 channel of the NetworkAccessPoint. 30012 t3PublicAddress Public address for the T3 channel. This should be set to the public address of the Kubernetes cluster. This would typically be a load balancer address. For development environments only: In a single server (all-in-one) Kubernetes deployment, this may be set to the address of the master, or at the very least, it must be set to the address of one of the worker nodes. 
If not provided, the script will attempt to set it to the IP address of the Kubernetes cluster weblogicCredentialsSecretName Name of the Kubernetes secret for the Administration Server\u0026rsquo;s user name and password. If not specified, then the value is derived from the domainUID as \u0026lt;domainUID\u0026gt;-weblogic-credentials. accessinfra-domain-credentials weblogicImagePullSecretName Name of the Kubernetes secret for the Docker Store, used to pull the WebLogic Server image. serverPodCpuRequest, serverPodMemoryRequest, serverPodCpuCLimit, serverPodMemoryLimit The maximum amount of compute resources allowed, and minimum amount of compute resources required, for each server pod. Please refer to the Kubernetes documentation on Managing Compute Resources for Containers for details. Resource requests and resource limits are not specified. rcuSchemaPrefix The schema prefix to use in the database, for example OAM1. You may wish to make this the same as the domainUID in order to simplify matching domains to their RCU schemas. OAM1 rcuDatabaseURL The database URL. oracle-db.default.svc.cluster.local:1521/devpdb.k8s rcuCredentialsSecret The Kubernetes secret containing the database credentials. accessinfra-rcu-credentials Note that the names of the Kubernetes resources in the generated YAML files may be formed with the value of some of the properties specified in the create-inputs.yaml file. Those properties include the adminServerName, clusterName and managedServerNameBase. If those values contain any characters that are invalid in a Kubernetes service name, those characters are converted to valid values in the generated YAML files. For example, an uppercase letter is converted to a lowercase letter and an underscore (\u0026quot;_\u0026quot;) is converted to a hyphen (\u0026quot;-\u0026quot;).\nThe sample demonstrates how to create an OAM domain home and associated Kubernetes resources for a domain that has one cluster only. 
In addition, the sample provides the capability for users to supply their own scripts to create the domain home for other use cases. The generated domain YAML file could also be modified to cover more use cases.\nRun the create domain script Run the create domain script, specifying your inputs file and an output directory to store the generated artifacts:\ncd $WORKDIR/kubernetes/create-access-domain/domain-home-on-pv $ ./create-domain.sh -i create-domain-inputs.yaml -o /\u0026lt;path to output-directory\u0026gt; For example:\n$ cd $WORKDIR/kubernetes/create-access-domain/domain-home-on-pv $ ./create-domain.sh -i create-domain-inputs.yaml -o output The output will look similar to the following:\nInput parameters being used export version=\u0026quot;create-weblogic-sample-domain-inputs-v1\u0026quot; export adminPort=\u0026quot;7001\u0026quot; export adminServerName=\u0026quot;AdminServer\u0026quot; export domainUID=\u0026quot;accessdomain\u0026quot; export domainType=\u0026quot;oam\u0026quot; export domainHome=\u0026quot;/u01/oracle/user_projects/domains/accessdomain\u0026quot; export serverStartPolicy=\u0026quot;IF_NEEDED\u0026quot; export clusterName=\u0026quot;oam_cluster\u0026quot; export configuredManagedServerCount=\u0026quot;5\u0026quot; export initialManagedServerReplicas=\u0026quot;2\u0026quot; export managedServerNameBase=\u0026quot;oam_server\u0026quot; export managedServerPort=\u0026quot;14100\u0026quot; export image=\u0026quot;oracle/oam:12.2.1.4.0-8-ol7-210721.0755\u0026quot; export imagePullPolicy=\u0026quot;IfNotPresent\u0026quot; export productionModeEnabled=\u0026quot;true\u0026quot; export weblogicCredentialsSecretName=\u0026quot;accessdomain-credentials\u0026quot; export includeServerOutInPodLog=\u0026quot;true\u0026quot; export logHome=\u0026quot;/u01/oracle/user_projects/domains/logs/accessdomain\u0026quot; export httpAccessLogInLogHome=\u0026quot;true\u0026quot; export t3ChannelPort=\u0026quot;30012\u0026quot; export 
exposeAdminT3Channel=\u0026quot;false\u0026quot; export adminNodePort=\u0026quot;30701\u0026quot; export exposeAdminNodePort=\u0026quot;false\u0026quot; export namespace=\u0026quot;oamns\u0026quot; javaOptions=-Dweblogic.StdoutDebugEnabled=false export persistentVolumeClaimName=\u0026quot;accessdomain-domain-pvc\u0026quot; export domainPVMountPath=\u0026quot;/u01/oracle/user_projects/domains\u0026quot; export createDomainScriptsMountPath=\u0026quot;/u01/weblogic\u0026quot; export createDomainScriptName=\u0026quot;create-domain-job.sh\u0026quot; export createDomainFilesDir=\u0026quot;wlst\u0026quot; export rcuSchemaPrefix=\u0026quot;OAMK8S\u0026quot; export rcuDatabaseURL=\u0026quot;mydatabasehost.example.com:1521/orcl.example.com\u0026quot; export rcuCredentialsSecret=\u0026quot;accessdomain-rcu-credentials\u0026quot; createFiles - valuesInputFile is create-domain-inputs.yaml createDomainScriptName is create-domain-job.sh Generating output/weblogic-domains/accessdomain/create-domain-job.yaml Generating output/weblogic-domains/accessdomain/delete-domain-job.yaml Generating output/weblogic-domains/accessdomain/domain.yaml Checking to see if the secret accessdomain-credentials exists in namespace oamns configmap/accessdomain-create-oam-infra-domain-job-cm created Checking the configmap accessdomain-create-oam-infra-domain-job-cm was created configmap/accessdomain-create-oam-infra-domain-job-cm labeled Checking if object type job with name accessdomain-create-oam-infra-domain-job exists No resources found in oamns namespace. Creating the domain by creating the job output/weblogic-domains/accessdomain/create-domain-job.yaml job.batch/accessdomain-create-oam-infra-domain-job created Waiting for the job to complete... 
status on iteration 1 of 20 pod accessdomain-create-oam-infra-domain-job-6tgw4 status is Running status on iteration 2 of 20 pod accessdomain-create-oam-infra-domain-job-6tgw4 status is Running status on iteration 3 of 20 pod accessdomain-create-oam-infra-domain-job-6tgw4 status is Running status on iteration 4 of 20 pod accessdomain-create-oam-infra-domain-job-6tgw4 status is Running status on iteration 5 of 20 pod accessdomain-create-oam-infra-domain-job-6tgw4 status is Running status on iteration 6 of 20 pod accessdomain-create-oam-infra-domain-job-6tgw4 status is Completed Domain accessdomain was created and will be started by the WebLogic Kubernetes Operator The following files were generated: output/weblogic-domains/accessdomain/create-domain-inputs.yaml output/weblogic-domains/accessdomain/create-domain-job.yaml output/weblogic-domains/accessdomain/domain.yaml Note: If the domain creation fails, refer to the Troubleshooting section.\nThe command creates a domain.yaml file required for domain creation.\n Set the OAM server memory parameters By default, the java memory parameters assigned to the oam_server cluster are very small. The minimum recommended values are -Xms4096m -Xmx8192m. However, Oracle recommends you to set these to -Xms8192m -Xmx8192m in a production environment.\n Navigate to the /output/weblogic-domains/\u0026lt;domain_uid\u0026gt; directory:\n$ cd $WORKDIR/kubernetes/create-access-domain/domain-home-on-pv/output/weblogic-domains/\u0026lt;domain_uid\u0026gt; For example:\n$ cd $WORKDIR/kubernetes/create-access-domain/domain-home-on-pv/output/weblogic-domains/accessdomain Edit the domain.yaml file and locate the section of the file starting with: - clusterName: oam_cluster. 
Immediately after the line: topologyKey: \u0026quot;kubernetes.io/hostname\u0026quot;, add the following lines:\nenv: - name: USER_MEM_ARGS value: \u0026quot;-XX:+UseContainerSupport -Djava.security.egd=file:/dev/./urandom -Xms8192m -Xmx8192m\u0026quot; For example:\n - clusterName: oam_cluster serverService: precreateService: true serverStartState: \u0026quot;RUNNING\u0026quot; serverPod: # Instructs Kubernetes scheduler to prefer nodes for new cluster members where there are not # already members of the same cluster. affinity: podAntiAffinity: preferredDuringSchedulingIgnoredDuringExecution: - weight: 100 podAffinityTerm: labelSelector: matchExpressions: - key: \u0026quot;weblogic.clusterName\u0026quot; operator: In values: - $(CLUSTER_NAME) topologyKey: \u0026quot;kubernetes.io/hostname\u0026quot; env:\t- name: USER_MEM_ARGS value: \u0026quot;-XX:+UseContainerSupport -Djava.security.egd=file:/dev/./urandom -Xms8192m -Xmx8192m\u0026quot; replicas: 2 In the domain.yaml locate the section of the file starting with adminServer:. Under the env: tag add the following CLASSPATH entries. 
This is required for running the idmconfigtool from the Administration Server.\n- name: CLASSPATH value: \u0026quot;/u01/oracle/wlserver/server/lib/weblogic.jar\u0026quot; For example:\n adminServer: # serverStartState legal values are \u0026quot;RUNNING\u0026quot; or \u0026quot;ADMIN\u0026quot; # \u0026quot;RUNNING\u0026quot; means the listed server will be started up to \u0026quot;RUNNING\u0026quot; mode # \u0026quot;ADMIN\u0026quot; means the listed server will be start up to \u0026quot;ADMIN\u0026quot; mode serverStartState: \u0026quot;RUNNING\u0026quot; adminService: channels: # The Admin Server's NodePort - channelName: default nodePort: 30701 # Uncomment to export the T3Channel as a service - channelName: T3Channel serverPod: # an (optional) list of environment variable to be set on the admin servers env: - name: USER_MEM_ARGS value: \u0026quot;-Djava.security.egd=file:/dev/./urandom -Xms512m -Xmx1024m \u0026quot; - name: CLASSPATH value: \u0026quot;/u01/oracle/wlserver/server/lib/weblogic.jar\u0026quot; If required, you can add the optional parameter maxClusterConcurrentStartup to the spec section of the domain.yaml. This parameter specifies the number of managed servers to be started in sequence per cluster. For example if you updated the initialManagedServerReplicas to 4 in create-domain-inputs.yaml and only had 2 nodes, then setting maxClusterConcurrentStartup: 1 will start one managed server at a time on each node, rather than starting them all at once. This can be useful to take the strain off individual nodes at startup. 
Below is an example with the parameter added:\napiVersion: \u0026quot;weblogic.oracle/v8\u0026quot; kind: Domain metadata: name: accessdomain namespace: oamns labels: weblogic.domainUID: accessdomain spec: # The WebLogic Domain Home domainHome: /u01/oracle/user_projects/domains/accessdomain maxClusterConcurrentStartup: 1 # The domain home source type # Set to PersistentVolume for domain-in-pv, Image for domain-in-image, or FromModel for model-in-image domainHomeSourceType: PersistentVolume # The WebLogic Server Docker image that the Operator uses to start the domain image: \u0026quot;oracle/oam:12.2.1.4.0\u0026quot; .... Save the changes to domain.yaml\n Initializing the domain Create the Kubernetes resource using the following command:\n$ kubectl apply -f $WORKDIR/kubernetes/create-access-domain/domain-home-on-pv/output/weblogic-domains/accessdomain/domain.yaml The output will look similar to the following:\ndomain.weblogic.oracle/accessdomain created Verify the domain Verify the domain, servers pods and services are created and in the READY state with a status of 1/1, by running the following command:\n$ kubectl get all,domains -n \u0026lt;domain_namespace\u0026gt; For example:\n$ kubectl get all,domains -n oamns The output will look similar to the following:\nNAME READY STATUS RESTARTS AGE pod/accessdomain-adminserver 1/1 Running 0 11m pod/accessdomain-create-oam-infra-domain-job-7c9r9 0/1 Completed 0 18m pod/accessdomain-oam-policy-mgr1 1/1 Running 0 3m31s pod/accessdomain-oam-policy-mgr2 1/1 Running 0 3m31s pod/accessdomain-oam-server1 1/1 Running 0 3m31s pod/accessdomain-oam-server2 1/1 Running 0 3m31s pod/helper 1/1 Running 0 33m NAME TYPE CLUSTER-IP EXTERNAL-IP PORT(S) AGE service/accessdomain-adminserver ClusterIP None \u0026lt;none\u0026gt; 7001/TCP 11m service/accessdomain-cluster-oam-cluster ClusterIP 10.101.59.154 \u0026lt;none\u0026gt; 14100/TCP 3m31s service/accessdomain-cluster-policy-cluster ClusterIP 10.98.236.51 \u0026lt;none\u0026gt; 15100/TCP 
3m31s service/accessdomain-oam-policy-mgr1 ClusterIP None \u0026lt;none\u0026gt; 15100/TCP 3m31s service/accessdomain-oam-policy-mgr2 ClusterIP None \u0026lt;none\u0026gt; 15100/TCP 3m31s service/accessdomain-oam-policy-mgr3 ClusterIP 10.96.244.37 \u0026lt;none\u0026gt; 15100/TCP 3m31s service/accessdomain-oam-policy-mgr4 ClusterIP 10.105.201.23 \u0026lt;none\u0026gt; 15100/TCP 3m31s service/accessdomain-oam-policy-mgr5 ClusterIP 10.110.12.227 \u0026lt;none\u0026gt; 15100/TCP 3m31s service/accessdomain-oam-server1 ClusterIP None \u0026lt;none\u0026gt; 14100/TCP 3m31s service/accessdomain-oam-server2 ClusterIP None \u0026lt;none\u0026gt; 14100/TCP 3m31s service/accessdomain-oam-server3 ClusterIP 10.103.178.35 \u0026lt;none\u0026gt; 14100/TCP 3m31s service/accessdomain-oam-server4 ClusterIP 10.97.254.78 \u0026lt;none\u0026gt; 14100/TCP 3m31s service/accessdomain-oam-server5 ClusterIP 10.105.65.104 \u0026lt;none\u0026gt; 14100/TCP 3m31s NAME COMPLETIONS DURATION AGE job.batch/accessdomain-create-oam-infra-domain-job 1/1 2m6s 18m NAME AGE domain.weblogic.oracle/accessdomain 12m Note: It will take several minutes before all the services listed above show. When a pod has a STATUS of 0/1 the pod is started but the OAM server associated with it is currently starting. While the pods are starting you can check the startup status in the pod logs, by running the following command:\n$ kubectl logs accessdomain-adminserver -n oamns $ kubectl logs accessdomain-oam-policy-mgr1 -n oamns $ kubectl logs accessdomain-oam-server1 -n oamns etc.. The default domain created by the script has the following characteristics:\n An Administration Server named AdminServer listening on port 7001. A configured OAM cluster named oam_cluster of size 5. A configured Policy Manager cluster named policy_cluster of size 5. Two started OAM managed Servers, named oam_server1 and oam_server2, listening on port 14100. 
Two started Policy Manager managed servers named oam-policy-mgr1 and oam-policy-mgr2, listening on port 15100. Log files that are located in \u0026lt;persistent_volume\u0026gt;/logs/\u0026lt;domainUID\u0026gt;. Run the following command to describe the domain:\n$ kubectl describe domain \u0026lt;domain_uid\u0026gt; -n \u0026lt;domain_namespace\u0026gt; For example:\n$ kubectl describe domain accessdomain -n oamns The output will look similar to the following:\nName: accessdomain Namespace: oamns Labels: weblogic.domainUID=accessdomain Annotations: \u0026lt;none\u0026gt; API Version: weblogic.oracle/v8 Kind: Domain Metadata: Creation Timestamp: 2021-11-01T11:59:51Z Generation: 1 Managed Fields: API Version: weblogic.oracle/v8 Fields Type: FieldsV1 fieldsV1: f:status: .: f:clusters: f:conditions: f:introspectJobFailureCount: f:servers: f:startTime: Manager: Kubernetes Java Client Operation: Update Time: 2021-11-01T11:59:51Z API Version: weblogic.oracle/v8 Fields Type: FieldsV1 fieldsV1: f:metadata: f:annotations: .: f:kubectl.kubernetes.io/last-applied-configuration: f:labels: .: f:weblogic.domainUID: Manager: kubectl-client-side-apply Operation: Update Time: 2021-11-01T11:59:51Z Resource Version: 1495179 UID: a90107d5-dbaf-4d86-9439-d5369faabd35 Spec: Admin Server: Server Pod: Env: Name: USER_MEM_ARGS Value: -Djava.security.egd=file:/dev/./urandom -Xms512m -Xmx1024m Name: CLASSPATH Value: /u01/oracle/wlserver/server/lib/weblogic.jar Server Start State: RUNNING Clusters: Cluster Name: policy_cluster Replicas: 2 Server Pod: Affinity: Pod Anti Affinity: Preferred During Scheduling Ignored During Execution: Pod Affinity Term: Label Selector: Match Expressions: Key: weblogic.clusterName Operator: In Values: $(CLUSTER_NAME) Topology Key: kubernetes.io/hostname Weight: 100 Server Service: Precreate Service: true Server Start State: RUNNING Cluster Name: oam_cluster Replicas: 2 Server Pod: Affinity: Pod Anti Affinity: Preferred During Scheduling Ignored During Execution: 
Pod Affinity Term: Label Selector: Match Expressions: Key: weblogic.clusterName Operator: In Values: $(CLUSTER_NAME) Topology Key: kubernetes.io/hostname Weight: 100 Env: Name: USER_MEM_ARGS Value: -XX:+UseContainerSupport -Djava.security.egd=file:/dev/./urandom -Xms8192m -Xmx8192m Server Service: Precreate Service: true Server Start State: RUNNING Data Home: Domain Home: /u01/oracle/user_projects/domains/accessdomain Domain Home Source Type: PersistentVolume Http Access Log In Log Home: true Image: oracle/oam:12.2.1.4.0-8-ol7-210721.0755 Image Pull Policy: IfNotPresent Include Server Out In Pod Log: true Log Home: /u01/oracle/user_projects/domains/logs/accessdomain Log Home Enabled: true Server Pod: Env: Name: JAVA_OPTIONS Value: -Dweblogic.StdoutDebugEnabled=false Name: USER_MEM_ARGS Value: -Djava.security.egd=file:/dev/./urandom -Xms256m -Xmx1024m Volume Mounts: Mount Path: /u01/oracle/user_projects/domains Name: weblogic-domain-storage-volume Volumes: Name: weblogic-domain-storage-volume Persistent Volume Claim: Claim Name: accessdomain-domain-pvc Server Start Policy: IF_NEEDED Web Logic Credentials Secret: Name: accessdomain-credentials Status: Clusters: Cluster Name: oam_cluster Maximum Replicas: 5 Minimum Replicas: 0 Ready Replicas: 2 Replicas: 2 Replicas Goal: 2 Cluster Name: policy_cluster Maximum Replicas: 5 Minimum Replicas: 0 Ready Replicas: 2 Replicas: 2 Replicas Goal: 2 Conditions: Last Transition Time: 2021-11-01T12:11:52.623959Z Reason: ServersReady Status: True Type: Available Introspect Job Failure Count: 0 Servers: Desired State: RUNNING Health: Activation Time: 2021-11-01T12:08:29.271000Z Overall Health: ok Subsystems: Subsystem Name: ServerRuntime Symptoms: Node Name: 10.250.42.252 Server Name: AdminServer State: RUNNING Cluster Name: oam_cluster Desired State: RUNNING Health: Activation Time: 2021-11-01T12:11:02.696000Z Overall Health: ok Subsystems: Subsystem Name: ServerRuntime Symptoms: Node Name: 10.250.42.255 Server Name: oam_server1 
State: RUNNING Cluster Name: oam_cluster Desired State: RUNNING Health: Activation Time: 2021-11-01T12:11:46.175000Z Overall Health: ok Subsystems: Subsystem Name: ServerRuntime Symptoms: Node Name: 10.250.42.252 Server Name: oam_server2 State: RUNNING Cluster Name: oam_cluster Desired State: SHUTDOWN Server Name: oam_server3 Cluster Name: oam_cluster Desired State: SHUTDOWN Server Name: oam_server4 Cluster Name: oam_cluster Desired State: SHUTDOWN Server Name: oam_server5 Cluster Name: policy_cluster Desired State: RUNNING Health: Activation Time: 2021-11-01T12:11:20.404000Z Overall Health: ok Subsystems: Subsystem Name: ServerRuntime Symptoms: Node Name: 10.250.42.255 Server Name: oam_policy_mgr1 State: RUNNING Cluster Name: policy_cluster Desired State: RUNNING Health: Activation Time: 2021-11-01T12:11:09.719000Z Overall Health: ok Subsystems: Subsystem Name: ServerRuntime Symptoms: Node Name: 10.250.42.252 Server Name: oam_policy_mgr2 State: RUNNING Cluster Name: policy_cluster Desired State: SHUTDOWN Server Name: oam_policy_mgr3 Cluster Name: policy_cluster Desired State: SHUTDOWN Server Name: oam_policy_mgr4 Cluster Name: policy_cluster Desired State: SHUTDOWN Server Name: oam_policy_mgr5 Start Time: 2021-11-01T11:59:51.682687Z Events: Type Reason Age From Message ---- ------ ---- ---- ------- Normal DomainCreated 13m weblogic.operator Domain resource accessdomain was created Normal DomainProcessingStarting 5m9s (x2 over 13m) weblogic.operator Creating or updating Kubernetes presence for WebLogic Domain with UID accessdomain Normal DomainProcessingCompleted 114s weblogic.operator Successfully completed processing domain resource accessdomain In the Status section of the output, the available servers and clusters are listed.\n Run the following command to see the pods running the servers and which nodes they are running on:\n$ kubectl get pods -n \u0026lt;domain_namespace\u0026gt; -o wide For example:\n$ kubectl get pods -n oamns -o wide The output will look 
similar to the following:\nNAME READY STATUS RESTARTS AGE IP NODE NOMINATED NODE READINESS GATES accessdomain-adminserver 1/1 Running 0 18m 10.244.6.63 10.250.42.252 \u0026lt;none\u0026gt; \u0026lt;none\u0026gt; accessdomain-create-oam-infra-domain-job-7c9r9 0/1 Completed 0 25m 10.244.6.61 10.250.42.252 \u0026lt;none\u0026gt; \u0026lt;none\u0026gt; accessdomain-oam-policy-mgr1 1/1 Running 0 10m 10.244.5.13 10.250.42.255 \u0026lt;none\u0026gt; \u0026lt;none\u0026gt; accessdomain-oam-policy-mgr2 1/1 Running 0 10m 10.244.6.65 10.250.42.252 \u0026lt;none\u0026gt; \u0026lt;none\u0026gt; accessdomain-oam-server1 1/1 Running 0 10m 10.244.5.12 10.250.42.255 \u0026lt;none\u0026gt; \u0026lt;none\u0026gt; accessdomain-oam-server2 1/1 Running 0 10m 10.244.6.64 10.250.42.252 \u0026lt;none\u0026gt; \u0026lt;none\u0026gt; helper 1/1 Running 0 40m 10.244.6.60 10.250.42.252 \u0026lt;none\u0026gt; \u0026lt;none\u0026gt; You are now ready to configure an Ingress to direct traffic for your OAM domain as per Configure an Ingress for an OAM domain.\n " +}, +{ + "uri": "/fmw-kubernetes/21.4.2/oig/create-oig-domains/", + "title": "Create OIG domains", + "tags": [], + "description": "Sample for creating an OIG domain home on an existing PV or PVC, and the domain resource YAML file for deploying the generated OIG domain.", + "content": " Introduction\n Prerequisites\n Prepare the create domain script\na. Create Docker registry secret\nb. Edit configuration parameters\n Run the create domain script\na. Generate the create domain script\nb. Setting the OIM server memory parameters\nc. Run the create domain scripts\n Verify the results\na. Verify the domain, pods and services\nb. Verify the domain\nc. Verify the pods\n Introduction The OIG deployment scripts demonstrate the creation of an OIG domain home on an existing Kubernetes persistent volume (PV) and persistent volume claim (PVC). 
The scripts also generate the domain YAML file, which can then be used to start the Kubernetes artifacts of the corresponding domain.\nPrerequisites Before you begin, perform the following steps:\n Review the Domain resource documentation. Ensure that you have executed all the preliminary steps documented in Prepare your environment. Ensure that the database is up and running. Create Docker registry secret This section should only be followed if you are using a registry to store your container images and have not downloaded the container image to the master and worker nodes.\n Create a Docker Registry Secret with name oig-docker:\n$ kubectl create secret docker-registry oig-docker -n \u0026lt;domain_namespace\u0026gt; --docker-username=\u0026#39;\u0026lt;user_name\u0026gt;\u0026#39; --docker-password=\u0026#39;\u0026lt;password\u0026gt;\u0026#39; --docker-server=\u0026#39;\u0026lt;docker_registry_url\u0026gt;\u0026#39; --docker-email=\u0026#39;\u0026lt;email_address\u0026gt;\u0026#39; For example:\n$ kubectl create secret docker-registry oig-docker -n oigns --docker-username=\u0026#39;user1\u0026#39; --docker-password=\u0026#39;\u0026lt;password\u0026gt;\u0026#39; --docker-server=\u0026#39;https://registry.example.com\u0026#39; --docker-email=\u0026#39;user1@example.com\u0026#39; The output will look similar to the following:\nsecret/oig-docker created Prepare the create domain script The sample scripts for Oracle Identity Governance domain deployment are available at $WORKDIR/kubernetes/create-oim-domain.\n Make a copy of the create-domain-inputs.yaml file:\n$ cd $WORKDIR/kubernetes/create-oim-domain/domain-home-on-pv $ cp create-domain-inputs.yaml create-domain-inputs.yaml.orig You must edit create-domain-inputs.yaml (or a copy of it) to provide the details for your domain. 
Please refer to the configuration parameters below to understand the information that you must provide in this file.\n Edit configuration parameters Edit the create-domain-inputs.yaml and modify the following parameters. Save the file when complete:\ndomainUID: \u0026lt;domain_uid\u0026gt; domainHome: /u01/oracle/user_projects/domains/\u0026lt;domain_uid\u0026gt; image: \u0026lt;image_name\u0026gt; imagePullSecretName: \u0026lt;docker-secret\u0026gt; weblogicCredentialsSecretName: \u0026lt;kubernetes_domain_secret\u0026gt; logHome: /u01/oracle/user_projects/domains/logs/\u0026lt;domain_uid\u0026gt; namespace: \u0026lt;domain_namespace\u0026gt; persistentVolumeClaimName: \u0026lt;pvc_name\u0026gt; rcuSchemaPrefix: \u0026lt;rcu_prefix\u0026gt; rcuDatabaseURL: \u0026lt;rcu_db_host\u0026gt;:\u0026lt;rcu_db_port\u0026gt;/\u0026lt;rcu_db_service_name\u0026gt; rcuCredentialsSecret: \u0026lt;kubernetes_rcu_secret\u0026gt; frontEndHost: \u0026lt;front_end_hostname\u0026gt; frontEndPort: \u0026lt;front_end_port\u0026gt; For example:\ndomainUID: governancedomain domainHome: /u01/oracle/user_projects/domains/governancedomain image: oracle/oig:12.2.1.4.0-8-ol7-211022.0723 imagePullSecretName: oig-docker weblogicCredentialsSecretName: oig-domain-credentials logHome: /u01/oracle/user_projects/domains/logs/governancedomain namespace: oigns persistentVolumeClaimName: governancedomain-domain-pvc rcuSchemaPrefix: OIGK8S rcuDatabaseURL: mydatabasehost.example.com:1521/orcl.example.com rcuCredentialsSecret: oig-rcu-credentials frontEndHost: masternode.example.com frontEndPort: 14100 Note: frontEndHost and frontEndPort should be set to the entry point host and port for OIM. This can be changed later in Set OIMFrontendURL using MBeans.\nNote: If using a container registry for your container images then you need to set image to the repository image name and imagePullSecretName to the name of the secret created earlier e.g: oig-docker. 
If not using a docker registry to pull docker images, comment out imagePullSecretName: \u0026lt;docker-secret\u0026gt;.\n A full list of parameters in the create-domain-inputs.yaml file are shown below:\n Parameter Definition Default adminPort Port number for the Administration Server inside the Kubernetes cluster. 7001 adminNodePort Port number of the Administration Server outside the Kubernetes cluster. 30701 adminServerName Name of the Administration Server. AdminServer clusterName Name of the WebLogic cluster instance to generate for the domain. By default the cluster name is oimcluster for the OIG domain. oimcluster configuredManagedServerCount Number of Managed Server instances to generate for the domain. 5 createDomainFilesDir Directory on the host machine to locate all the files to create a WebLogic domain, including the script that is specified in the createDomainScriptName property. By default, this directory is set to the relative path wlst, and the create script will use the built-in WLST offline scripts in the wlst directory to create the WebLogic domain. It can also be set to the relative path wdt, and then the built-in WDT scripts will be used instead. An absolute path is also supported to point to an arbitrary directory in the file system. The built-in scripts can be replaced by the user-provided scripts or model files as long as those files are in the specified directory. Files in this directory are put into a Kubernetes config map, which in turn is mounted to the createDomainScriptsMountPath, so that the Kubernetes pod can use the scripts and supporting files to create a domain home. wlst createDomainScriptsMountPath Mount path where the create domain scripts are located inside a pod. The create-domain.sh script creates a Kubernetes job to run the script (specified in the createDomainScriptName property) in a Kubernetes pod to create a domain home. 
Files in the createDomainFilesDir directory are mounted to this location in the pod, so that the Kubernetes pod can use the scripts and supporting files to create a domain home. /u01/weblogic createDomainScriptName Script that the create domain script uses to create a WebLogic domain. The create-domain.sh script creates a Kubernetes job to run this script to create a domain home. The script is located in the in-pod directory that is specified in the createDomainScriptsMountPath property. If you need to provide your own scripts to create the domain home, instead of using the built-in scripts, you must use this property to set the name of the script that you want the create domain job to run. create-domain-job.sh domainHome Home directory of the OIG domain. If not specified, the value is derived from the domainUID as /shared/domains/\u0026lt;domainUID\u0026gt;. /u01/oracle/user_projects/domains/oimcluster domainPVMountPath Mount path of the domain persistent volume. /u01/oracle/user_projects domainUID Unique ID that will be used to identify this particular domain. Used as the name of the generated WebLogic domain as well as the name of the Kubernetes domain resource. This ID must be unique across all domains in a Kubernetes cluster. This ID cannot contain any character that is not valid in a Kubernetes service name. oimcluster exposeAdminNodePort Boolean indicating if the Administration Server is exposed outside of the Kubernetes cluster. false exposeAdminT3Channel Boolean indicating if the T3 administrative channel is exposed outside the Kubernetes cluster. true image OIG Docker image. The operator requires OIG 12.2.1.4. Refer to OIG domains for details on how to obtain or create the image. oracle/oig:12.2.1.4.0 imagePullPolicy WebLogic Docker image pull policy. Legal values are IfNotPresent, Always, or Never IfNotPresent imagePullSecretName Name of the Kubernetes secret to access the Docker Store to pull the WebLogic Server Docker image. 
The presence of the secret will be validated when this parameter is specified. includeServerOutInPodLog Boolean indicating whether to include the server .out to the pod\u0026rsquo;s stdout. true initialManagedServerReplicas Number of Managed Servers to initially start for the domain. 2 javaOptions Java options for starting the Administration Server and Managed Servers. A Java option can have references to one or more of the following pre-defined variables to obtain WebLogic domain information: $(DOMAIN_NAME), $(DOMAIN_HOME), $(ADMIN_NAME), $(ADMIN_PORT), and $(SERVER_NAME). -Dweblogic.StdoutDebugEnabled=false logHome The in-pod location for the domain log, server logs, server out, and Node Manager log files. If not specified, the value is derived from the domainUID as /shared/logs/\u0026lt;domainUID\u0026gt;. /u01/oracle/user_projects/domains/logs/oimcluster managedServerNameBase Base string used to generate Managed Server names. oim_server managedServerPort Port number for each Managed Server. 8001 namespace Kubernetes namespace in which to create the domain. oimcluster persistentVolumeClaimName Name of the persistent volume claim created to host the domain home. If not specified, the value is derived from the domainUID as \u0026lt;domainUID\u0026gt;-weblogic-sample-pvc. oimcluster-domain-pvc productionModeEnabled Boolean indicating if production mode is enabled for the domain. true serverStartPolicy Determines which WebLogic Server instances will be started. Legal values are NEVER, IF_NEEDED, ADMIN_ONLY. IF_NEEDED t3ChannelPort Port for the T3 channel of the NetworkAccessPoint. 30012 t3PublicAddress Public address for the T3 channel. This should be set to the public address of the Kubernetes cluster. This would typically be a load balancer address. For development environments only: In a single server (all-in-one) Kubernetes deployment, this may be set to the address of the master, or at the very least, it must be set to the address of one of the worker nodes. 
If not provided, the script will attempt to set it to the IP address of the Kubernetes cluster weblogicCredentialsSecretName Name of the Kubernetes secret for the Administration Server\u0026rsquo;s user name and password. If not specified, then the value is derived from the domainUID as \u0026lt;domainUID\u0026gt;-weblogic-credentials. oimcluster-domain-credentials weblogicImagePullSecretName Name of the Kubernetes secret for the Docker Store, used to pull the WebLogic Server image. serverPodCpuRequest, serverPodMemoryRequest, serverPodCpuCLimit, serverPodMemoryLimit The maximum amount of compute resources allowed, and minimum amount of compute resources required, for each server pod. Please refer to the Kubernetes documentation on Managing Compute Resources for Containers for details. Resource requests and resource limits are not specified. rcuSchemaPrefix The schema prefix to use in the database, for example OIGK8S. You may wish to make this the same as the domainUID in order to simplify matching domains to their RCU schemas. OIGK8S rcuDatabaseURL The database URL. oracle-db.default.svc.cluster.local:1521/devpdb.k8s rcuCredentialsSecret The Kubernetes secret containing the database credentials. oimcluster-rcu-credentials frontEndHost The entry point URL for the OIM. Not set frontEndPort The entry point port for the OIM. Not set Note that the names of the Kubernetes resources in the generated YAML files may be formed with the value of some of the properties specified in the create-inputs.yaml file. Those properties include the adminServerName, clusterName and managedServerNameBase. If those values contain any characters that are invalid in a Kubernetes service name, those characters are converted to valid values in the generated YAML files. 
For example, an uppercase letter is converted to a lowercase letter and an underscore (\u0026quot;_\u0026quot;) is converted to a hyphen (\u0026quot;-\u0026quot;).\nThe sample demonstrates how to create an OIG domain home and associated Kubernetes resources for a domain that has one cluster only. In addition, the sample provides the capability for users to supply their own scripts to create the domain home for other use cases. The generated domain YAML file could also be modified to cover more use cases.\nRun the create domain script Generate the create domain script Run the create domain script, specifying your inputs file and an output directory to store the generated artifacts:\n$ cd $WORKDIR/kubernetes/create-oim-domain/domain-home-on-pv $ mkdir output $ ./create-domain.sh -i create-domain-inputs.yaml -o /\u0026lt;path to output-directory\u0026gt; For example:\n$ cd $WORKDIR/kubernetes/create-oim-domain/domain-home-on-pv $ mkdir output $ ./create-domain.sh -i create-domain-inputs.yaml -o output The output will look similar to the following:\nInput parameters being used export version=\u0026quot;create-weblogic-sample-domain-inputs-v1\u0026quot; export adminPort=\u0026quot;7001\u0026quot; export adminServerName=\u0026quot;AdminServer\u0026quot; export domainUID=\u0026quot;governancedomain\u0026quot; export domainHome=\u0026quot;/u01/oracle/user_projects/domains/governancedomain\u0026quot; export serverStartPolicy=\u0026quot;IF_NEEDED\u0026quot; export clusterName=\u0026quot;oim_cluster\u0026quot; export configuredManagedServerCount=\u0026quot;5\u0026quot; export initialManagedServerReplicas=\u0026quot;1\u0026quot; export managedServerNameBase=\u0026quot;oim_server\u0026quot; export managedServerPort=\u0026quot;14000\u0026quot; export image=\u0026quot;oracle/oig:12.2.1.4.0-8-ol7-211022.0723\u0026quot; export imagePullPolicy=\u0026quot;IfNotPresent\u0026quot; export imagePullSecretName=\u0026quot;oig-docker\u0026quot; export 
productionModeEnabled=\u0026quot;true\u0026quot; export weblogicCredentialsSecretName=\u0026quot;oig-domain-credentials\u0026quot; export includeServerOutInPodLog=\u0026quot;true\u0026quot; export logHome=\u0026quot;/u01/oracle/user_projects/domains/logs/governancedomain\u0026quot; export t3ChannelPort=\u0026quot;30012\u0026quot; export exposeAdminT3Channel=\u0026quot;false\u0026quot; export adminNodePort=\u0026quot;30701\u0026quot; export exposeAdminNodePort=\u0026quot;false\u0026quot; export namespace=\u0026quot;oigns\u0026quot; javaOptions=-Dweblogic.StdoutDebugEnabled=false export persistentVolumeClaimName=\u0026quot;governancedomain-domain-pvc\u0026quot; export domainPVMountPath=\u0026quot;/u01/oracle/user_projects/domains\u0026quot; export createDomainScriptsMountPath=\u0026quot;/u01/weblogic\u0026quot; export createDomainScriptName=\u0026quot;create-domain-job.sh\u0026quot; export createDomainFilesDir=\u0026quot;wlst\u0026quot; export rcuSchemaPrefix=\u0026quot;OIGK8S\u0026quot; export rcuDatabaseURL=\u0026quot;slc12cpn.us.oracle.com:1521/orcl.us.oracle.com\u0026quot; export rcuCredentialsSecret=\u0026quot;oig-rcu-credentials\u0026quot; export frontEndHost=\u0026quot;masternode.example.com\u0026quot; export frontEndPort=\u0026quot;14100\u0026quot; Generating output/weblogic-domains/governancedomain/create-domain-job.yaml Generating output/weblogic-domains/governancedomain/delete-domain-job.yaml Generating output/weblogic-domains/governancedomain/domain.yaml Checking to see if the secret governancedomain-domain-credentials exists in namespace oigns configmap/governancedomain-create-fmw-infra-sample-domain-job-cm created Checking the configmap governancedomain-create-fmw-infra-sample-domain-job-cm was created configmap/governancedomain-create-fmw-infra-sample-domain-job-cm labeled Checking if object type job with name governancedomain-create-fmw-infra-sample-domain-job exists No resources found in oigns namespace. 
Creating the domain by creating the job output/weblogic-domains/governancedomain/create-domain-job.yaml job.batch/governancedomain-create-fmw-infra-sample-domain-job created Waiting for the job to complete... status on iteration 1 of 40 pod governancedomain-create-fmw-infra-sample-domain-job-8cww8 status is Running status on iteration 2 of 40 pod governancedomain-create-fmw-infra-sample-domain-job-8cww8 status is Running status on iteration 3 of 40 pod governancedomain-create-fmw-infra-sample-domain-job-8cww8 status is Running status on iteration 4 of 40 pod governancedomain-create-fmw-infra-sample-domain-job-8cww8 status is Running status on iteration 5 of 40 pod governancedomain-create-fmw-infra-sample-domain-job-8cww8 status is Running status on iteration 6 of 40 pod governancedomain-create-fmw-infra-sample-domain-job-8cww8 status is Running status on iteration 7 of 40 pod governancedomain-create-fmw-infra-sample-domain-job-8cww8 status is Running status on iteration 8 of 40 pod governancedomain-create-fmw-infra-sample-domain-job-8cww8 status is Running status on iteration 9 of 40 pod governancedomain-create-fmw-infra-sample-domain-job-8cww8 status is Running status on iteration 10 of 40 pod governancedomain-create-fmw-infra-sample-domain-job-8cww8 status is Running status on iteration 11 of 40 pod governancedomain-create-fmw-infra-sample-domain-job-8cww8 status is Completed Domain governancedomain was created and will be started by the WebLogic Kubernetes Operator The following files were generated: output/weblogic-domains/governancedomain/create-domain-inputs.yaml output/weblogic-domains/governancedomain/create-domain-job.yaml output/weblogic-domains/governancedomain/domain.yaml sed Completed $ Note: If the create domain script creation fails, refer to the Troubleshooting section.\n Setting the OIM server memory parameters Navigate to the output directory:\n$ cd $WORKDIR/kubernetes/create-oim-domain/domain-home-on-pv/output/weblogic-domains/governancedomain 
Edit the domain_oim_soa.yaml and locate the section of the file starting with: - clusterName: oim_cluster. Immediately after the line: topologyKey: \u0026quot;kubernetes.io/hostname\u0026quot;, add the following lines:\nenv: - name: USER_MEM_ARGS value: \u0026quot;-Djava.security.egd=file:/dev/./urandom -Xms2408m -Xmx8192m\u0026quot; The file should look as follows:\n- clusterName: oim_cluster serverService: precreateService: true serverStartState: \u0026quot;RUNNING\u0026quot; serverPod: # Instructs Kubernetes scheduler to prefer nodes for new cluster members where there are not # already members of the same cluster. affinity: podAntiAffinity: preferredDuringSchedulingIgnoredDuringExecution: - weight: 100 podAffinityTerm: labelSelector: matchExpressions: - key: \u0026quot;weblogic.clusterName\u0026quot; operator: In values: - $(CLUSTER_NAME) topologyKey: \u0026quot;kubernetes.io/hostname\u0026quot; env: - name: USER_MEM_ARGS value: \u0026quot;-Djava.security.egd=file:/dev/./urandom -Xms2408m -Xmx8192m\u0026quot; replicas: 1 ... Run the create domain scripts Create the Kubernetes resource using the following command:\n$ cd $WORKDIR/kubernetes/create-oim-domain/domain-home-on-pv/output/weblogic-domains/governancedomain $ kubectl apply -f domain.yaml For example:\n$ cd $WORKDIR/kubernetes/create-oim-domain/domain-home-on-pv/output/weblogic-domains/governancedomain $ kubectl apply -f domain.yaml The output will look similar to the following:\ndomain.weblogic.oracle/governancedomain created Run the following command to view the status of the OIG pods:\n$ kubectl get pods -n oigns The output will initially look similar to the following:\nNAME READY STATUS RESTARTS AGE helper 1/1 Running 0 3h30m governancedomain-create-fmw-infra-sample-domain-job-8cww8 0/1 Completed 0 27m governancedomain-introspect-domain-job-p4brt 1/1 Running 0 6s The introspect-domain-job pod will be displayed first. 
Run the command again after several minutes and check to see that the Administration Server and SOA Server are both started. When started they should have STATUS = Running and READY = 1/1.\nNAME READY STATUS RESTARTS AGE helper 1/1 Running 0 3h38m governancedomain-adminserver 1/1 Running 0 7m30s governancedomain-create-fmw-infra-sample-domain-job-8cww8 0/1 Completed 0 35m governancedomain-soa-server1 1/1 Running 0 4m Note: It will take several minutes before all the pods listed above show. When a pod has a STATUS of 0/1 the pod is started but the OIG server associated with it is currently starting. While the pods are starting you can check the startup status in the pod logs, by running the following command:\n$ kubectl logs governancedomain-adminserver -n oigns $ kubectl logs governancedomain-soa-server1 -n oigns Once both pods are running, start the OIM Server using the following command:\n$ cd $WORKDIR/kubernetes/create-oim-domain/domain-home-on-pv/output/weblogic-domains/governancedomain/ $ kubectl apply -f domain_oim_soa.yaml For example:\n$ cd $WORKDIR/kubernetes/create-oim-domain/domain-home-on-pv/output/weblogic-domains/governancedomain/ $ kubectl apply -f domain_oim_soa.yaml The output will look similar to the following:\ndomain.weblogic.oracle/governancedomain configured Verify the results Verify the domain, pods and services Verify the domain, servers pods and services are created and in the READY state with a STATUS of 1/1, by running the following command:\n$ kubectl get all,domains -n oigns The output will look similar to the following:\nNAME READY STATUS RESTARTS AGE pod/governancedomain-adminserver 1/1 Running 0 16m pod/governancedomain-create-fmw-infra-sample-domain-job-8cww8 0/1 Completed 0 36m pod/governancedomain-oim-server1 1/1 Running 0 5m57s pod/governancedomain-soa-server1 1/1 Running 0 13m pod/helper 1/1 Running 0 3h40m NAME TYPE CLUSTER-IP EXTERNAL-IP PORT(S) AGE service/governancedomain-adminserver ClusterIP None \u0026lt;none\u0026gt; 
7001/TCP 16m service/governancedomain-cluster-oim-cluster ClusterIP 10.97.121.159 \u0026lt;none\u0026gt; 14000/TCP 13m service/governancedomain-cluster-soa-cluster ClusterIP 10.111.231.242 \u0026lt;none\u0026gt; 8001/TCP 13m service/governancedomain-oim-server1 ClusterIP None \u0026lt;none\u0026gt; 14000/TCP 5m57s service/governancedomain-oim-server2 ClusterIP 10.108.139.30 \u0026lt;none\u0026gt; 14000/TCP 5m57s service/governancedomain-oim-server3 ClusterIP 10.97.170.104 \u0026lt;none\u0026gt; 14000/TCP 5m57s service/governancedomain-oim-server4 ClusterIP 10.99.82.214 \u0026lt;none\u0026gt; 14000/TCP 5m57s service/governancedomain-oim-server5 ClusterIP 10.98.75.228 \u0026lt;none\u0026gt; 14000/TCP 5m57s service/governancedomain-soa-server1 ClusterIP None \u0026lt;none\u0026gt; 8001/TCP 13m service/governancedomain-soa-server2 ClusterIP 10.107.232.220 \u0026lt;none\u0026gt; 8001/TCP 13m service/governancedomain-soa-server3 ClusterIP 10.108.203.6 \u0026lt;none\u0026gt; 8001/TCP 13m service/governancedomain-soa-server4 ClusterIP 10.96.178.0 \u0026lt;none\u0026gt; 8001/TCP 13m service/governancedomain-soa-server5 ClusterIP 10.107.83.62 \u0026lt;none\u0026gt; 8001/TCP 13m NAME COMPLETIONS DURATION AGE job.batch/governancedomain-create-fmw-infra-sample-domain-job 1/1 5m30s 36m NAME AGE domain.weblogic.oracle/governancedomain 17m Note: It will take several minutes before all the services listed above show. While the governancedomain-oim-server1 pod has a READY status of 0/1 the pod is started but the OIG server associated with it is currently starting. While the pod is starting you can check the startup status in the pod logs, by running the following command:\n$ kubectl logs governancedomain-oim-server1 -n oigns The default domain created by the script has the following characteristics:\n An Administration Server named AdminServer listening on port 7001. A configured OIG cluster named oim_cluster of size 5. A configured SOA cluster named soa_cluster of size 5. 
One started OIG managed Server, named oim_server1, listening on port 14000. One started SOA managed Server, named soa_server1, listening on port 8001. Log files that are located in \u0026lt;persistent_volume\u0026gt;/logs/\u0026lt;domainUID\u0026gt; Verify the domain Run the following command to describe the domain:\n$ kubectl describe domain \u0026lt;domain_uid\u0026gt; -n \u0026lt;namespace\u0026gt; For example:\n$ kubectl describe domain governancedomain -n oigns The output will look similar to the following:\nName: governancedomain Namespace: oigns Labels: weblogic.domainUID=governancedomain Annotations: \u0026lt;none\u0026gt; API Version: weblogic.oracle/v8 Kind: Domain Metadata: Creation Timestamp: 2021-11-12T14:50:18Z Generation: 2 Managed Fields: API Version: weblogic.oracle/v8 Fields Type: FieldsV1 fieldsV1: f:metadata: f:annotations: .: f:kubectl.kubernetes.io/last-applied-configuration: f:labels: .: f:weblogic.domainUID: Manager: kubectl-client-side-apply Operation: Update Time: 2021-11-12T14:59:44Z API Version: weblogic.oracle/v8 Fields Type: FieldsV1 fieldsV1: f:status: .: f:clusters: f:conditions: f:introspectJobFailureCount: f:servers: f:startTime: Manager: Kubernetes Java Client Operation: Update Time: 2021-11-12T14:59:49Z Resource Version: 383381 UID: ea95c549-c414-42a6-8de4-beaf1204872e Spec: Admin Server: Server Pod: Env: Name: USER_MEM_ARGS Value: -Djava.security.egd=file:/dev/./urandom -Xms512m -Xmx1024m Server Start State: RUNNING Clusters: Cluster Name: soa_cluster Replicas: 1 Server Pod: Affinity: Pod Anti Affinity: Preferred During Scheduling Ignored During Execution: Pod Affinity Term: Label Selector: Match Expressions: Key: weblogic.clusterName Operator: In Values: $(CLUSTER_NAME) Topology Key: kubernetes.io/hostname Weight: 100 Server Service: Precreate Service: true Server Start State: RUNNING Cluster Name: oim_cluster Replicas: 1 Server Pod: Affinity: Pod Anti Affinity: Preferred During Scheduling Ignored During Execution: Pod Affinity 
Term: Label Selector: Match Expressions: Key: weblogic.clusterName Operator: In Values: $(CLUSTER_NAME) Topology Key: kubernetes.io/hostname Weight: 100 Env: Name: USER_MEM_ARGS Value: -Djava.security.egd=file:/dev/./urandom -Xms2408m -Xmx8192m Server Service: Precreate Service: true Server Start State: RUNNING Data Home: Domain Home: /u01/oracle/user_projects/domains/governancedomain Domain Home Source Type: PersistentVolume Http Access Log In Log Home: true Image: oracle/oig:12.2.1.4.0-8-ol7-211022.0723 Image Pull Policy: IfNotPresent Include Server Out In Pod Log: true Log Home: /u01/oracle/user_projects/domains/logs/governancedomain Log Home Enabled: true Server Pod: Env: Name: JAVA_OPTIONS Value: -Dweblogic.StdoutDebugEnabled=false Name: USER_MEM_ARGS Value: -Djava.security.egd=file:/dev/./urandom -Xms256m -Xmx1024m Volume Mounts: Mount Path: /u01/oracle/user_projects/domains Name: weblogic-domain-storage-volume Volumes: Name: weblogic-domain-storage-volume Persistent Volume Claim: Claim Name: governancedomain-domain-pvc Server Start Policy: IF_NEEDED Web Logic Credentials Secret: Name: oig-domain-credentials Status: Clusters: Cluster Name: oim_cluster Maximum Replicas: 5 Minimum Replicas: 0 Ready Replicas: 1 Replicas: 1 Replicas Goal: 1 Cluster Name: soa_cluster Maximum Replicas: 5 Minimum Replicas: 0 Ready Replicas: 1 Replicas: 1 Replicas Goal: 1 Conditions: Last Transition Time: 2021-11-12T15:06:30.709900Z Reason: ServersReady Status: True Type: Available Introspect Job Failure Count: 0 Servers: Desired State: RUNNING Health: Activation Time: 2021-11-12T14:54:46.370000Z Overall Health: ok Subsystems: Subsystem Name: ServerRuntime Symptoms: Node Name: 10.250.40.59 Server Name: AdminServer State: RUNNING Cluster Name: oim_cluster Desired State: RUNNING Health: Activation Time: 2021-11-12T15:06:21.693000Z Overall Health: ok Subsystems: Subsystem Name: ServerRuntime Symptoms: Node Name: 10.250.40.59 Server Name: oim_server1 State: RUNNING Cluster Name: 
oim_cluster Desired State: SHUTDOWN Server Name: oim_server2 Cluster Name: oim_cluster Desired State: SHUTDOWN Server Name: oim_server3 Cluster Name: oim_cluster Desired State: SHUTDOWN Server Name: oim_server4 Cluster Name: oim_cluster Desired State: SHUTDOWN Server Name: oim_server5 Cluster Name: soa_cluster Desired State: RUNNING Health: Activation Time: 2021-11-12T14:57:49.506000Z Overall Health: ok Subsystems: Subsystem Name: ServerRuntime Symptoms: Node Name: 10.250.40.59 Server Name: soa_server1 State: RUNNING Cluster Name: soa_cluster Desired State: SHUTDOWN Server Name: soa_server2 Cluster Name: soa_cluster Desired State: SHUTDOWN Server Name: soa_server3 Cluster Name: soa_cluster Desired State: SHUTDOWN Server Name: soa_server4 Cluster Name: soa_cluster Desired State: SHUTDOWN Server Name: soa_server5 Start Time: 2021-11-12T14:50:19.148541Z Events: Type Reason Age From Message ---- ------ ---- ---- ------- Normal DomainCreated 19m weblogic.operator Domain resource governancedomain was created Normal DomainProcessingCompleted 12m weblogic.operator Successfully completed processing domain resource governancedomain Normal DomainChanged 10m weblogic.operator Domain resource governancedomain was changed Normal DomainProcessingStarting 10m (x2 over 19m) weblogic.operator Creating or updating Kubernetes presence for WebLogic Domain with UID governancedomai In the Status section of the output, the available servers and clusters are listed.\n Verify the pods Use the following command to see the pods running the servers and which nodes they are running on:\n$ kubectl get pods -n \u0026lt;namespace\u0026gt; -o wide For example:\n$ kubectl get pods -n oigns -o wide The output will look similar to the following:\nNAME READY STATUS RESTARTS AGE IP NODE NOMINATED NODE READINESS GATES helper 1/1 Running 0 3h50m 10.244.1.39 worker-node2 \u0026lt;none\u0026gt; \u0026lt;none\u0026gt; governancedomain-adminserver 1/1 Running 0 27m 10.244.1.42 worker-node2 
\u0026lt;none\u0026gt; \u0026lt;none\u0026gt; governancedomain-create-fmw-infra-sample-domain-job-8cww8 0/1 Completed 0 47m 10.244.1.40 worker-node2 \u0026lt;none\u0026gt; \u0026lt;none\u0026gt; governancedomain-oim-server1 1/1 Running 0 16m 10.244.1.44 worker-node2 \u0026lt;none\u0026gt; \u0026lt;none\u0026gt; governancedomain-soa-server1 1/1 Running 0 24m 10.244.1.43 worker-node2 \u0026lt;none\u0026gt; \u0026lt;none\u0026gt; You are now ready to configure an Ingress to direct traffic for your OIG domain as per Configure an ingress for an OIG domain.\n" +}, +{ + "uri": "/fmw-kubernetes/21.4.2/oig/manage-oig-domains/logging-and-visualization/", + "title": "Logging and visualization", + "tags": [], + "description": "Describes the steps for logging and visualization with Elasticsearch and Kibana.", + "content": "After the OIG domain is set up you can publish operator and WebLogic Server logs into Elasticsearch and interact with them in Kibana.\nIn Prepare your environment if you decided to use the Elasticsearch and Kibana by setting the parameter elkIntegrationEnabled to true, then the steps below must be followed to complete the setup.\nIf you did not set elkIntegrationEnabled to true and want to do so post configuration, run the following command from the $WORKDIR directory:\n$ helm upgrade --reuse-values --namespace operator --set \u0026#34;elkIntegrationEnabled=true\u0026#34; --set \u0026#34;logStashImage=logstash:6.6.0\u0026#34; --set \u0026#34;elasticSearchHost=elasticsearch.default.svc.cluster.local\u0026#34; --set \u0026#34;elasticSearchPort=9200\u0026#34; --wait weblogic-kubernetes-operator kubernetes/charts/weblogic-operator The output will look similar to the following:\nRelease \u0026quot;weblogic-kubernetes-operator\u0026quot; has been upgraded. Happy Helming! 
NAME: weblogic-kubernetes-operator LAST DEPLOYED: Mon Nov 15 09:04:11 2021 NAMESPACE: operator STATUS: deployed REVISION: 3 TEST SUITE: None Install Elasticsearch and Kibana Create the Kubernetes resource using the following command:\n$ kubectl apply -f $WORKDIR/kubernetes/elasticsearch-and-kibana/elasticsearch_and_kibana.yaml The output will look similar to the following:\ndeployment.apps/elasticsearch created service/elasticsearch created deployment.apps/kibana created service/kibana created Run the following command to ensure Elasticsearch is used by the operator:\n$ helm get values --all weblogic-kubernetes-operator -n opns The output will look similar to the following:\nCOMPUTED VALUES: clusterSizePaddingValidationEnabled: true domainNamespaceLabelSelector: weblogic-operator=enabled domainNamespaceSelectionStrategy: LabelSelector domainNamespaces: - default elasticSearchHost: elasticsearch.default.svc.cluster.local elasticSearchPort: 9200 elkIntegrationEnabled: true enableClusterRoleBinding: true externalDebugHttpPort: 30999 externalRestEnabled: false externalRestHttpsPort: 31001 externalServiceNameSuffix: -ext image: weblogic-kubernetes-operator:3.3.0 imagePullPolicy: IfNotPresent internalDebugHttpPort: 30999 introspectorJobNameSuffix: -introspector javaLoggingFileCount: 10 javaLoggingFileSizeLimit: 20000000 javaLoggingLevel: FINE logStashImage: logstash:6.6.0 remoteDebugNodePortEnabled: false serviceAccount: op-sa suspendOnDebugStartup: false To check that Elasticsearch and Kibana are deployed in the Kubernetes cluster, run the following command:\n$ kubectl get pods The output will look similar to the following:\nNAME READY STATUS RESTARTS AGE elasticsearch-857bd5ff6b-tvqdn 1/1 Running 0 2m9s kibana-594465687d-zc2rt 1/1 Running 0 2m9s Create the logstash pod OIG Server logs can be pushed to the Elasticsearch server using the logstash pod. 
The logstash pod needs access to the persistent volume of the OIG domain created previously, for example governancedomain-domain-pv. The steps to create the logstash pod are as follows:\n Obtain the OIG domain persistence volume details:\n$ kubectl get pv -n \u0026lt;domain_namespace\u0026gt; For example:\n$ kubectl get pv -n oigns The output will look similar to the following:\nNAME CAPACITY ACCESS MODES RECLAIM POLICY STATUS CLAIM STORAGECLASS REASON AGE governancedomain-domain-pv 10Gi RWX Retain Bound oigns/governancedomain-domain-pvc governancedomain-oim-storage-class 28h Make note of the CLAIM value, for example in this case governancedomain-domain-pvc\n Run the following command to get the mountPath of your domain:\n$ kubectl describe domains \u0026lt;domain_uid\u0026gt; -n \u0026lt;domain_namespace\u0026gt; | grep \u0026#34;Mount Path\u0026#34; For example:\n$ kubectl describe domains governancedomain -n oigns | grep \u0026#34;Mount Path\u0026#34; The output will look similar to the following:\nMount Path: /u01/oracle/user_projects/domains Navigate to the $WORKDIR/kubernetes/elasticsearch-and-kibana directory and create a logstash.yaml file as follows. 
Change the claimName and mountPath values to match the values returned in the previous commands:\napiVersion: apps/v1 kind: Deployment metadata: name: logstash-wls namespace: oigns spec: selector: matchLabels: k8s-app: logstash-wls template: # create pods using pod definition in this template metadata: labels: k8s-app: logstash-wls spec: volumes: - name: weblogic-domain-storage-volume persistentVolumeClaim: claimName: governancedomain-domain-pvc - name: shared-logs emptyDir: {} containers: - name: logstash image: logstash:6.6.0 command: [\u0026quot;/bin/sh\u0026quot;] args: [\u0026quot;/usr/share/logstash/bin/logstash\u0026quot;, \u0026quot;-f\u0026quot;, \u0026quot;/u01/oracle/user_projects/domains/logstash/logstash.conf\u0026quot;] imagePullPolicy: IfNotPresent volumeMounts: - mountPath: /u01/oracle/user_projects/domains name: weblogic-domain-storage-volume - name: shared-logs mountPath: /shared-logs ports: - containerPort: 5044 name: logstash In the NFS persistent volume directory that corresponds to the mountPath /u01/oracle/user_projects/domains, create a logstash directory. For example:\n$ mkdir -p /scratch/OIGK8S/governancedomainpv/logstash Create a logstash.conf in the newly created logstash directory that contains the following. 
Make sure the paths correspond to your mountPath and domain name:\ninput { file { path =\u0026gt; \u0026quot;/u01/oracle/user_projects/domains/logs/governancedomain/AdminServer*.log\u0026quot; tags =\u0026gt; \u0026quot;Adminserver_log\u0026quot; start_position =\u0026gt; beginning } file { path =\u0026gt; \u0026quot;/u01/oracle/user_projects/domains/logs/governancedomain/soa_server*.log\u0026quot; tags =\u0026gt; \u0026quot;soaserver_log\u0026quot; start_position =\u0026gt; beginning } file { path =\u0026gt; \u0026quot;/u01/oracle/user_projects/domains/logs/governancedomain/oim_server*.log\u0026quot; tags =\u0026gt; \u0026quot;Oimserver_log\u0026quot; start_position =\u0026gt; beginning } file { path =\u0026gt; \u0026quot;/u01/oracle/user_projects/domains/governancedomain/servers/AdminServer/logs/AdminServer-diagnostic.log\u0026quot; tags =\u0026gt; \u0026quot;Adminserver_diagnostic\u0026quot; start_position =\u0026gt; beginning } file { path =\u0026gt; \u0026quot;/u01/oracle/user_projects/domains/governancedomain/servers/**/logs/soa_server*-diagnostic.log\u0026quot; tags =\u0026gt; \u0026quot;Soa_diagnostic\u0026quot; start_position =\u0026gt; beginning } file { path =\u0026gt; \u0026quot;/u01/oracle/user_projects/domains/governancedomain/servers/**/logs/oim_server*-diagnostic.log\u0026quot; tags =\u0026gt; \u0026quot;Oimserver_diagnostic\u0026quot; start_position =\u0026gt; beginning } file { path =\u0026gt; \u0026quot;/u01/oracle/user_projects/domains/governancedomain/servers/**/logs/access*.log\u0026quot; tags =\u0026gt; \u0026quot;Access_logs\u0026quot; start_position =\u0026gt; beginning } } filter { grok { match =\u0026gt; [ \u0026quot;message\u0026quot;, \u0026quot;\u0026lt;%{DATA:log_timestamp}\u0026gt; \u0026lt;%{WORD:log_level}\u0026gt; \u0026lt;%{WORD:thread}\u0026gt; \u0026lt;%{HOSTNAME:hostname}\u0026gt; \u0026lt;%{HOSTNAME:servername}\u0026gt; \u0026lt;%{DATA:timer}\u0026gt; \u0026lt;\u0026lt;%{DATA:kernel}\u0026gt;\u0026gt; \u0026lt;\u0026gt; 
\u0026lt;%{DATA:uuid}\u0026gt; \u0026lt;%{NUMBER:timestamp}\u0026gt; \u0026lt;%{DATA:misc}\u0026gt; \u0026lt;%{DATA:log_number}\u0026gt; \u0026lt;%{DATA:log_message}\u0026gt;\u0026quot; ] } if \u0026quot;_grokparsefailure\u0026quot; in [tags] { mutate { remove_tag =\u0026gt; [ \u0026quot;_grokparsefailure\u0026quot; ] } } } output { elasticsearch { hosts =\u0026gt; [\u0026quot;elasticsearch.default.svc.cluster.local:9200\u0026quot;] } } Deploy the logstash pod by executing the following command:\n$ kubectl create -f $WORKDIR/kubernetes/elasticsearch-and-kibana/logstash.yaml The output will look similar to the following:\ndeployment.apps/logstash-wls created Run the following command to check the logstash pod is created correctly:\n$ kubectl get pods -n \u0026lt;namespace\u0026gt; For example:\n$ kubectl get pods -n oigns The output should look similar to the following:\nNAME READY STATUS RESTARTS AGE governancedomain-adminserver 1/1 Running 0 90m governancedomain-create-fmw-infra-sample-domain-job-8cww8 0/1 Completed 0 25h governancedomain-oim-server1 1/1 Running 0 87m governancedomain-soa-server1 1/1 Running 0 87m logstash-wls-f448b44c8-92l27 1/1 Running 0 7s Then run the following to get the Elasticsearch pod name:\n$ kubectl get pods The output should look similar to the following:\nNAME READY STATUS RESTARTS AGE elasticsearch-857bd5ff6b-tvqdn 1/1 Running 0 7m48s kibana-594465687d-zc2rt 1/1 Running 0 7m48s Verify and access the Kibana console Check if the indices are created correctly in the elasticsearch pod:\n$ kubectl exec -it elasticsearch-857bd5ff6b-tvqdn -- /bin/bash This will take you into a bash shell in the elasticsearch pod:\n[root@elasticsearch-857bd5ff6b-tvqdn elasticsearch]# In the elasticsearch bash shell run the following to check the indices:\n[root@elasticsearch-857bd5ff6b-tvqdn elasticsearch]# curl -i \u0026#34;127.0.0.1:9200/_cat/indices?v\u0026#34; The output will look similar to the following:\nHTTP/1.1 200 OK content-type: text/plain; 
charset=UTF-8 content-length: 580 health status index uuid pri rep docs.count docs.deleted store.size pri.store.size green open .kibana_1 Nb3C1lpMQrmptapuYb2PIQ 1 0 2 0 7.6kb 7.6kb yellow open logstash-2021.11.11 OWbA_M5EQ2m6l2xZdS2zXw 5 1 150 0 107.6kb 107.6kb green open .kibana_task_manager Qn_oHzAvQlWVcj_lItVdKQ 1 0 2 0 12.5kb 12.5kb yellow open logstash-2021.11.15 5-V6CXrnQrOOmZDW4JOUgw 5 1 126338 0 45.6mb 45.6mb Exit the bash shell by typing exit.\n Find the Kibana port by running the following command:\n$ kubectl get svc The output will look similar to the following:\nNAME TYPE CLUSTER-IP EXTERNAL-IP PORT(S) AGE elasticsearch ClusterIP 10.111.37.189 \u0026lt;none\u0026gt; 9200/TCP,9300/TCP 11m kibana NodePort 10.111.224.230 \u0026lt;none\u0026gt; 5601:31490/TCP 11m kubernetes ClusterIP 10.96.0.1 \u0026lt;none\u0026gt; 443/TCP 7d5h In the example above the Kibana port is 31490.\n Access the Kibana console with http://${MASTERNODE-HOSTNAME}:${KIBANA-PORT}/app/kibana.\n Click on Dashboard in the left hand Navigation Menu.\n In the Create index pattern page enter logstash* and click Next Step.\n From the Time Filter field name drop down menu select @timestamp and click Create index pattern.\n Once the index pattern is created click on Discover in the navigation menu to view the logs.\n For more details on how to use the Kibana console see the Kibana Guide\n" +}, +{ + "uri": "/fmw-kubernetes/21.4.2/oid/troubleshooting/", + "title": "Troubleshooting", + "tags": [], + "description": "How to Troubleshoot issues.", + "content": " Check the Status of a Namespace View POD Logs View Pod Description Check the Status of a Namespace To check the status of objects in a namespace use the following command:\n$ kubectl --namespace \u0026lt;namespace\u0026gt; get nodes,pod,service,secret,pv,pvc,ingress -o wide For example:\n$ kubectl --namespace oidns get nodes,pod,service,secret,pv,pvc,ingress -o wide Output will be similar to the following:\nNAME STATUS ROLES AGE VERSION 
INTERNAL-IP EXTERNAL-IP OS-IMAGE KERNEL-VERSION CONTAINER-RUNTIME node/myoidhost Ready master 99d v1.18.4 100.94.12.231 \u0026lt;none\u0026gt; Oracle Linux Server 7.8 5.4.17-2036.102.0.2.el7uek.x86_64 docker://19.3.11 NAME READY STATUS RESTARTS AGE IP NODE NOMINATED NODE READINESS GATES pod/oidhost1 1/1 Running 0 3h34m 10.244.0.137 myoidhost \u0026lt;none\u0026gt; \u0026lt;none\u0026gt; pod/oidhost2 1/1 Running 0 3h34m 10.244.0.138 myoidhost \u0026lt;none\u0026gt; \u0026lt;none\u0026gt; pod/oidhost3 1/1 Running 0 3h34m 10.244.0.136 myoidhost \u0026lt;none\u0026gt; \u0026lt;none\u0026gt; NAME TYPE CLUSTER-IP EXTERNAL-IP PORT(S) AGE SELECTOR service/oid-lbr-ldap ClusterIP 10.103.103.151 \u0026lt;none\u0026gt; 3060/TCP,3131/TCP 3h34m app.kubernetes.io/instance=oid,app.kubernetes.io/name=oid service/oidhost1 ClusterIP 10.108.25.249 \u0026lt;none\u0026gt; 3060/TCP,3131/TCP,7001/TCP,7002/TCP 3h34m app.kubernetes.io/instance=oid,app.kubernetes.io/name=oid,oid/instance=oidhost1 service/oidhost2 ClusterIP 10.99.99.62 \u0026lt;none\u0026gt; 3060/TCP,3131/TCP 3h34m app.kubernetes.io/instance=oid,app.kubernetes.io/name=oid,oid/instance=oidhost2 service/oidhost3 ClusterIP 10.107.13.174 \u0026lt;none\u0026gt; 3060/TCP,3131/TCP 3h34m app.kubernetes.io/instance=oid,app.kubernetes.io/name=oid,oid/instance=oidhost3 NAME TYPE DATA AGE secret/default-token-ngdrb kubernetes.io/service-account-token 3 99d secret/oid-creds opaque 7 3h34m secret/oid-tls-cert kubernetes.io/tls 2 3h34m secret/oid-token-n9wp6 kubernetes.io/service-account-token 3 3h34m secret/oiddomain kubernetes.io/tls 2 48d secret/sh.helm.release.v1.app.v1 helm.sh/release.v1 1 3h34m NAME CAPACITY ACCESS MODES RECLAIM POLICY STATUS CLAIM STORAGECLASS REASON AGE VOLUMEMODE persistentvolume/oid-pv 20Gi RWX Delete Bound myhelmns/oid-pvc manual 3h34m Filesystem NAME STATUS VOLUME CAPACITY ACCESS MODES STORAGECLASS AGE VOLUMEMODE persistentvolumeclaim/oid-pvc Bound oid-pv 20Gi RWX manual 3h34m Filesystem NAME CLASS HOSTS ADDRESS 
PORTS AGE ingress.extensions/oid-ingress-nginx \u0026lt;none\u0026gt; * 10.103.111.88 80, 443 3h34m Include/exclude elements (nodes,pod,service,secret,pv,pvc,ingress) as required.\nView POD Logs To view logs for a POD use the following command:\n$ kubectl logs \u0026lt;pod\u0026gt; -n \u0026lt;namespace\u0026gt; For example:\n$ kubectl logs oidhost1 -n oidns Output will depend on the application running in the POD.\nView Pod Description Details about a POD can be viewed using the kubectl describe command:\n$ kubectl describe pod \u0026lt;pod\u0026gt; -n \u0026lt;namespace\u0026gt; For example:\n$ kubectl describe pod oidhost1 -n oidns Output will be similar to the following:\nName: oidhost1 Namespace: oidns Priority: 0 Node: myoidhost/100.94.12.231 Start Time: Tue, 19 Oct 2021 05:27:24 +0000 Labels: app.kubernetes.io/instance=oid app.kubernetes.io/managed-by=Helm app.kubernetes.io/name=oid app.kubernetes.io/version=12.2.1.4.0 helm.sh/chart=oid-0.1 oid/instance=oidhost1 Annotations: meta.helm.sh/release-name: oid meta.helm.sh/release-namespace: oidns Status: Running IP: 10.244.0.137 IPs: IP: 10.244.0.137 Containers: oid: Container ID: docker://8017433f42d2d6159e89b03daf47ac2f854ecbad9df3b92e157c36d353fd9cb8 Image: oracle/oid:12.2.1.4.0 Image ID: docker-pullable://oracle/oid@sha256:acc2df0a87bb53fcf71abe28e5387794f94b9f2eb900404dee7b2ffafe27887d Ports: 3060/TCP, 3131/TCP, 7001/TCP, 7002/TCP Host Ports: 0/TCP, 0/TCP, 0/TCP, 0/TCP State: Running Started: Tue, 19 Oct 2021 05:27:26 +0000 Ready: True Restart Count: 0 Readiness: exec [/u01/oracle/dockertools/healthcheck_status.sh] delay=600s timeout=30s period=60s #success=1 #failure=15 Environment: INSTANCE_TYPE: PRIMARY sleepBeforeConfig: 180 INSTANCE_NAME: oid1 ADMIN_LISTEN_HOST: oidhost1 REALM_DN: dc=oid,dc=example,dc=com CONNECTION_STRING: oid.example.com:1521/oidpdb.example.com LDAP_PORT: 3060 LDAPS_PORT: 3131 ADMIN_LISTEN_PORT: 7001 ADMIN_LISTEN_SSL_PORT: 7002 DOMAIN_NAME: oid_domain DOMAIN_HOME: 
/u01/oracle/user_projects/domains/oid_domain RCUPREFIX: OIDPD ADMIN_USER: \u0026lt;set to the key 'adminUser' in secret 'oid-creds'\u0026gt; Optional: false ADMIN_PASSWORD: \u0026lt;set to the key 'adminPassword' in secret 'oid-creds'\u0026gt; Optional: false DB_USER: \u0026lt;set to the key 'dbUser' in secret 'oid-creds'\u0026gt; Optional: false DB_PASSWORD: \u0026lt;set to the key 'dbPassword' in secret 'oid-creds'\u0026gt; Optional: false DB_SCHEMA_PASSWORD: \u0026lt;set to the key 'dbschemaPassword' in secret 'oid-creds'\u0026gt; Optional: false ORCL_ADMIN_PASSWORD: \u0026lt;set to the key 'orcladminPassword' in secret 'oid-creds'\u0026gt; Optional: false SSL_WALLET_PASSWORD: \u0026lt;set to the key 'sslwalletPassword' in secret 'oid-creds'\u0026gt; Optional: false ldapPort: 3060 ldapsPort: 3131 httpPort: 7001 httpsPort: 7002 Mounts: /u01/oracle/user_projects from oid-pv (rw) /var/run/secrets/kubernetes.io/serviceaccount from oid-token-n9wp6 (ro) Conditions: Type Status Initialized True Ready True ContainersReady True PodScheduled True Volumes: oid-pv: Type: PersistentVolumeClaim (a reference to a PersistentVolumeClaim in the same namespace) ClaimName: oid-pvc ReadOnly: false oid-token-n9wp6: Type: Secret (a volume populated by a Secret) SecretName: oid-token-n9wp6 Optional: false QoS Class: BestEffort Node-Selectors: \u0026lt;none\u0026gt; Tolerations: node.kubernetes.io/not-ready:NoExecute for 300s node.kubernetes.io/unreachable:NoExecute for 300s Events: \u0026lt;none\u0026gt; " +}, +{ + "uri": "/fmw-kubernetes/21.4.2/soa-domains/create-or-update-image/", + "title": "Create or update an image", + "tags": [], + "description": "Create or update an Oracle SOA Suite Docker image used for deploying Oracle SOA Suite domains. 
An Oracle SOA Suite Docker image can be created using the WebLogic Image Tool or using the Dockerfile approach.", + "content": "If you have access to the My Oracle Support (MOS), and there is a need to build a new image with a patch (bundle or interim), it is recommended to use the WebLogic Image Tool to build an Oracle SOA Suite image for production deployments.\n Create or update an Oracle SOA Suite Docker image using the WebLogic Image Tool Set up the WebLogic Image Tool Create an image Update an image Create an Oracle SOA Suite Docker image using Dockerfile Create or update an Oracle SOA Suite Docker image using the WebLogic Image Tool Using the WebLogic Image Tool, you can create a new Oracle SOA Suite Docker image (can include patches as well) or update an existing image with one or more patches (bundle patch and interim patches).\n Recommendations:\n Use create for creating a new Oracle SOA Suite Docker image either: without any patches or, containing the Oracle SOA Suite binaries, bundle patch and interim patches. This is the recommended approach if you have access to the Oracle SOA Suite patches because it optimizes the size of the image. Use update for patching an existing Oracle SOA Suite Docker image with a single interim patch. Note that the patched image size may increase considerably due to additional image layers introduced by the patch application tool. Set up the WebLogic Image Tool Prerequisites Set up the WebLogic Image Tool Validate setup WebLogic Image Tool build directory WebLogic Image Tool cache Set up additional build scripts Prerequisites Verify that your environment meets the following prerequisites:\n Docker client and daemon on the build machine, with minimum Docker version 18.03.1.ce. Bash version 4.0 or later, to enable the command complete feature. JAVA_HOME environment variable set to the appropriate JDK location. Set up the WebLogic Image Tool To set up the WebLogic Image Tool:\n Create a working directory and change to it. 
In these steps, this directory is imagetool-setup.\n$ mkdir imagetool-setup $ cd imagetool-setup Download the latest version of the WebLogic Image Tool from the releases page.\n Unzip the release ZIP file to the imagetool-setup directory.\n Execute the following commands to set up the WebLogic Image Tool on a Linux environment:\n$ cd imagetool-setup/imagetool/bin $ source setup.sh Validate setup To validate the setup of the WebLogic Image Tool:\n Enter the following command to retrieve the version of the WebLogic Image Tool:\n$ imagetool --version Enter imagetool then press the Tab key to display the available imagetool commands:\n$ imagetool \u0026lt;TAB\u0026gt; cache create help rebase update WebLogic Image Tool build directory The WebLogic Image Tool creates a temporary Docker context directory, prefixed by wlsimgbuilder_temp, every time the tool runs. Under normal circumstances, this context directory will be deleted. However, if the process is aborted or the tool is unable to remove the directory, it is safe for you to delete it manually. By default, the WebLogic Image Tool creates the Docker context directory under the user\u0026rsquo;s home directory. If you prefer to use a different directory for the temporary context, set the environment variable WLSIMG_BLDDIR:\n$ export WLSIMG_BLDDIR=\u0026#34;/path/to/build/dir\u0026#34; WebLogic Image Tool cache The WebLogic Image Tool maintains a local file cache store. This store is used to look up where the Java, WebLogic Server installers, and WebLogic Server patches reside in the local file system. By default, the cache store is located in the user\u0026rsquo;s $HOME/cache directory. Under this directory, the lookup information is stored in the .metadata file. All automatically downloaded patches also reside in this directory. 
You can change the default cache store location by setting the environment variable WLSIMG_CACHEDIR:\n$ export WLSIMG_CACHEDIR=\u0026#34;/path/to/cachedir\u0026#34; Set up additional build scripts Creating an Oracle SOA Suite Docker image using the WebLogic Image Tool requires additional container scripts for Oracle SOA Suite domains.\n Clone the docker-images repository to set up those scripts. In these steps, this directory is DOCKER_REPO:\n$ cd imagetool-setup $ git clone https://github.com/oracle/docker-images.git Copy the additional WebLogic Image Tool build files from the operator source repository to the imagetool-setup location:\n$ mkdir -p imagetool-setup/docker-images/OracleSOASuite/imagetool/12.2.1.4.0 $ cd imagetool-setup/docker-images/OracleSOASuite/imagetool/12.2.1.4.0 $ cp -rf ${WORKDIR}/imagetool-scripts/* . Note: If you want to create the image continue with the following steps, otherwise to update the image see update an image.\n Create an image After setting up the WebLogic Image Tool and required build scripts, follow these steps to use the WebLogic Image Tool to create a new Oracle SOA Suite Docker image.\nDownload the Oracle SOA Suite installation binaries and patches You must download the required Oracle SOA Suite installation binaries and patches as listed below from the Oracle Software Delivery Cloud and save them in a directory of your choice. 
In these steps, this directory is download location.\nThe installation binaries and patches required for release 21.4.2 are:\n JDK:\n jdk-8u311-linux-x64.tar.gz Fusion Middleware Infrastructure installer:\n fmw_12.2.1.4.0_infrastructure.jar Oracle SOA Suite installers:\n fmw_12.2.1.4.0_soa.jar fmw_12.2.1.4.0_osb.jar fmw_12.2.1.4.0_b2bhealthcare.jar In this release, Oracle B2B is not supported to be configured, but the installer is required for completeness.\n Fusion Middleware Infrastructure patches:\n p28186730_139427_Generic.zip (OPATCH 13.9.4.2.7 FOR EM 13.4, 13.5 AND FMW/WLS 12.2.1.3.0, 12.2.1.4.0 AND 14.1.1.0.0) p33416868_122140_Generic.zip (WLS PATCH SET UPDATE 12.2.1.4.210930) p32880070_122140_Generic.zip (FMW COMMON THIRDPARTY SPU 12.2.1.4.0 FOR APRIL2021CPU) p32784652_122140_Generic.zip (OPSS BUNDLE PATCH 12.2.1.4.210418) p32905339_122140_Generic.zip (OWSM BUNDLE PATCH 12.2.1.4.210520) p33313802_122140_Generic.zip (ADF BUNDLE PATCH 12.2.1.4.210903) p33286160_122140_Generic.zip (Coherence 12.2.1.4 Cumulative Patch 11 (12.2.1.4.11)) p33093748_122140_Generic.zip (FMW PLATFORM 12.2.1.4.0 SPU FOR APRCPU2021) p31544353_122140_Linux-x86-64.zip (ADR FOR WEBLOGIC SERVER 12.2.1.4.0 JULY CPU 2020) Oracle SOA Suite and Oracle Service Bus patches\n p33408307_122140_Generic.zip (SOA BUNDLE PATCH 12.2.1.4.210928) p32121987_122140_Generic.zip (Oracle Service Bus BUNDLE PATCH 12.2.1.4.201105) p33404495_122140_Generic.zip (SOA One-off) p31857456_122140_Generic.zip (Oracle Service Bus One-off) p30741105_122140_Generic.zip (Oracle Service Bus One-off) p31713053_122140_Linux-x86-64.zip (One-off patch) Update required build files The following files in the code repository location \u0026lt;imagetool-setup-location\u0026gt;/docker-images/OracleSOASuite/imagetool/12.2.1.4.0 are used for creating the image:\n additionalBuildCmds.txt buildArgs In the buildArgs file, update all occurrences of %DOCKER_REPO% with the docker-images repository location, which is the complete path of 
\u0026lt;imagetool-setup-location\u0026gt;/docker-images.\nFor example, update:\n%DOCKER_REPO%/OracleSOASuite/imagetool/12.2.1.4.0/\nto:\n\u0026lt;imagetool-setup-location\u0026gt;/docker-images/OracleSOASuite/imagetool/12.2.1.4.0/\n Similarly, update the placeholders %JDK_VERSION% and %BUILDTAG% with appropriate values.\n Update the response file \u0026lt;imagetool-setup-location\u0026gt;/docker-images/OracleFMWInfrastructure/dockerfiles/12.2.1.4/install.file to add the parameter INSTALL_TYPE=\u0026quot;Fusion Middleware Infrastructure\u0026quot; in the [GENERIC] section.\n Create the image Add a JDK package to the WebLogic Image Tool cache:\n$ imagetool cache addInstaller --type jdk --version 8u311 --path \u0026lt;download location\u0026gt;/jdk-8u311-linux-x64.tar.gz Add the downloaded installation binaries to the WebLogic Image Tool cache:\n$ imagetool cache addInstaller --type fmw --version 12.2.1.4.0 --path \u0026lt;download location\u0026gt;/fmw_12.2.1.4.0_infrastructure.jar $ imagetool cache addInstaller --type soa --version 12.2.1.4.0 --path \u0026lt;download location\u0026gt;/fmw_12.2.1.4.0_soa.jar $ imagetool cache addInstaller --type osb --version 12.2.1.4.0 --path \u0026lt;download location\u0026gt;/fmw_12.2.1.4.0_osb.jar $ imagetool cache addInstaller --type b2b --version 12.2.1.4.0 --path \u0026lt;download location\u0026gt;/fmw_12.2.1.4.0_b2bhealthcare.jar Add the downloaded OPatch patch to the WebLogic Image Tool cache:\n$ imagetool cache addEntry --key 28186730_13.9.4.2.7 --value \u0026lt;download location\u0026gt;/p28186730_139427_Generic.zip Append the --opatchBugNumber flag and the OPatch patch key to the create command in the buildArgs file:\n--opatchBugNumber 28186730_13.9.4.2.7 Add the downloaded product patches to the WebLogic Image Tool cache:\n$ imagetool cache addEntry --key 30741105_12.2.1.4.0 --value \u0026lt;download location\u0026gt;/p30741105_122140_Generic.zip $ imagetool cache addEntry --key 31544353_12.2.1.4.0 --value 
\u0026lt;download location\u0026gt;/p31544353_122140_Linux-x86-64.zip $ imagetool cache addEntry --key 31713053_12.2.1.4.0 --value \u0026lt;download location\u0026gt;/p31713053_122140_Linux-x86-64.zip $ imagetool cache addEntry --key 31857456_12.2.1.4.0 --value \u0026lt;download location\u0026gt;/p31857456_122140_Generic.zip $ imagetool cache addEntry --key 32121987_12.2.1.4.0 --value \u0026lt;download location\u0026gt;/p32121987_122140_Generic.zip $ imagetool cache addEntry --key 32784652_12.2.1.4.0 --value \u0026lt;download location\u0026gt;/p32784652_122140_Generic.zip $ imagetool cache addEntry --key 32808126_12.2.1.4.0 --value \u0026lt;download location\u0026gt;/p32808126_122140_Generic.zip $ imagetool cache addEntry --key 32827327_12.2.1.4.0 --value \u0026lt;download location\u0026gt;/p32827327_122140_Generic.zip $ imagetool cache addEntry --key 32880070_12.2.1.4.0 --value \u0026lt;download location\u0026gt;/p32880070_122140_Generic.zip $ imagetool cache addEntry --key 32905339_12.2.1.4.0 --value \u0026lt;download location\u0026gt;/p32905339_122140_Generic.zip $ imagetool cache addEntry --key 33093748_12.2.1.4.0 --value \u0026lt;download location\u0026gt;/p33093748_122140_Generic.zip $ imagetool cache addEntry --key 33286160_12.2.1.4.0 --value \u0026lt;download location\u0026gt;/p33286160_122140_Generic.zip $ imagetool cache addEntry --key 33313802_12.2.1.4.0 --value \u0026lt;download location\u0026gt;/p33313802_122140_Generic.zip $ imagetool cache addEntry --key 33404495_12.2.1.4.0 --value \u0026lt;download location\u0026gt;/p33404495_122140_Generic.zip $ imagetool cache addEntry --key 33408307_12.2.1.4.0 --value \u0026lt;download location\u0026gt;/p33408307_122140_Generic.zip $ imagetool cache addEntry --key 33416868_12.2.1.4.0 --value \u0026lt;download location\u0026gt;/p33416868_122140_Generic.zip Append the --patches flag and the product patch keys to the create command in the buildArgs file. 
The --patches list must be a comma-separated collection of patch --key values used in the imagetool cache addEntry commands above.\nSample --patches list for the product patches added in to the cache:\n--patches 30741105_12.2.1.4.0,31544353_12.2.1.4.0,31713053_12.2.1.4.0,31857456_12.2.1.4.0,32121987_12.2.1.4.0,32784652_12.2.1.4.0,32808126_12.2.1.4.0,32827327_12.2.1.4.0,32880070_12.2.1.4.0,32905339_12.2.1.4.0,33093748_12.2.1.4.0,33286160_12.2.1.4.0,33313802_12.2.1.4.0,33404495_12.2.1.4.0,33408307_12.2.1.4.0,33416868_12.2.1.4.0 Example buildArgs file after appending the OPatch patch and product patches:\ncreate --jdkVersion 8u311 --type soa_osb_b2b --version 12.2.1.4.0 --tag oracle/soasuite:12.2.1.4.0 --pull --chown oracle:root --additionalBuildCommands \u0026lt;imagetool-setup-location\u0026gt;/docker-images/OracleSOASuite/imagetool/12.2.1.4.0/additionalBuildCmds.txt --additionalBuildFiles \u0026lt;imagetool-setup-location\u0026gt;/docker-images/OracleSOASuite/dockerfiles/12.2.1.4/container-scripts --installerResponseFile \u0026lt;imagetool-setup-location\u0026gt;/docker-images/OracleFMWInfrastructure/dockerfiles/12.2.1.4/install.file,\u0026lt;imagetool-setup-location\u0026gt;/docker-images/OracleSOASuite/dockerfiles/12.2.1.4/install/soasuite.response,\u0026lt;imagetool-setup-location\u0026gt;/docker-images/OracleSOASuite/dockerfiles/12.2.1.4/install/osb.response,\u0026lt;imagetool-setup-location\u0026gt;/docker-images/OracleSOASuite/dockerfiles/12.2.1.4/install/b2b.response --patches 30741105_12.2.1.4.0,31544353_12.2.1.4.0,31713053_12.2.1.4.0,31857456_12.2.1.4.0,32121987_12.2.1.4.0,32784652_12.2.1.4.0,32808126_12.2.1.4.0,32827327_12.2.1.4.0,32880070_12.2.1.4.0,32905339_12.2.1.4.0,33093748_12.2.1.4.0,33286160_12.2.1.4.0,33313802_12.2.1.4.0,33404495_12.2.1.4.0,33408307_12.2.1.4.0,33416868_12.2.1.4.0 Note: In the buildArgs file:\n --jdkVersion value must match the --version value used in the imagetool cache addInstaller command for --type jdk. 
--version value must match the --version value used in the imagetool cache addInstaller command for --type soa. --pull always pulls the latest base Linux image oraclelinux:7-slim from the Docker registry. This flag can be removed if you want to use the Linux image oraclelinux:7-slim, which is already available on the host where the SOA image is created. Refer to this page for the complete list of options available with the WebLogic Image Tool create command.\n Create the Oracle SOA Suite image:\n$ imagetool @\u0026lt;absolute path to buildargs file\u0026gt; Note: Make sure that the absolute path to the buildargs file is prepended with a @ character, as shown in the example above.\n For example:\n$ imagetool @\u0026lt;imagetool-setup-location\u0026gt;/docker-images/OracleSOASuite/imagetool/12.2.1.4.0/buildArgs Click here to see the sample Dockerfile generated with the `imagetool` command. ########## BEGIN DOCKERFILE ########## # Copyright (c) 2019, 2021, Oracle and/or its affiliates. # Licensed under the Universal Permissive License v 1.0 as shown at https://oss.oracle.com/licenses/upl. # FROM ghcr.io/oracle/oraclelinux:7-slim as os_update LABEL com.oracle.weblogic.imagetool.buildid=\u0026quot;b4554a25-22dd-4793-b121-9989bd4be40a\u0026quot; USER root # Use package manager to make sure that unzip, tar, and other required packages are installed # # Copyright (c) 2021, Oracle and/or its affiliates. # # Licensed under the Universal Permissive License v 1.0 as shown at https://oss.oracle.com/licenses/upl. # # Ensure necessary OS packages are installed RUN yum -y --downloaddir=/tmp/imagetool install gzip tar unzip libaio jq findutils diffutils hostname \\ \u0026amp;\u0026amp; yum -y --downloaddir=/tmp/imagetool clean all \\ \u0026amp;\u0026amp; rm -rf /var/cache/yum/* \\ \u0026amp;\u0026amp; rm -rf /tmp/imagetool # Create the Oracle user that will be the owner of the installed software # # Copyright (c) 2021, Oracle and/or its affiliates. 
# # Licensed under the Universal Permissive License v 1.0 as shown at https://oss.oracle.com/licenses/upl. # # Create user and group RUN if [ -z \u0026quot;$(getent group root)\u0026quot; ]; then hash groupadd \u0026amp;\u0026gt; /dev/null \u0026amp;\u0026amp; groupadd root || exit -1 ; fi \\ \u0026amp;\u0026amp; if [ -z \u0026quot;$(getent passwd oracle)\u0026quot; ]; then hash useradd \u0026amp;\u0026gt; /dev/null \u0026amp;\u0026amp; useradd -g root oracle || exit -1; fi \\ \u0026amp;\u0026amp; mkdir -p /u01 \\ \u0026amp;\u0026amp; chown oracle:root /u01 \\ \u0026amp;\u0026amp; chmod 775 /u01 # If Java is not already in the base image, install it # Copyright (c) 2021, Oracle and/or its affiliates. # Licensed under the Universal Permissive License v 1.0 as shown at https://oss.oracle.com/licenses/upl. # # Installing Java FROM os_update as jdk_build LABEL com.oracle.weblogic.imagetool.buildid=\u0026quot;b4554a25-22dd-4793-b121-9989bd4be40a\u0026quot; ENV JAVA_HOME=/u01/jdk COPY --chown=oracle:root jdk-8u301-linux-x64.tar.gz /tmp/imagetool/ USER oracle RUN tar xzf /tmp/imagetool/jdk-8u301-linux-x64.tar.gz -C /u01 \\ \u0026amp;\u0026amp; $(test -d /u01/jdk* \u0026amp;\u0026amp; mv /u01/jdk* /u01/jdk || mv /u01/graal* /u01/jdk) \\ \u0026amp;\u0026amp; rm -rf /tmp/imagetool \\ \u0026amp;\u0026amp; rm -f /u01/jdk/javafx-src.zip /u01/jdk/src.zip # If an Oracle Home is not already in the base image, install the middleware components # Copyright (c) 2021, Oracle and/or its affiliates. # Licensed under the Universal Permissive License v 1.0 as shown at https://oss.oracle.com/licenses/upl. 
# # Installing Middleware FROM os_update as wls_build LABEL com.oracle.weblogic.imagetool.buildid=\u0026quot;b4554a25-22dd-4793-b121-9989bd4be40a\u0026quot; ENV JAVA_HOME=/u01/jdk \\ ORACLE_HOME=/u01/oracle \\ OPATCH_NO_FUSER=true RUN mkdir -p /u01/oracle \\ \u0026amp;\u0026amp; mkdir -p /u01/oracle/oraInventory \\ \u0026amp;\u0026amp; chown oracle:root /u01/oracle/oraInventory \\ \u0026amp;\u0026amp; chown oracle:root /u01/oracle COPY --from=jdk_build --chown=oracle:root /u01/jdk /u01/jdk/ COPY --chown=oracle:root fmw_12.2.1.4.0_infrastructure.jar install.file /tmp/imagetool/ COPY --chown=oracle:root fmw_12.2.1.4.0_soa.jar soasuite.response /tmp/imagetool/ COPY --chown=oracle:root fmw_12.2.1.4.0_osb.jar osb.response /tmp/imagetool/ COPY --chown=oracle:root fmw_12.2.1.4.0_b2bhealthcare.jar b2b.response /tmp/imagetool/ COPY --chown=oracle:root oraInst.loc /u01/oracle/ USER oracle RUN echo \u0026quot;INSTALLING MIDDLEWARE\u0026quot; \\ \u0026amp;\u0026amp; echo \u0026quot;INSTALLING fmw\u0026quot; \\ \u0026amp;\u0026amp; \\ /u01/jdk/bin/java -Xmx1024m -jar /tmp/imagetool/fmw_12.2.1.4.0_infrastructure.jar -silent ORACLE_HOME=/u01/oracle \\ -responseFile /tmp/imagetool/install.file -invPtrLoc /u01/oracle/oraInst.loc -ignoreSysPrereqs -force -novalidation \\ \u0026amp;\u0026amp; echo \u0026quot;INSTALLING soa\u0026quot; \\ \u0026amp;\u0026amp; \\ /u01/jdk/bin/java -Xmx1024m -jar /tmp/imagetool/fmw_12.2.1.4.0_soa.jar -silent ORACLE_HOME=/u01/oracle \\ -responseFile /tmp/imagetool/soasuite.response -invPtrLoc /u01/oracle/oraInst.loc -ignoreSysPrereqs -force -novalidation \\ \u0026amp;\u0026amp; echo \u0026quot;INSTALLING osb\u0026quot; \\ \u0026amp;\u0026amp; \\ /u01/jdk/bin/java -Xmx1024m -jar /tmp/imagetool/fmw_12.2.1.4.0_osb.jar -silent ORACLE_HOME=/u01/oracle \\ -responseFile /tmp/imagetool/osb.response -invPtrLoc /u01/oracle/oraInst.loc -ignoreSysPrereqs -force -novalidation \\ \u0026amp;\u0026amp; echo \u0026quot;INSTALLING b2b\u0026quot; \\ \u0026amp;\u0026amp; \\ 
/u01/jdk/bin/java -Xmx1024m -jar /tmp/imagetool/fmw_12.2.1.4.0_b2bhealthcare.jar -silent ORACLE_HOME=/u01/oracle \\ -responseFile /tmp/imagetool/b2b.response -invPtrLoc /u01/oracle/oraInst.loc -ignoreSysPrereqs -force -novalidation \\ \u0026amp;\u0026amp; test $? -eq 0 \\ \u0026amp;\u0026amp; chmod -R g+r /u01/oracle \\ || (grep -vh \u0026quot;NOTIFICATION\u0026quot; /tmp/OraInstall*/install*.log \u0026amp;\u0026amp; exit 1) # # Copyright (c) 2021, Oracle and/or its affiliates. # # Licensed under the Universal Permissive License v 1.0 as shown at https://oss.oracle.com/licenses/upl. # # Update OPatch and apply WebLogic patches COPY --chown=oracle:root p28186730_139426_Generic-24269359.zip /tmp/imagetool/opatch/ RUN cd /tmp/imagetool/opatch \\ \u0026amp;\u0026amp; /u01/jdk/bin/jar -xf /tmp/imagetool/opatch/p28186730_139426_Generic-24269359.zip \\ \u0026amp;\u0026amp; /u01/jdk/bin/java -jar /tmp/imagetool/opatch/6880880/opatch_generic.jar -silent -ignoreSysPrereqs -force -novalidation oracle_home=/u01/oracle \\ \u0026amp;\u0026amp; rm -rf /tmp/imagetool COPY --chown=oracle:root patches/* /tmp/imagetool/patches/ # Apply all patches provided at the same time RUN /u01/oracle/OPatch/opatch napply -silent -oh /u01/oracle -nonrollbackable -phBaseDir /tmp/imagetool/patches \\ \u0026amp;\u0026amp; test $? 
-eq 0 \\ \u0026amp;\u0026amp; /u01/oracle/OPatch/opatch util cleanup -silent -oh /u01/oracle FROM os_update as final_build ENV ORACLE_HOME=/u01/oracle \\ JAVA_HOME=/u01/jdk \\ PATH=${PATH}:/u01/jdk/bin:/u01/oracle/oracle_common/common/bin:/u01/oracle/wlserver/common/bin:/u01/oracle LABEL com.oracle.weblogic.imagetool.buildid=\u0026quot;b4554a25-22dd-4793-b121-9989bd4be40a\u0026quot; COPY --from=jdk_build --chown=oracle:root /u01/jdk /u01/jdk/ COPY --from=wls_build --chown=oracle:root /u01/oracle /u01/oracle/ USER oracle WORKDIR /u01/oracle #ENTRYPOINT /bin/bash ENV ORACLE_HOME=/u01/oracle \\ VOLUME_DIR=/u01/oracle/user_projects \\ SCRIPT_FILE=/u01/oracle/container-scripts/* \\ HEALTH_SCRIPT_FILE=/u01/oracle/container-scripts/get_healthcheck_url.sh \\ JAVA_OPTIONS=\u0026quot;-Doracle.jdbc.fanEnabled=false -Dweblogic.StdoutDebugEnabled=false\u0026quot; \\ PATH=$PATH:/u01/oracle/container-scripts:/u01/oracle/oracle_common/modules/thirdparty/org.apache.ant/1.10.5.0.0/apache-ant-1.10.5/bin USER root RUN mkdir -p $VOLUME_DIR \u0026amp;\u0026amp; chown oracle:root /u01 $VOLUME_DIR \u0026amp;\u0026amp; \\ mkdir -p /u01/oracle/container-scripts COPY --chown=oracle:root files/container-scripts/ /u01/oracle/container-scripts/ RUN chmod +xr $SCRIPT_FILE USER oracle RUN if [ -f \u0026quot;${ORACLE_HOME}/soa/soa/thirdparty/edifecs/XEngine_8_4_1_23.tar.gz\u0026quot; ]; then \\ cd $ORACLE_HOME/soa/soa/thirdparty/edifecs \u0026amp;\u0026amp; \\ tar -zxvf XEngine_8_4_1_23.tar.gz; \\ else \\ echo -e \u0026quot;\\nXEngine_8_4_1_23.tar.gz not present in ${ORACLE_HOME}/soa/soa/thirdparty/edifecs directory. 
Skipping untar.\u0026quot;; \\ fi HEALTHCHECK --start-period=5m --interval=1m CMD curl -k -s --fail `$HEALTH_SCRIPT_FILE` || exit 1 WORKDIR ${ORACLE_HOME} CMD [\u0026quot;/u01/oracle/container-scripts/createDomainAndStart.sh\u0026quot;] ########## END DOCKERFILE ########## Check the created image using the docker images command:\n$ docker images | grep soasuite Update an image After setting up the WebLogic Image Tool and required build scripts, use the WebLogic Image Tool to update an existing Oracle SOA Suite Docker image:\n Enter the following command to add the OPatch patch to the WebLogic Image Tool cache:\n$ imagetool cache addEntry --key 28186730_13.9.4.2.7 --value \u0026lt;download location\u0026gt;/p28186730_139427_Generic.zip Execute the imagetool cache addEntry command for each patch to add the required patch(es) to the WebLogic Image Tool cache. For example, to add patch p30761841_122140_Generic.zip:\n$ imagetool cache addEntry --key=30761841_12.2.1.4.0 --value \u0026lt;downloaded-patches-location\u0026gt;/p30761841_122140_Generic.zip Provide the following arguments to the WebLogic Image Tool update command:\n –-fromImage - Identify the image that needs to be updated. In the example below, the image to be updated is soasuite:12.2.1.4. –-patches - Multiple patches can be specified as a comma-separated list. --tag - Specify the new tag to be applied for the image being built. Refer here for the complete list of options available with the WebLogic Image Tool update command.\n Note: The WebLogic Image Tool cache should have the latest OPatch zip. 
The WebLogic Image Tool will update the OPatch if it is not already updated in the image.\n Examples Click here to see the example \u0026#39;update\u0026#39; command: $ imagetool update --fromImage soasuite:12.2.1.4 --chown oracle:root --tag=soasuite:12.2.1.4-30761841 --patches=30761841_12.2.1.4.0 --opatchBugNumber=28186730_13.9.4.2.5 [INFO ] Image Tool build ID: bd21dc73-b775-4186-ae03-8219bf02113e [INFO ] Temporary directory used for docker build context: \u0026lt;work-directory\u0026gt;/wlstmp/wlsimgbuilder_temp1117031733123594064 [INFO ] Using patch 28186730_13.9.4.2.5 from cache: \u0026lt;downloaded-patches-location\u0026gt;/p28186730_139425_Generic.zip [WARNING] skipping patch conflict check, no support credentials provided [WARNING] No credentials provided, skipping validation of patches [INFO ] Using patch 30761841_12.2.1.4.0 from cache: \u0026lt;downloaded-patches-location\u0026gt;/p30761841_122140_Generic.zip [INFO ] docker cmd = docker build --force-rm=true --no-cache --tag soasuite:12.2.1.4-30761841 --build-arg http_proxy=http://\u0026lt;YOUR-COMPANY-PROXY\u0026gt; --build-arg https_proxy=http://\u0026lt;YOUR-COMPANY-PROXY\u0026gt; --build-arg no_proxy=\u0026lt;IP addresses and Domain address for no_proxy\u0026gt;,/var/run/docker.sock \u0026lt;work-directory\u0026gt;/wlstmp/wlsimgbuilder_temp1117031733123594064 Sending build context to Docker daemon 53.47MB Step 1/7 : FROM soasuite:12.2.1.4 as FINAL_BUILD ---\u0026gt; 445b649a3459 Step 2/7 : USER root ---\u0026gt; Running in 27f45e6958c3 Removing intermediate container 27f45e6958c3 ---\u0026gt; 150ae0161d46 Step 3/7 : ENV OPATCH_NO_FUSER=true ---\u0026gt; Running in daddfbb8fd9e Removing intermediate container daddfbb8fd9e ---\u0026gt; a5fc6b74be39 Step 4/7 : LABEL com.oracle.weblogic.imagetool.buildid=\u0026quot;bd21dc73-b775-4186-ae03-8219bf02113e\u0026quot; ---\u0026gt; Running in cdfec79c3fd4 Removing intermediate container cdfec79c3fd4 ---\u0026gt; 4c773aeb956f Step 5/7 : USER oracle ---\u0026gt; 
Running in ed3432e43e89 Removing intermediate container ed3432e43e89 ---\u0026gt; 54fe6b07c447 Step 6/7 : COPY --chown=oracle:oracle patches/* /tmp/imagetool/patches/ ---\u0026gt; d6d12f02a9be Step 7/7 : RUN /u01/oracle/OPatch/opatch napply -silent -oh /u01/oracle -phBaseDir /tmp/imagetool/patches \u0026amp;\u0026amp; /u01/oracle/OPatch/opatch util cleanup -silent -oh /u01/oracle \u0026amp;\u0026amp; rm -rf /tmp/imagetool ---\u0026gt; Running in a79addca4d2f Oracle Interim Patch Installer version 13.9.4.2.5 Copyright (c) 2020, Oracle Corporation. All rights reserved. Oracle Home : /u01/oracle Central Inventory : /u01/oracle/oraInventory from : /u01/oracle/oraInst.loc OPatch version : 13.9.4.2.5 OUI version : 13.9.4.0.0 Log file location : /u01/oracle/cfgtoollogs/opatch/opatch2020-06-01_10-56-13AM_1.log OPatch detects the Middleware Home as \u0026quot;/u01/oracle\u0026quot; Verifying environment and performing prerequisite checks... OPatch continues with these patches: 30761841 Do you want to proceed? [y|n] Y (auto-answered by -silent) User Responded with: Y All checks passed. Please shutdown Oracle instances running out of this ORACLE_HOME on the local system. (Oracle Home = '/u01/oracle') Is the local system ready for patching? [y|n] Y (auto-answered by -silent) User Responded with: Y Backing up files... 
Applying interim patch '30761841' to OH '/u01/oracle' ApplySession: Optional component(s) [ oracle.org.bouncycastle.bcprov.ext.jdk15on, 1.55.0.0.0 ] , [ oracle.org.bouncycastle.bcprov.ext.jdk15on, 1.55.0.0.0 ] , [ oracle.org.bouncycastle.bcprov.ext.jdk15on, 1.5.0.0.0 ] , [ oracle.org.bouncycastle.bcprov.ext.jdk15on, 1.5.0.0.0 ] , [ oracle.org.bouncycastle.bcprov.jdk15on, 1.55.0.0.0 ] , [ oracle.org.bouncycastle.bcprov.jdk15on, 1.55.0.0.0 ] , [ oracle.org.bouncycastle.bcprov.jdk15on, 1.52.0.0.0 ] , [ oracle.org.bouncycastle.bcprov.jdk15on, 1.52.0.0.0 ] , [ oracle.org.bouncycastle.bcprov.ext.jdk15on, 1.48.0.0.0 ] , [ oracle.org.bouncycastle.bcprov.ext.jdk15on, 1.48.0.0.0 ] , [ oracle.org.bouncycastle.bcpkix.jdk15on, 1.49.0.0.0 ] , [ oracle.org.bouncycastle.bcpkix.jdk15on, 1.49.0.0.0 ] , [ oracle.org.bouncycastle.bcprov.jdk15on, 1.51.0.0.0 ] , [ oracle.org.bouncycastle.bcprov.jdk15on, 1.51.0.0.0 ] , [ oracle.org.bouncycastle.bcprov.jdk15on, 1.54.0.0.0 ] , [ oracle.org.bouncycastle.bcprov.jdk15on, 1.54.0.0.0 ] , [ oracle.org.bouncycastle.bcprov.ext.jdk15on, 1.54.0.0.0 ] , [ oracle.org.bouncycastle.bcprov.ext.jdk15on, 1.54.0.0.0 ] , [ oracle.org.bouncycastle.bcpkix.jdk15on, 1.5.0.0.0 ] , [ oracle.org.bouncycastle.bcpkix.jdk15on, 1.5.0.0.0 ] , [ oracle.org.bouncycastle.bcpkix.jdk15on, 1.54.0.0.0 ] , [ oracle.org.bouncycastle.bcpkix.jdk15on, 1.54.0.0.0 ] , [ oracle.org.bouncycastle.bcpkix.jdk15on, 1.55.0.0.0 ] , [ oracle.org.bouncycastle.bcpkix.jdk15on, 1.55.0.0.0 ] , [ oracle.org.bouncycastle.bcprov.jdk15on, 1.49.0.0.0 ] , [ oracle.org.bouncycastle.bcprov.jdk15on, 1.49.0.0.0 ] , [ oracle.org.bouncycastle.bcprov.jdk15on, 1.5.0.0.0 ] , [ oracle.org.bouncycastle.bcprov.jdk15on, 1.5.0.0.0 ] not present in the Oracle Home or a higher version is found. Patching component oracle.org.bouncycastle.bcprov.jdk15on, 1.60.0.0.0... Patching component oracle.org.bouncycastle.bcprov.jdk15on, 1.60.0.0.0... Patching component oracle.org.bouncycastle.bcprov.ext.jdk15on, 1.60.0.0.0... 
Patching component oracle.org.bouncycastle.bcprov.ext.jdk15on, 1.60.0.0.0... Patching component oracle.org.bouncycastle.bcpkix.jdk15on, 1.60.0.0.0... Patching component oracle.org.bouncycastle.bcpkix.jdk15on, 1.60.0.0.0... Patch 30761841 successfully applied. Log file location: /u01/oracle/cfgtoollogs/opatch/opatch2020-06-01_10-56-13AM_1.log OPatch succeeded. Oracle Interim Patch Installer version 13.9.4.2.5 Copyright (c) 2020, Oracle Corporation. All rights reserved. Oracle Home : /u01/oracle Central Inventory : /u01/oracle/oraInventory from : /u01/oracle/oraInst.loc OPatch version : 13.9.4.2.5 OUI version : 13.9.4.0.0 Log file location : /u01/oracle/cfgtoollogs/opatch/opatch2020-06-01_10-57-19AM_1.log OPatch detects the Middleware Home as \u0026quot;/u01/oracle\u0026quot; Invoking utility \u0026quot;cleanup\u0026quot; OPatch will clean up 'restore.sh,make.txt' files and 'scratch,backup' directories. You will be still able to rollback patches after this cleanup. Do you want to proceed? [y|n] Y (auto-answered by -silent) User Responded with: Y Backup area for restore has been cleaned up. For a complete list of files/directories deleted, Please refer log file. OPatch succeeded. Removing intermediate container a79addca4d2f ---\u0026gt; 2ef2a67a685b Successfully built 2ef2a67a685b Successfully tagged soasuite:12.2.1.4-30761841 [INFO ] Build successful. Build time=112s. 
Image tag=soasuite:12.2.1.4-30761841 Click here to see the example Dockerfile generated by the WebLogic Image Tool with the \u0026#39;--dryRun\u0026#39; option: $ imagetool update --fromImage soasuite:12.2.1.4 --chown oracle:root --tag=soasuite:12.2.1.4-30761841 --patches=30761841_12.2.1.4.0 --opatchBugNumber=28186730_13.9.4.2.5 --dryRun [INFO ] Image Tool build ID: f9feea35-c52c-4974-b155-eb7f34d95892 [INFO ] Temporary directory used for docker build context: \u0026lt;work-directory\u0026gt;/wlstmp/wlsimgbuilder_temp1799120592903014749 [INFO ] Using patch 28186730_13.9.4.2.5 from cache: \u0026lt;downloaded-patches-location\u0026gt;/p28186730_139425_Generic.zip [WARNING] skipping patch conflict check, no support credentials provided [WARNING] No credentials provided, skipping validation of patches [INFO ] Using patch 30761841_12.2.1.4.0 from cache: \u0026lt;downloaded-patches-location\u0026gt;/p30761841_122140_Generic.zip [INFO ] docker cmd = docker build --force-rm=true --no-cache --tag soasuite:12.2.1.4-30761841 --build-arg http_proxy=http://www.yourcompany.proxy.com:80 --build-arg https_proxy=http://www.yourcompany.proxy.com:80 --build-arg no_proxy=10.250.109.251,localhost,127.0.0.1,/var/run/docker.sock \u0026lt;work-directory\u0026gt;/wlstmp/wlsimgbuilder_temp1799120592903014749 ########## BEGIN DOCKERFILE ########## # # Copyright (c) 2019, 2020, Oracle and/or its affiliates. # # Licensed under the Universal Permissive License v 1.0 as shown at https://oss.oracle.com/licenses/upl. 
# # FROM soasuite:12.2.1.4 as FINAL_BUILD USER root ENV OPATCH_NO_FUSER=true LABEL com.oracle.weblogic.imagetool.buildid=\u0026quot;f9feea35-c52c-4974-b155-eb7f34d95892\u0026quot; USER oracle COPY --chown=oracle:oracle patches/* /tmp/imagetool/patches/ RUN /u01/oracle/OPatch/opatch napply -silent -oh /u01/oracle -phBaseDir /tmp/imagetool/patches \\ \u0026amp;\u0026amp; /u01/oracle/OPatch/opatch util cleanup -silent -oh /u01/oracle \\ \u0026amp;\u0026amp; rm -rf /tmp/imagetool ########## END DOCKERFILE ########## Check the built image using the docker images command:\n$ docker images | grep soasuite soasuite 12.2.1.4-30761841 2ef2a67a685b About a minute ago 4.84GB $ Create an Oracle SOA Suite Docker image using Dockerfile For test and development purposes, you can create an Oracle SOA Suite image using the Dockerfile. Consult the README file for important prerequisite steps, such as building or pulling the Server JRE Docker image, Oracle FMW Infrastructure Docker image, and downloading the Oracle SOA Suite installer and bundle patch binaries.\nA prebuilt Oracle Fusion Middleware Infrastructure image, container-registry.oracle.com/middleware/fmw-infrastructure:12.2.1.4, is available at container-registry.oracle.com. 
We recommend that you pull and rename this image to build the Oracle SOA Suite image.\n$ docker pull container-registry.oracle.com/middleware/fmw-infrastructure:12.2.1.4 $ docker tag container-registry.oracle.com/middleware/fmw-infrastructure:12.2.1.4 oracle/fmw-infrastructure:12.2.1.4.0 Follow these steps to build an Oracle Fusion Middleware Infrastructure image, and then the Oracle SOA Suite image as a layer on top of that:\n Make a local clone of the sample repository:\n$ git clone https://github.com/oracle/docker-images Build the oracle/fmw-infrastructure:12.2.1.4 image:\n$ cd docker-images/OracleFMWInfrastructure/dockerfiles $ sh buildDockerImage.sh -v 12.2.1.4 -s This will produce an image named oracle/fmw-infrastructure:12.2.1.4.\n Tag the image as follows:\n$ docker tag oracle/fmw-infrastructure:12.2.1.4 oracle/fmw-infrastructure:12.2.1.4.0 Download the Oracle SOA Suite installer from the Oracle Technology Network or e-delivery.\n Note: Copy the installer binaries to the same location as the Dockerfile.\n To build the Oracle SOA Suite image with patches, you must download and drop the patch zip files (for example, p29928100_122140_Generic.zip) into the patches/ folder under the version that is required. For example, for 12.2.1.4.0 the folder is 12.2.1.4/patches. Similarly, to build the image by including the OPatch patch, download and drop the OPatch patch zip file (for e.g. p28186730_139424_Generic.zip) into the opatch_patch/ folder.\n Create the Oracle SOA Suite image by running the provided script:\n$ cd docker-images/OracleSOASuite/dockerfiles $ ./buildDockerImage.sh -v 12.2.1.4 -s The image produced will be named oracle/soasuite:12.2.1.4. The samples and instructions assume the Oracle SOA Suite image is named soasuite:12.2.1.4. 
You must rename your image to match this name, or update the samples to refer to the image you created.\n$ docker tag oracle/soasuite:12.2.1.4 soasuite:12.2.1.4 " +}, +{ + "uri": "/fmw-kubernetes/21.4.2/oud/manage-oud-containers/", + "title": "Manage Oracle Unified Directory Containers", + "tags": [], + "description": "This document provides steps manage Oracle Unified Directory containers.", + "content": "Important considerations for Oracle Unified Directory instances in Kubernetes.\n a) Logging and Visualization for Helm Chart oud-ds-rs Deployment Describes the steps for logging and visualization with Elasticsearch and Kibana.\n b) Monitoring an Oracle Unified Directory Instance Describes the steps for Monitoring the Oracle Unified Directory environment.\n " +}, +{ + "uri": "/fmw-kubernetes/21.4.2/oudsm/manage-oudsm-containers/", + "title": "Manage Oracle Unified Directory Services Manager Containers", + "tags": [], + "description": "This document provides steps to manage Oracle Unified Directory Services Manager containers.", + "content": "Important considerations for Oracle Unified Directory Services Manager instances in Kubernetes.\n a) Logging and Visualization for Helm Chart oudsm Deployment Describes the steps for logging and visualization with Elasticsearch and Kibana.\n b) Monitoring an Oracle Unified Directory Services Manager Instance Describes the steps for Monitoring the Oracle Unified Directory Services Manager environment.\n " +}, +{ + "uri": "/fmw-kubernetes/21.4.2/oud/", + "title": "Oracle Unified Directory", + "tags": [], + "description": "Oracle Unified Directory provides a comprehensive Directory Solution for robust Identity Management", + "content": "Oracle Unified Directory provides a comprehensive Directory Solution for robust Identity Management. Oracle Unified Directory is an all-in-one directory solution with storage, proxy, synchronization and virtualization capabilities. 
While unifying the approach, it provides all the services required for high-performance Enterprise and carrier-grade environments. Oracle Unified Directory ensures scalability to billions of entries, ease of installation, elastic deployments, enterprise manageability and effective monitoring.\nThis project supports deployment of Oracle Unified Directory (OUD) Docker images based on the 12cPS4 (12.2.1.4.0) release within a Kubernetes environment. The OUD Docker Image refers to binaries for OUD Release 12.2.1.4.0 and it has the capability to create different types of OUD Instances (Directory Service, Proxy, Replication) in containers.\nImage: oracle/oud:12.2.1.4.0\nThis project has several key features to assist you with deploying and managing Oracle Unified Directory in a Kubernetes environment. You can:\n Create Oracle Unified Directory instances in a Kubernetes persistent volume (PV). This PV can reside in an NFS file system or other Kubernetes volume types. Start servers based on declarative startup parameters and desired states. Expose the Oracle Unified Directory services for external access. Scale Oracle Unified Directory by starting and stopping servers on demand. Monitor the Oracle Unified Directory instance using Prometheus and Grafana. 
Follow the instructions in this guide to set up Oracle Unified Directory on Kubernetes.\nGetting started For detailed information about deploying Oracle Unified Directory, start at Prerequisites and follow this documentation sequentially.\nIf performing an Enterprise Deployment, refer to the Enterprise Deployment Guide for Oracle Identity and Access Management in a Kubernetes Cluster instead.\nCurrent release The current production release for Oracle Unified Directory 12c PS4 (12.2.1.4.0) deployment on Kubernetes is 21.4.2.\n" +}, +{ + "uri": "/fmw-kubernetes/21.4.2/soa-domains/adminguide/enablingt3/", + "title": "Expose the T3/T3S protocol", + "tags": [], + "description": "Create a T3/T3S channel and the corresponding Kubernetes service to expose the T3/T3S protocol for the Administration Server and Managed Servers in an Oracle SOA Suite domain.", + "content": " Oracle strongly recommends that you do not expose non-HTTPS traffic (T3/T3s/LDAP/IIOP/IIOPs) outside of the external firewall. You can control this access using a combination of network channels and firewalls.\n You can create T3/T3S channels and the corresponding Kubernetes service to expose the T3/T3S protocol for the Administration Server and Managed Servers in an Oracle SOA Suite domain.\nThe WebLogic Kubernetes Operator provides an option to expose a T3 channel for the Administration Server using the exposeAdminT3Channel setting during domain creation, then the matching T3 service can be used to connect. 
By default, when exposeAdminT3Channel is set, the WebLogic Kubernetes Operator environment exposes the NodePort for the T3 channel of the NetworkAccessPoint at 30012 (use t3ChannelPort to configure the port to a different value).\nIf you miss enabling exposeAdminT3Channel during domain creation, follow these steps to create a T3 channel for Administration Server manually.\nExpose a T3/T3S Channel for the Administration Server To create a custom T3/T3S channel for the Administration Server that has a listen port listen_port and a paired public port public_port:\n Create t3_admin_config.py with the following content:\nadmin_pod_name = sys.argv[1] admin_port = sys.argv[2] user_name = sys.argv[3] password = sys.argv[4] listen_port = sys.argv[5] public_port = sys.argv[6] public_address = sys.argv[7] AdminServerName = sys.argv[8] channelType = sys.argv[9] print(\u0026#39;custom admin_pod_name : [%s]\u0026#39; % admin_pod_name); print(\u0026#39;custom admin_port : [%s]\u0026#39; % admin_port); print(\u0026#39;custom user_name : [%s]\u0026#39; % user_name); print(\u0026#39;custom password : ********\u0026#39;); print(\u0026#39;public address : [%s]\u0026#39; % public_address); print(\u0026#39;channel listen port : [%s]\u0026#39; % listen_port); print(\u0026#39;channel public listen port : [%s]\u0026#39; % public_port); connect(user_name, password, \u0026#39;t3://\u0026#39; + admin_pod_name + \u0026#39;:\u0026#39; + admin_port) edit() startEdit() cd(\u0026#39;/\u0026#39;) cd(\u0026#39;Servers/%s/\u0026#39; % AdminServerName ) if channelType == \u0026#39;t3\u0026#39;: create(\u0026#39;T3Channel_AS\u0026#39;,\u0026#39;NetworkAccessPoint\u0026#39;) cd(\u0026#39;NetworkAccessPoints/T3Channel_AS\u0026#39;) set(\u0026#39;Protocol\u0026#39;,\u0026#39;t3\u0026#39;) set(\u0026#39;ListenPort\u0026#39;,int(listen_port)) set(\u0026#39;PublicPort\u0026#39;,int(public_port)) set(\u0026#39;PublicAddress\u0026#39;, public_address) print(\u0026#39;Channel T3Channel_AS added\u0026#39;) elif 
channelType == \u0026#39;t3s\u0026#39;:\tcreate(\u0026#39;T3SChannel_AS\u0026#39;,\u0026#39;NetworkAccessPoint\u0026#39;) cd(\u0026#39;NetworkAccessPoints/T3SChannel_AS\u0026#39;) set(\u0026#39;Protocol\u0026#39;,\u0026#39;t3s\u0026#39;) set(\u0026#39;ListenPort\u0026#39;,int(listen_port)) set(\u0026#39;PublicPort\u0026#39;,int(public_port)) set(\u0026#39;PublicAddress\u0026#39;, public_address) set(\u0026#39;HttpEnabledForThisProtocol\u0026#39;, true) set(\u0026#39;OutboundEnabled\u0026#39;, false) set(\u0026#39;Enabled\u0026#39;, true) set(\u0026#39;TwoWaySSLEnabled\u0026#39;, true) set(\u0026#39;ClientCertificateEnforced\u0026#39;, false) else: print(\u0026#39;channelType [%s] not supported\u0026#39;,channelType) activate() disconnect() Copy t3_admin_config.py into the domain home (for example, /u01/oracle/user_projects/domains/soainfra) of the Administration Server pod (for example, soainfra-adminserver in soans namespace).\n $ kubectl cp t3_admin_config.py soans/soainfra-adminserver:/u01/oracle/user_projects/domains/soainfra Run wlst.sh t3_admin_config.py by using exec into the Administration Server pod with the following parameters:\n admin_pod_name: soainfra-adminserver # Administration Server pod admin_port: 7001 user_name: weblogic password: Welcome1 # weblogic password listen_port: 30014 # New port for T3 Administration Server public_port: 30014 # Kubernetes NodePort which will be used to expose T3 port externally public_address: AdminServerName: AdminServer # Give administration Server name channelType: t3 # t3 or t3s protocol channel $ kubectl exec -it \u0026lt;Administration Server pod\u0026gt; -n \u0026lt;namespace\u0026gt; -- /u01/oracle/oracle_common/common/bin/wlst.sh \u0026lt;domain_home\u0026gt;/t3_admin_config.py \u0026lt;Administration Server pod\u0026gt; \u0026lt;Administration Server port\u0026gt; weblogic \u0026lt;password for weblogic\u0026gt; \u0026lt;t3 port on Administration Server\u0026gt; \u0026lt;t3 nodeport\u0026gt; 
\u0026lt;master_ip\u0026gt; \u0026lt;AdminServerName\u0026gt; \u0026lt;channelType t3 or t3s\u0026gt; For example:\n$ kubectl exec -it soainfra-adminserver -n soans -- /u01/oracle/oracle_common/common/bin/wlst.sh /u01/oracle/user_projects/domains/soainfra/t3_admin_config.py soainfra-adminserver 7001 weblogic Welcome1 30014 30014 xxx.xxx.xxx.xxx AdminServer t3 Create t3_admin_svc.yaml with the following contents to expose T3 at NodePort 30014 for domainName and domainUID as soainfra and domain deployed in soans namespace:\n Note: For T3S, replace NodePort 30014 with the appropriate value used with public_port while creating the T3S channel using wlst.sh in the previous step.\n apiVersion: v1 kind: Service metadata: name: soainfra-adminserver-t3-external namespace: soans labels: weblogic.serverName: AdminServer weblogic.domainName: soainfra weblogic.domainUID: soainfra spec: type: NodePort selector: weblogic.domainName: soainfra weblogic.domainUID: soainfra weblogic.serverName: AdminServer ports: - name: t3adminport protocol: TCP port: 30014 targetPort: 30014 nodePort: 30014 Create the NodePort service for port 30014:\n$ kubectl create -f t3_admin_svc.yaml Verify that you can access T3 for the Administration Server with the following URL:\nt3://\u0026lt;master_ip\u0026gt;:30014 Similarly, you can access T3S as follows:\na. First get the certificates from the Administration Server to be used for secured (T3S) connection from the client. You can export the certificate from the Administration Server with WLST commands. 
For example, to export the default demoidentity:\n Note: If you are using the custom SSL certificate, replace the steps accordingly.\n $ kubectl exec -it soainfra-adminserver -n soans -- bash $ /u01/oracle/oracle_common/common/bin/wlst.sh $ connect('weblogic','Welcome1','t3://soainfra-adminserver:7001') $ svc = getOpssService(name='KeyStoreService') $ svc.exportKeyStoreCertificate(appStripe='system', name='demoidentity', password='DemoIdentityKeyStorePassPhrase', alias='DemoIdentity', type='Certificate', filepath='/tmp/cert.txt/') These steps download the certificate at /tmp/cert.txt.\nb. Use the same certificates from the client side and connect using t3s. For example:\n$ export JAVA_HOME=/u01/jdk $ keytool -import -v -trustcacerts -alias soadomain -file cert.txt -keystore $JAVA_HOME/jre/lib/security/cacerts -keypass changeit -storepass changeit $ export WLST_PROPERTIES=\u0026quot;-Dweblogic.security.SSL.ignoreHostnameVerification=true\u0026quot; $ cd $ORACLE_HOME/oracle_common/common/bin $ ./wlst.sh Initializing WebLogic Scripting Tool (WLST) ... 
Welcome to WebLogic Server Administration Scripting Shell Type help() for help on available commands $ wls:/offline\u0026gt; connect('weblogic','Welcome1','t3s://\u0026lt;Master IP address\u0026gt;:30014') Expose T3/T3S for Managed Servers To create a custom T3/T3S channel for all Managed Servers, with a listen port listen_port and a paired public port public_port:\n Create t3_ms_config.py with the following content:\nadmin_pod_name = sys.argv[1] admin_port = sys.argv[2] user_name = sys.argv[3] password = sys.argv[4] listen_port = sys.argv[5] public_port = sys.argv[6] public_address = sys.argv[7] managedNameBase = sys.argv[8] ms_count = sys.argv[9] channelType = sys.argv[10] print(\u0026#39;custom host : [%s]\u0026#39; % admin_pod_name); print(\u0026#39;custom port : [%s]\u0026#39; % admin_port); print(\u0026#39;custom user_name : [%s]\u0026#39; % user_name); print(\u0026#39;custom password : ********\u0026#39;); print(\u0026#39;public address : [%s]\u0026#39; % public_address); print(\u0026#39;channel listen port : [%s]\u0026#39; % listen_port); print(\u0026#39;channel public listen port : [%s]\u0026#39; % public_port); connect(user_name, password, \u0026#39;t3://\u0026#39; + admin_pod_name + \u0026#39;:\u0026#39; + admin_port) edit() startEdit() for index in range(0, int(ms_count)): cd(\u0026#39;/\u0026#39;) msIndex = index+1 cd(\u0026#39;/\u0026#39;) name = \u0026#39;%s%s\u0026#39; % (managedNameBase, msIndex) cd(\u0026#39;Servers/%s/\u0026#39; % name ) if channelType == \u0026#39;t3\u0026#39;: create(\u0026#39;T3Channel_MS\u0026#39;,\u0026#39;NetworkAccessPoint\u0026#39;) cd(\u0026#39;NetworkAccessPoints/T3Channel_MS\u0026#39;) set(\u0026#39;Protocol\u0026#39;,\u0026#39;t3\u0026#39;) set(\u0026#39;ListenPort\u0026#39;,int(listen_port)) set(\u0026#39;PublicPort\u0026#39;,int(public_port)) set(\u0026#39;PublicAddress\u0026#39;, public_address) print(\u0026#39;Channel T3Channel_MS added ...for \u0026#39; + name) elif channelType == 
\u0026#39;t3s\u0026#39;:\tcreate(\u0026#39;T3SChannel_MS\u0026#39;,\u0026#39;NetworkAccessPoint\u0026#39;) cd(\u0026#39;NetworkAccessPoints/T3SChannel_MS\u0026#39;) set(\u0026#39;Protocol\u0026#39;,\u0026#39;t3s\u0026#39;) set(\u0026#39;ListenPort\u0026#39;,int(listen_port)) set(\u0026#39;PublicPort\u0026#39;,int(public_port)) set(\u0026#39;PublicAddress\u0026#39;, public_address) set(\u0026#39;HttpEnabledForThisProtocol\u0026#39;, true) set(\u0026#39;OutboundEnabled\u0026#39;, false) set(\u0026#39;Enabled\u0026#39;, true) set(\u0026#39;TwoWaySSLEnabled\u0026#39;, true) set(\u0026#39;ClientCertificateEnforced\u0026#39;, false) print(\u0026#39;Channel T3SChannel_MS added ...for \u0026#39; + name) else: print(\u0026#39;Protocol [%s] not supported\u0026#39; % channelType) activate() disconnect() Copy t3_ms_config.py into the domain home (for example, /u01/oracle/user_projects/domains/soainfra) of the Administration Server pod (for example, soainfra-adminserver in soans namespace).\n$ kubectl cp t3_ms_config.py soans/soainfra-adminserver:/u01/oracle/user_projects/domains/soainfra Run wlst.sh t3_ms_config.py by exec into the Administration Server pod with the following parameters:\n admin_pod_name: soainfra-adminserver # Administration Server pod admin_port: 7001 user_name: weblogic password: Welcome1 # weblogic password listen_port: 30016 # New port for T3 Managed Servers public_port: 30016 # Kubernetes NodePort which will be used to expose T3 port externally public_address: managedNameBase: soa_server # Give Managed Server base name. 
For osb_cluster this will be osb_server ms_count: 5 # Number of configured Managed Servers channelType: t3 # channelType is t3 or t3s $ kubectl exec -it \u0026lt;Administration Server pod\u0026gt; -n \u0026lt;namespace\u0026gt; -- /u01/oracle/oracle_common/common/bin/wlst.sh \u0026lt;domain_home\u0026gt;/t3_ms_config.py \u0026lt;Administration Server pod\u0026gt; \u0026lt;Administration Server port\u0026gt; weblogic \u0026lt;password for weblogic\u0026gt; \u0026lt;t3 port on Managed Server\u0026gt; \u0026lt;t3 nodeport\u0026gt; \u0026lt;master_ip\u0026gt; \u0026lt;managedNameBase\u0026gt; \u0026lt;ms_count\u0026gt; \u0026lt;channelType t3 or t3s\u0026gt; For example:\n$ kubectl exec -it soainfra-adminserver -n soans -- /u01/oracle/oracle_common/common/bin/wlst.sh /u01/oracle/user_projects/domains/soainfra/t3_ms_config.py soainfra-adminserver 7001 weblogic Welcome1 30016 30016 xxx.xxx.xxx.xxx soa_server 5 t3 Create t3_ms_svc.yaml with the following contents to expose T3 at Managed Server port 30016 for domainName, domainUID as soainfra, and clusterName as soa_cluster for the SOA cluster. 
Similarly, you can create the Kubernetes service with clusterName as osb_cluster for an Oracle Service Bus cluster:\n Note: For T3S, replace NodePort 30016 with the appropriate value used with public_port while creating the T3S channel using wlst.sh in the previous step.\n apiVersion: v1 kind: Service metadata: name: soainfra-soa-cluster-t3-external namespace: soans labels: weblogic.clusterName: soa_cluster weblogic.domainName: soainfra weblogic.domainUID: soainfra spec: type: NodePort selector: weblogic.domainName: soainfra weblogic.domainUID: soainfra weblogic.clusterName: soa_cluster ports: - name: t3soaport protocol: TCP port: 30016 targetPort: 30016 nodePort: 30016 Create the NodePort service for port 30016:\n$ kubectl create -f t3_ms_svc.yaml Verify that you can access T3 for the Managed Server with the following URL:\nt3://\u0026lt;master_ip\u0026gt;:30016 Similarly, you can access T3S as follows:\na. First get the certificates from the Administration Server to be used for secured (t3s) connection from client. You can export the certificate from the Administration Server with wlst commands. Sample commands to export the default demoidentity:\n Note: In case you are using the custom SSL certificate, replaces the steps accordingly\n $ kubectl exec -it soainfra-adminserver -n soans -- bash $ /u01/oracle/oracle_common/common/bin/wlst.sh $ connect('weblogic','Welcome1','t3://soainfra-adminserver:7001') $ svc = getOpssService(name='KeyStoreService') $ svc.exportKeyStoreCertificate(appStripe='system', name='demoidentity', password='DemoIdentityKeyStorePassPhrase', alias='DemoIdentity', type='Certificate', filepath='/tmp/cert.txt/') The above steps download the certificate at /tmp/cert.txt.\nb. Use the same certificates from the client side and connect using t3s. 
For example:\n$ export JAVA_HOME=/u01/jdk $ keytool -import -v -trustcacerts -alias soadomain -file cert.txt -keystore $JAVA_HOME/jre/lib/security/cacerts -keypass changeit -storepass changeit $ export WLST_PROPERTIES=\u0026quot;-Dweblogic.security.SSL.ignoreHostnameVerification=true\u0026quot; $ cd $ORACLE_HOME/oracle_common/common/bin $ ./wlst.sh Initializing WebLogic Scripting Tool (WLST) ... Welcome to WebLogic Server Administration Scripting Shell Type help() for help on available commands $ wls:/offline\u0026gt; connect('weblogic','Welcome1','t3s://\u0026lt;Master IP address\u0026gt;:30016') Remove T3/T3S configuration For Administration Server Create t3_admin_delete.py with the following content:\nadmin_pod_name = sys.argv[1] admin_port = sys.argv[2] user_name = sys.argv[3] password = sys.argv[4] AdminServerName = sys.argv[5] channelType = sys.argv[6] print(\u0026#39;custom admin_pod_name : [%s]\u0026#39; % admin_pod_name); print(\u0026#39;custom admin_port : [%s]\u0026#39; % admin_port); print(\u0026#39;custom user_name : [%s]\u0026#39; % user_name); print(\u0026#39;custom password : ********\u0026#39;); connect(user_name, password, \u0026#39;t3://\u0026#39; + admin_pod_name + \u0026#39;:\u0026#39; + admin_port) edit() startEdit() cd(\u0026#39;/\u0026#39;) cd(\u0026#39;Servers/%s/\u0026#39; % AdminServerName ) if channelType == \u0026#39;t3\u0026#39;: delete(\u0026#39;T3Channel_AS\u0026#39;,\u0026#39;NetworkAccessPoint\u0026#39;) elif channelType == \u0026#39;t3s\u0026#39;: delete(\u0026#39;T3SChannel_AS\u0026#39;,\u0026#39;NetworkAccessPoint\u0026#39;) else: print(\u0026#39;channelType [%s] not supported\u0026#39;,channelType) activate() disconnect() Copy t3_admin_delete.py into the domain home (for example, /u01/oracle/user_projects/domains/soainfra) of the Administration Server pod (for example, soainfra-adminserver in soans namespace).\n$ kubectl cp t3_admin_delete.py soans/soainfra-adminserver:/u01/oracle/user_projects/domains/soainfra Run wlst.sh 
t3_admin_delete.py by exec into the Administration Server pod with the following parameters:\n admin_pod_name: soainfra-adminserver # Administration Server pod admin_port: 7001 user_name: weblogic password: Welcome1 # weblogic password AdminServerName: AdminServer # Give administration Server name channelType: t3 # T3 channel $ kubectl exec -it \u0026lt;Administration Server pod\u0026gt; -n \u0026lt;namespace\u0026gt; -- /u01/oracle/oracle_common/common/bin/wlst.sh \u0026lt;domain_home\u0026gt;/t3_admin_delete.py \u0026lt;Administration Server pod\u0026gt; \u0026lt;Administration Server port\u0026gt; weblogic \u0026lt;password for weblogic\u0026gt; \u0026lt;AdminServerName\u0026gt; \u0026lt;protocol t3 or t3s\u0026gt; For example:\n$ kubectl exec -it soainfra-adminserver -n soans -- /u01/oracle/oracle_common/common/bin/wlst.sh /u01/oracle/user_projects/domains/soainfra/t3_admin_delete.py soainfra-adminserver 7001 weblogic Welcome1 AdminServer t3 Delete the NodePort service for port 30014:\n$ kubectl delete -f t3_admin_svc.yaml For Managed Servers These steps delete the custom T3/T3S channel created by Expose T3/T3S for Managed Servers for all Managed Servers.\n Create t3_ms_delete.py with the following content:\nadmin_pod_name = sys.argv[1] admin_port = sys.argv[2] user_name = sys.argv[3] password = sys.argv[4] managedNameBase = sys.argv[5] ms_count = sys.argv[6] channelType = sys.argv[7] print(\u0026#39;custom host : [%s]\u0026#39; % admin_pod_name); print(\u0026#39;custom port : [%s]\u0026#39; % admin_port); print(\u0026#39;custom user_name : [%s]\u0026#39; % user_name); print(\u0026#39;custom password : ********\u0026#39;); connect(user_name, password, \u0026#39;t3://\u0026#39; + admin_pod_name + \u0026#39;:\u0026#39; + admin_port) edit() startEdit() for index in range(0, int(ms_count)): cd(\u0026#39;/\u0026#39;) msIndex = index+1 cd(\u0026#39;/\u0026#39;) name = \u0026#39;%s%s\u0026#39; % (managedNameBase, msIndex) cd(\u0026#39;Servers/%s/\u0026#39; % name ) if 
channelType == \u0026#39;t3\u0026#39;: delete(\u0026#39;T3Channel_MS\u0026#39;,\u0026#39;NetworkAccessPoint\u0026#39;) elif channelType == \u0026#39;t3s\u0026#39;: delete(\u0026#39;T3SChannel_MS\u0026#39;,\u0026#39;NetworkAccessPoint\u0026#39;) else: print(\u0026#39;Protocol [%s] not supported\u0026#39; % channelType) activate() disconnect() Copy t3_ms_delete.py into the domain home (for example, /u01/oracle/user_projects/domains/soainfra) of the Administration Server pod (for example, soainfra-adminserver in soans namespace).\n$ kubectl cp t3_ms_delete.py soans/soainfra-adminserver:/u01/oracle/user_projects/domains/soainfra Run wlst.sh t3_ms_delete.py by exec into the Administration Server pod with the following parameters:\n admin_pod_name: soainfra-adminserver # Administration Server pod admin_port: 7001 user_name: weblogic password: Welcome1 # weblogic password managedNameBase: soa_server # Give Managed Server base name. For osb_cluster this will be osb_server ms_count: 5 # Number of configured Managed Servers channelType: t3 # channelType is t3 or t3s $ kubectl exec -it \u0026lt;Administration Server pod\u0026gt; -n \u0026lt;namespace\u0026gt; -- /u01/oracle/oracle_common/common/bin/wlst.sh \u0026lt;domain_home\u0026gt;/t3_ms_delete.py \u0026lt;Administration Server pod\u0026gt; \u0026lt;Administration Server port\u0026gt; weblogic \u0026lt;password for weblogic\u0026gt; \u0026lt;t3 port on Managed Server\u0026gt; \u0026lt;t3 nodeport\u0026gt; \u0026lt;master_ip\u0026gt; \u0026lt;managedNameBase\u0026gt; \u0026lt;ms_count\u0026gt; \u0026lt;channelType t3 or t3s\u0026gt; For example:\n$ kubectl exec -it soainfra-adminserver -n soans -- /u01/oracle/oracle_common/common/bin/wlst.sh /u01/oracle/user_projects/domains/soainfra/t3_ms_delete.py soainfra-adminserver 7001 weblogic Welcome1 soa_server 5 t3 Delete the NodePort service for port 30016 (or the NodePort used while creating the Kubernetes service):\n$ kubectl delete -f t3_ms_svc.yaml " +}, +{ + "uri": 
"/fmw-kubernetes/21.4.2/oam/configure-ingress/", + "title": "Configure an Ingress for an OAM domain", + "tags": [], + "description": "This document provides steps to configure an Ingress to direct traffic to the OAM domain.", + "content": "Setting up an ingress for NGINX for the OAM Domain The instructions below explain how to set up NGINX as an ingress for the OAM domain with SSL termination.\nNote: All the steps below should be performed on the master node.\n Generate a SSL Certificate Install NGINX Create an Ingress for the Domain Verify that you can access the domain URL Generate a SSL Certificate Generate a private key and certificate signing request (CSR) using a tool of your choice. Send the CSR to your certificate authority (CA) to generate the certificate.\nIf you want to use a certificate for testing purposes you can generate a self signed certificate using openssl:\n$ mkdir \u0026lt;workdir\u0026gt;/ssl $ cd \u0026lt;workdir\u0026gt;/ssl $ openssl req -x509 -nodes -days 365 -newkey rsa:2048 -keyout tls.key -out tls.crt -subj \u0026#34;/CN=\u0026lt;nginx-hostname\u0026gt;\u0026#34; For example:\n$ mkdir /scratch/OAMK8S/ssl $ cd /scratch/OAMK8S/ssl $ openssl req -x509 -nodes -days 365 -newkey rsa:2048 -keyout tls.key -out tls.crt -subj \u0026#34;/CN=masternode.example.com\u0026#34; Note: The CN should match the host.domain of the master node in order to prevent hostname problems during certificate verification.\nThe output will look similar to the following:\nGenerating a 2048 bit RSA private key ..........................................+++ .......................................................................................................+++ writing new private key to 'tls.key' ----- Create a secret for SSL by running the following command:\n$ kubectl -n oamns create secret tls \u0026lt;domain_uid\u0026gt;-tls-cert --key \u0026lt;workdir\u0026gt;/tls.key --cert \u0026lt;workdir\u0026gt;/tls.crt For example:\n$ kubectl -n oamns create secret tls 
accessdomain-tls-cert --key /scratch/OAMK8S/ssl/tls.key --cert /scratch/OAMK8S/ssl/tls.crt The output will look similar to the following:\nsecret/accessdomain-tls-cert created Install NGINX Use helm to install NGINX.\n Add the helm chart repository for NGINX using the following command:\n$ helm repo add stable https://kubernetes.github.io/ingress-nginx The output will look similar to the following:\n\u0026quot;stable\u0026quot; has been added to your repositories Update the repository using the following command:\n$ helm repo update The output will look similar to the following:\nHang tight while we grab the latest from your chart repositories... ...Successfully got an update from the \u0026quot;stable\u0026quot; chart repository Update Complete. ⎈ Happy Helming!⎈ Install NGINX using helm If you can connect directly to the master node IP address from a browser, then install NGINX with the --set controller.service.type=NodePort parameter.\nIf you are using a Managed Service for your Kubernetes cluster, for example Oracle Kubernetes Engine (OKE) on Oracle Cloud Infrastructure (OCI), and connect from a browser to the Load Balancer IP address, then use the --set controller.service.type=LoadBalancer parameter. 
This instructs the Managed Service to setup a Load Balancer to direct traffic to the NGINX ingress.\n To install NGINX use the following helm command depending on if you are using NodePort or LoadBalancer:\na) Using NodePort\n$ helm install nginx-ingress -n \u0026lt;domain_namespace\u0026gt; --set controller.extraArgs.default-ssl-certificate=\u0026lt;domain_namespace\u0026gt;/\u0026lt;ssl_secret\u0026gt; --set controller.service.type=NodePort --set controller.admissionWebhooks.enabled=false stable/ingress-nginx For example:\n$ helm install nginx-ingress -n oamns --set controller.extraArgs.default-ssl-certificate=oamns/accessdomain-tls-cert --set controller.service.type=NodePort --set controller.admissionWebhooks.enabled=false stable/ingress-nginx Note: If using Kubernetes 1.18 then add --version=3.34.0 to the end of command.\nThe output will look similar to the following:\nNAME: nginx-ingress LAST DEPLOYED: Mon Nov 1 07:34:25 2021 NAMESPACE: oamns STATUS: deployed REVISION: 1 TEST SUITE: None NOTES: The nginx-ingress controller has been installed. 
Get the application URL by running these commands: export HTTP_NODE_PORT=$(kubectl --namespace oamns get services -o jsonpath=\u0026quot;{.spec.ports[0].nodePort}\u0026quot; nginx-ingress-controller) export HTTPS_NODE_PORT=$(kubectl --namespace oamns get services -o jsonpath=\u0026quot;{.spec.ports[1].nodePort}\u0026quot; nginx-ingress-controller) export NODE_IP=$(kubectl --namespace oamns get nodes -o jsonpath=\u0026quot;{.items[0].status.addresses[1].address}\u0026quot;) echo \u0026quot;Visit http://$NODE_IP:$HTTP_NODE_PORT to access your application via HTTP.\u0026quot; echo \u0026quot;Visit https://$NODE_IP:$HTTPS_NODE_PORT to access your application via HTTPS.\u0026quot; An example Ingress that makes use of the controller: apiVersion: networking.k8s.io/v1 kind: Ingress metadata: annotations: kubernetes.io/ingress.class: nginx name: example namespace: foo spec: ingressClassName: example-class rules: - host: www.example.com http: paths: - path: / pathType: Prefix backend: service: name: exampleService port: 80 # This section is only required if TLS is to be enabled for the Ingress tls: - hosts: - www.example.com secretName: example-tls If TLS is enabled for the Ingress, a Secret containing the certificate and key must also be provided: apiVersion: v1 kind: Secret metadata: name: example-tls namespace: foo data: tls.crt: \u0026lt;base64 encoded cert\u0026gt; tls.key: \u0026lt;base64 encoded key\u0026gt; type: kubernetes.io/tls b) Using LoadBalancer\n$ helm install nginx-ingress -n oamns --set controller.extraArgs.default-ssl-certificate=oamns/accessdomain-tls-cert --set controller.service.type=LoadBalancer --set controller.admissionWebhooks.enabled=false stable/ingress-nginx Note: If using Kubernetes 1.18 then add --version=3.34.0 to the end of command.\nThe output will look similar to the following:\n$ helm install nginx-ingress -n oamns --set controller.extraArgs.default-ssl-certificate=oamns/accessdomain-tls-cert --set controller.service.type=LoadBalancer 
--set controller.admissionWebhooks.enabled=false stable/ingress-nginx NAME: nginx-ingress LAST DEPLOYED: Mon Nov 1 07:34:25 2021 NAMESPACE: nginxssl STATUS: deployed REVISION: 1 TEST SUITE: None NOTES: The ingress-nginx controller has been installed. It may take a few minutes for the LoadBalancer IP to be available. You can watch the status by running 'kubectl --namespace oamns get services -o wide -w nginx-ingress-ingress-nginx-controller' An example Ingress that makes use of the controller: apiVersion: networking.k8s.io/v1 kind: Ingress metadata: annotations: kubernetes.io/ingress.class: nginx name: example namespace: foo spec: ingressClassName: example-class rules: - host: www.example.com http: paths: - path: / pathType: Prefix backend: service: name: exampleService port: 80 # This section is only required if TLS is to be enabled for the Ingress tls: - hosts: - www.example.com secretName: example-tls If TLS is enabled for the Ingress, a Secret containing the certificate and key must also be provided: apiVersion: v1 kind: Secret metadata: name: example-tls namespace: foo data: tls.crt: \u0026lt;base64 encoded cert\u0026gt; tls.key: \u0026lt;base64 encoded key\u0026gt; type: kubernetes.io/tls Create an Ingress for the Domain Navigate to the following directory:\n$ cd $WORKDIR/kubernetes/charts/ingress-per-domain Edit the values.yaml and change the domainUID: parameter to match your domainUID, for example domainUID: accessdomain. The file should look as follows:\n# Load balancer type. Supported values are: NGINX type: NGINX # Type of Configuration Supported Values are : SSL and NONSSL sslType: SSL # domainType Supported values are soa,osb and soaosb. 
#WLS domain as backend to the load balancer wlsDomain: domainUID: accessdomain adminServerName: AdminServer adminServerPort: 7001 adminServerSSLPort: oamClusterName: oam_cluster oamManagedServerPort: 14100 oamManagedServerSSLPort: policyClusterName: policy_cluster policyManagedServerPort: 15100 policyManagedServerSSLPort: Run the following helm command to install the ingress:\n$ cd $WORKDIR $ helm install oam-nginx kubernetes/charts/ingress-per-domain --namespace \u0026lt;domain_namespace\u0026gt; --values kubernetes/charts/ingress-per-domain/values.yaml For example:\n$ cd $WORKDIR $ helm install oam-nginx kubernetes/charts/ingress-per-domain --namespace oamns --values kubernetes/charts/ingress-per-domain/values.yaml The output will look similar to the following:\nNAME: oam-nginx LAST DEPLOYED: Mon Nov 1 07:57:30 2021 NAMESPACE: oamns STATUS: deployed REVISION: 1 TEST SUITE: None Run the following command to show the ingress is created successfully:\n$ kubectl get ing -n \u0026lt;domain_namespace\u0026gt; For example:\n$ kubectl get ing -n oamns The output will look similar to the following:\nNAME CLASS HOSTS ADDRESS PORTS AGE access-ingress \u0026lt;none\u0026gt; * 10.101.132.251 80 2m53s Find the node port of NGINX using the following command:\n$ kubectl --namespace \u0026lt;domain_namespace\u0026gt; get services -o jsonpath=\u0026#34;{.spec.ports[1].nodePort}\u0026#34; nginx-ingress-ingress-nginx-controller For example:\n$ kubectl --namespace oamns get services -o jsonpath=\u0026#34;{.spec.ports[1].nodePort}\u0026#34; nginx-ingress-ingress-nginx-controller The output will look similar to the following:\n31051 Run the following command to check the ingress:\n$ kubectl describe ing access-ingress -n \u0026lt;domain_namespace\u0026gt; For example:\n$ kubectl describe ing access-ingress -n oamns The output will look similar to the following:\nName: access-ingress Namespace: oamns Address: 10.101.132.251 Default backend: default-http-backend:80 (\u0026lt;error: 
endpoints \u0026quot;default-http-backend\u0026quot; not found\u0026gt;) Rules: Host Path Backends ---- ---- -------- * /console accessdomain-adminserver:7001 (10.244.6.63:7001) /rreg/rreg accessdomain-adminserver:7001 (10.244.6.63:7001) /em accessdomain-adminserver:7001 (10.244.6.63:7001) /oamconsole accessdomain-adminserver:7001 (10.244.6.63:7001) /dms accessdomain-adminserver:7001 (10.244.6.63:7001) /oam/services/rest accessdomain-adminserver:7001 (10.244.6.63:7001) /iam/admin/config accessdomain-adminserver:7001 (10.244.6.63:7001) /iam/admin/diag accessdomain-adminserver:7001 (10.244.6.63:7001) /iam/access accessdomain-cluster-oam-cluster:14100 (10.244.5.12:14100,10.244.6.64:14100) /oam/admin/api accessdomain-adminserver:7001 (10.244.6.63:7001) /oam/services/rest/access/api accessdomain-cluster-oam-cluster:14100 (10.244.5.12:14100,10.244.6.64:14100) /access accessdomain-cluster-policy-cluster:15100 (10.244.5.13:15100,10.244.6.65:15100) / accessdomain-cluster-oam-cluster:14100 (10.244.5.12:14100,10.244.6.64:14100) Annotations: kubernetes.io/ingress.class: nginx meta.helm.sh/release-name: oam-nginx meta.helm.sh/release-namespace: oamns nginx.ingress.kubernetes.io/configuration-snippet: more_set_input_headers \u0026quot;X-Forwarded-Proto: https\u0026quot;; more_set_input_headers \u0026quot;WL-Proxy-SSL: true\u0026quot;; nginx.ingress.kubernetes.io/enable-access-log: false nginx.ingress.kubernetes.io/ingress.allow-http: false nginx.ingress.kubernetes.io/proxy-buffer-size: 2000k Events: Type Reason Age From Message ---- ------ ---- ---- ------- Normal Sync 6m22s (x2 over 6m31s) nginx-ingress-controller Scheduled for sync To confirm that the new ingress is successfully routing to the domain\u0026rsquo;s server pods, run the following command to send a request to the URL for the WebLogic ReadyApp framework:\n$ curl -v -k https://${MASTERNODE-HOSTNAME}:${MASTERNODE-PORT}/weblogic/ready For example:\na) For NodePort\n$ curl -v -k 
https://masternode.example.com:31051/weblogic/ready b) For LoadBalancer:\n$ curl -v -k https://masternode.example.com/weblogic/ready The output will look similar to the following:\n* Trying 12.345.67.89... * Connected to 12.345.67.89 (12.345.67.89) port 31051 (#0) * Initializing NSS with certpath: sql:/etc/pki/nssdb * skipping SSL peer certificate verification * SSL connection using TLS_ECDHE_RSA_WITH_AES_128_GCM_SHA256 * Server certificate: * subject: CN=masternode.example.com * start date: Nov 01 14:31:07 2021 GMT * expire date: Nov 01 14:31:07 2022 GMT * common name: masternode.example.com * issuer: CN=masternode.example.com \u0026gt; GET /weblogic/ready HTTP/1.1 \u0026gt; User-Agent: curl/7.29.0 \u0026gt; Host: masternode.example.com:31051 \u0026gt; Accept: */* \u0026gt; \u0026lt; HTTP/1.1 200 OK \u0026lt; Date: Mon, 01 Nov 2021 15:06:12 GMT \u0026lt; Content-Length: 0 \u0026lt; Connection: keep-alive \u0026lt; Strict-Transport-Security: max-age=15724800; includeSubDomains \u0026lt; * Connection #0 to host 12.345.67.89 left intact Verify that you can access the domain URL After setting up the NGINX ingress, verify that the domain applications are accessible through the NGINX ingress port (for example 31051) as per Validate Domain URLs \n" +}, +{ + "uri": "/fmw-kubernetes/21.4.2/oig/configure-ingress/", + "title": "Configure an ingress for an OIG domain", + "tags": [], + "description": "This document provides steps to configure an Ingress to direct traffic to the OIG domain.", + "content": "Choose one of the following supported methods to configure an Ingress to direct traffic for your OIG domain.\n a. Using an Ingress with NGINX (non-SSL) Steps to set up an Ingress for NGINX to direct traffic to the OIG domain (non-SSL).\n b. 
Using an Ingress with NGINX (SSL) Steps to set up an Ingress for NGINX to direct traffic to the OIG domain (SSL).\n " +}, +{ + "uri": "/fmw-kubernetes/21.4.2/oig/manage-oig-domains/monitoring-oim-domains/", + "title": "Monitoring an OIG domain", + "tags": [], + "description": "Describes the steps for Monitoring the OIG domain and Publising the logs to Elasticsearch.", + "content": "After the OIG domain is set up you can monitor the OIG instance using Prometheus and Grafana. See Monitoring a domain.\nThe WebLogic Monitoring Exporter uses the WLS RESTful Management API to scrape runtime information and then exports Prometheus-compatible metrics. It is deployed as a web application in a WebLogic Server (WLS) instance, version 12.2.1 or later, typically, in the instance from which you want to get metrics.\nThere are two ways to setup monitoring and you should choose one method or the other:\n Setup automatically using setup-monitoring.sh Setup using manual configuration Setup automatically using setup-monitoring.sh The $WORKDIR/kubernetes/monitoring-service/setup-monitoring.sh sets up the monitoring for the OIG domain. It installs Prometheus, Grafana, WebLogic Monitoring Exporter and deploys the web applications to the OIG domain. It also deploys the WebLogic Server Grafana dashboard.\nFor usage details execute ./setup-monitoring.sh -h.\n Edit the $WORKDIR/kubernetes/monitoring-service/monitoring-inputs.yaml and change the domainUID, domainNamespace, and weblogicCredentialsSecretName to correspond to your deployment. For example:\nversion: create-oimcluster-monitoring-inputs-v1 # Unique ID identifying your domain. # This ID must not contain an underscope (\u0026quot;_\u0026quot;), and must be lowercase and unique across all domains in a Kubernetes cluster. 
domainUID: governancedomain # Name of the domain namespace domainNamespace: oigns # Boolean value indicating whether to install kube-prometheus-stack setupKubePrometheusStack: true # Additional parameters for helm install kube-prometheus-stack # Refer https://github.com/prometheus-community/helm-charts/blob/main/charts/kube-prometheus-stack/values.yaml for additional parameters # Sample : # additionalParamForKubePrometheusStack: --set nodeExporter.enabled=false --set prometheusOperator.tls.enabled=false --set prometheusOperator.admissionWebhooks.enabled=false additionalParamForKubePrometheusStack: # Name of the monitoring namespace monitoringNamespace: monitoring # Name of the Admin Server adminServerName: AdminServer # # Port number for admin server adminServerPort: 7001 # Cluster name soaClusterName: soa_cluster # Port number for managed server soaManagedServerPort: 8001 # WebLogic Monitoring Exporter to Cluster wlsMonitoringExporterTosoaCluster: true # Cluster name oimClusterName: oim_cluster # Port number for managed server oimManagedServerPort: 14000 # WebLogic Monitoring Exporter to Cluster wlsMonitoringExporterTooimCluster: true # Boolean to indicate if the adminNodePort will be exposed exposeMonitoringNodePort: true # NodePort to expose Prometheus prometheusNodePort: 32101 # NodePort to expose Grafana grafanaNodePort: 32100 # NodePort to expose Alertmanager alertmanagerNodePort: 32102 # Name of the Kubernetes secret for the Admin Server's username and password weblogicCredentialsSecretName: oig-domain-credentials Run the following command to setup monitoring:\n$ cd $WORKDIR/kubernetes/monitoring-service $ ./setup-monitoring.sh -i monitoring-inputs.yaml The output should be similar to the following:\nMonitoring setup in monitoring in progress node/worker-node1 not labeled node/worker-node2 not labeled node/master-node not labeled Setup prometheus-community/kube-prometheus-stack started \u0026quot;prometheus-community\u0026quot; already exists with the same 
configuration, skipping Hang tight while we grab the latest from your chart repositories... ...Successfully got an update from the \u0026quot;stable\u0026quot; chart repository ...Successfully got an update from the \u0026quot;prometheus\u0026quot; chart repository ...Successfully got an update from the \u0026quot;prometheus-community\u0026quot; chart repository ...Successfully got an update from the \u0026quot;appscode\u0026quot; chart repository Update Complete. ⎈Happy Helming!⎈ Setup prometheus-community/kube-prometheus-stack in progress NAME: monitoring LAST DEPLOYED: Thu Nov 18 03:38:04 2021 NAMESPACE: monitoring STATUS: deployed REVISION: 1 NOTES: kube-prometheus-stack has been installed. Check its status by running: kubectl --namespace monitoring get pods -l \u0026quot;release=monitoring\u0026quot; Visit https://github.com/prometheus-operator/kube-prometheus for instructions on how to create \u0026amp; configure Alertmanager and Prometheus instances using the Operator. Setup prometheus-community/kube-prometheus-stack completed Deploy WebLogic Monitoring Exporter started Deploying WebLogic Monitoring Exporter with domainNamespace[oigns], domainUID[governancedomain], adminServerPodName[governancedomain-adminserver] % Total % Received % Xferd Average Speed Time Time Time Current Dload Upload Total Spent Left Speed 100 655 100 655 0 0 1159 0 --:--:-- --:--:-- --:--:-- 1159 100 2196k 100 2196k 0 0 1763k 0 0:00:01 0:00:01 --:--:-- 20.7M created $WORKDIR/kubernetes/monitoring-service/scripts/wls-exporter-deploy dir created /tmp/ci-GJSQsiXrFE /tmp/ci-GJSQsiXrFE $WORKDIR/kubernetes/monitoring-service in temp dir adding: WEB-INF/weblogic.xml (deflated 61%) adding: config.yml (deflated 60%) $WORKDIR/kubernetes/monitoring-service created /tmp/ci-KeyZrdouMD /tmp/ci-KeyZrdouMD $WORKDIR/kubernetes/monitoring-service in temp dir adding: WEB-INF/weblogic.xml (deflated 61%) adding: config.yml (deflated 60%) $WORKDIR/kubernetes/monitoring-service created /tmp/ci-QE9HawIIgT 
/tmp/ci-QE9HawIIgT $WORKDIR/kubernetes/monitoring-service in temp dir adding: WEB-INF/weblogic.xml (deflated 61%) adding: config.yml (deflated 60%) $WORKDIR/kubernetes/monitoring-service Initializing WebLogic Scripting Tool (WLST) ... Welcome to WebLogic Server Administration Scripting Shell Type help() for help on available commands Connecting to t3://governancedomain-adminserver:7001 with userid weblogic ... Successfully connected to Admin Server \u0026quot;AdminServer\u0026quot; that belongs to domain \u0026quot;governancedomain\u0026quot;. Warning: An insecure protocol was used to connect to the server. To ensure on-the-wire security, the SSL port or Admin port should be used instead. Deploying ......... Deploying application from /u01/oracle/wls-exporter-deploy/wls-exporter-adminserver.war to targets AdminServer (upload=true) ... \u0026lt;Nov 18, 2021 11:38:53 AM GMT\u0026gt; \u0026lt;Info\u0026gt; \u0026lt;J2EE Deployment SPI\u0026gt; \u0026lt;BEA-260121\u0026gt; \u0026lt;Initiating deploy operation for application, wls-exporter-adminserver [archive: /u01/oracle/wls-exporter-deploy/wls-exporter-adminserver.war], to AdminServer .\u0026gt; .Completed the deployment of Application with status completed Current Status of your Deployment: Deployment command type: deploy Deployment State : completed Deployment Message : no message Starting application wls-exporter-adminserver. \u0026lt;Nov 18, 2021 11:39:05 AM GMT\u0026gt; \u0026lt;Info\u0026gt; \u0026lt;J2EE Deployment SPI\u0026gt; \u0026lt;BEA-260121\u0026gt; \u0026lt;Initiating start operation for application, wls-exporter-adminserver [archive: null], to AdminServer .\u0026gt; .Completed the start of Application with status completed Current Status of your Deployment: Deployment command type: start Deployment State : completed Deployment Message : no message Deploying ......... Deploying application from /u01/oracle/wls-exporter-deploy/wls-exporter-soa.war to targets soa_cluster (upload=true) ... 
\u0026lt;Nov 18, 2021 11:39:08 AM GMT\u0026gt; \u0026lt;Info\u0026gt; \u0026lt;J2EE Deployment SPI\u0026gt; \u0026lt;BEA-260121\u0026gt; \u0026lt;Initiating deploy operation for application, wls-exporter-soa [archive: /u01/oracle/wls-exporter-deploy/wls-exporter-soa.war], to soa_cluster .\u0026gt; .Completed the deployment of Application with status completed Current Status of your Deployment: Deployment command type: deploy Deployment State : completed Deployment Message : no message Starting application wls-exporter-soa. \u0026lt;Nov 18, 2021 11:39:17 AM GMT\u0026gt; \u0026lt;Info\u0026gt; \u0026lt;J2EE Deployment SPI\u0026gt; \u0026lt;BEA-260121\u0026gt; \u0026lt;Initiating start operation for application, wls-exporter-soa [archive: null], to soa_cluster .\u0026gt; .Completed the start of Application with status completed Current Status of your Deployment: Deployment command type: start Deployment State : completed Deployment Message : no message Deploying ......... Deploying application from /u01/oracle/wls-exporter-deploy/wls-exporter-oim.war to targets oim_cluster (upload=true) ... \u0026lt;Nov 18, 2021 11:39:20 AM GMT\u0026gt; \u0026lt;Info\u0026gt; \u0026lt;J2EE Deployment SPI\u0026gt; \u0026lt;BEA-260121\u0026gt; \u0026lt;Initiating deploy operation for application, wls-exporter-oim [archive: /u01/oracle/wls-exporter-deploy/wls-exporter-oim.war], to oim_cluster .\u0026gt; .Completed the deployment of Application with status completed Current Status of your Deployment: Deployment command type: deploy Deployment State : completed Deployment Message : no message Starting application wls-exporter-oim. 
\u0026lt;Nov 18, 2021 11:39:30 AM GMT\u0026gt; \u0026lt;Info\u0026gt; \u0026lt;J2EE Deployment SPI\u0026gt; \u0026lt;BEA-260121\u0026gt; \u0026lt;Initiating start operation for application, wls-exporter-oim [archive: null], to oim_cluster .\u0026gt; .Completed the start of Application with status completed Current Status of your Deployment: Deployment command type: start Deployment State : completed Deployment Message : no message Disconnected from weblogic server: AdminServer Exiting WebLogic Scripting Tool. \u0026lt;Nov 18, 2021 11:39:33 AM GMT\u0026gt; \u0026lt;Warning\u0026gt; \u0026lt;JNDI\u0026gt; \u0026lt;BEA-050001\u0026gt; \u0026lt;WLContext.close() was called in a different thread than the one in which it was created.\u0026gt; Deploy WebLogic Monitoring Exporter completed secret/basic-auth created servicemonitor.monitoring.coreos.com/wls-exporter created Deploying WebLogic Server Grafana Dashboard.... {\u0026quot;id\u0026quot;:25,\u0026quot;slug\u0026quot;:\u0026quot;weblogic-server-dashboard\u0026quot;,\u0026quot;status\u0026quot;:\u0026quot;success\u0026quot;,\u0026quot;uid\u0026quot;:\u0026quot;5yUwzbZWz\u0026quot;,\u0026quot;url\u0026quot;:\u0026quot;/d/5yUwzbZWz/weblogic-server-dashboard\u0026quot;,\u0026quot;version\u0026quot;:1} Deployed WebLogic Server Grafana Dashboard successfully Grafana is available at NodePort: 32100 Prometheus is available at NodePort: 32101 Altermanager is available at NodePort: 32102 ============================================================== Prometheus service discovery After the ServiceMonitor is deployed, the wls-exporter should be discovered by Prometheus and be able to collect metrics.\n Access the following URL to view Prometheus service discovery: http://${MASTERNODE-HOSTNAME}:32101/service-discovery\n Click on serviceMonitor/oigns/wls-exporter/0 and then show more. 
Verify all the targets are mentioned.\n Note : It may take several minutes for serviceMonitor/oigns/wls-exporter/0 to appear, so refresh the page until it does.\nGrafana dashboard Access the Grafana dashboard with the following URL: http://${MASTERNODE-HOSTNAME}:32100 and login with admin/admin. Change your password when prompted.\n In the Dashboards panel, click on WebLogic Server Dashboard. The dashboard for your OIG domain should be displayed. If it is not displayed, click the Search icon in the left hand menu and search for WebLogic Server Dashboard.\n Cleanup To uninstall the Prometheus, Grafana, WebLogic Monitoring Exporter and the deployments, you can run the $WORKDIR/monitoring-service/kubernetes/delete-monitoring.sh script. For usage details execute ./delete-monitoring.sh -h\n To uninstall run the following command:\n$ cd $WORKDIR/kubernetes/monitoring-service $ ./delete-monitoring.sh -i monitoring-inputs.yaml Setup using manual configuration Install Prometheus, Grafana and WebLogic Monitoring Exporter manually. Create the web applications and deploy to the OIG domain.\nDeploy the Prometheus operator Kube-Prometheus requires all nodes to be labelled with kubernetes.io/os=linux. 
To check if your nodes are labelled, run the following:\n$ kubectl get nodes --show-labels If the nodes are labelled the output will look similar to the following:\nNAME STATUS ROLES AGE VERSION LABELS worker-node1 Ready \u0026lt;none\u0026gt; 42d v1.20.10 beta.kubernetes.io/arch=amd64,beta.kubernetes.io/os=linux,kubernetes.io/arch=amd64,kubernetes.io/hostname=worker-node1,kubernetes.io/os=linux worker-node2 Ready \u0026lt;none\u0026gt; 42d v1.20.10 beta.kubernetes.io/arch=amd64,beta.kubernetes.io/os=linux,kubernetes.io/arch=amd64,kubernetes.io/hostname=worker-node2,kubernetes.io/os=linux master-node Ready master 42d v1.20.10 beta.kubernetes.io/arch=amd64,beta.kubernetes.io/os=linux,kubernetes.io/arch=amd64,kubernetes.io/hostname=master-node,kubernetes.io/os=linux,node-role.kubernetes.io/master= If the nodes are not labelled, run the following command:\n$ kubectl label nodes --all kubernetes.io/os=linux Clone Prometheus by running the following commands:\n$ cd $WORKDIR/kubernetes/monitoring-service $ git clone https://github.com/coreos/kube-prometheus.git -b v0.7.0 Note: Please refer to the compatibility matrix of Kube Prometheus. 
Please download the release of the repository according to the Kubernetes version of your cluster.\n Run the following command to create the namespace and custom resource definitions:\n$ cd kube-prometheus $ kubectl create -f manifests/setup The output will look similar to the following:\nnamespace/monitoring created customresourcedefinition.apiextensions.k8s.io/alertmanagerconfigs.monitoring.coreos.com created customresourcedefinition.apiextensions.k8s.io/alertmanagers.monitoring.coreos.com created customresourcedefinition.apiextensions.k8s.io/podmonitors.monitoring.coreos.com created customresourcedefinition.apiextensions.k8s.io/probes.monitoring.coreos.com created customresourcedefinition.apiextensions.k8s.io/prometheuses.monitoring.coreos.com created customresourcedefinition.apiextensions.k8s.io/prometheusrules.monitoring.coreos.com created customresourcedefinition.apiextensions.k8s.io/servicemonitors.monitoring.coreos.com created customresourcedefinition.apiextensions.k8s.io/thanosrulers.monitoring.coreos.com created clusterrole.rbac.authorization.k8s.io/prometheus-operator created clusterrolebinding.rbac.authorization.k8s.io/prometheus-operator created deployment.apps/prometheus-operator created service/prometheus-operator created serviceaccount/prometheus-operator created Run the following command to create the rest of the resources:\n$ kubectl create -f manifests/ The output will look similar to the following:\nalertmanager.monitoring.coreos.com/main created prometheusrule.monitoring.coreos.com/alertmanager-main-rules created secret/alertmanager-main created service/alertmanager-main created serviceaccount/alertmanager-main created servicemonitor.monitoring.coreos.com/alertmanager-main created clusterrole.rbac.authorization.k8s.io/blackbox-exporter created clusterrolebinding.rbac.authorization.k8s.io/blackbox-exporter created configmap/blackbox-exporter-configuration created deployment.apps/blackbox-exporter created service/blackbox-exporter created 
serviceaccount/blackbox-exporter created servicemonitor.monitoring.coreos.com/blackbox-exporter created secret/grafana-config created secret/grafana-datasources created configmap/grafana-dashboard-alertmanager-overview created configmap/grafana-dashboard-apiserver created configmap/grafana-dashboard-cluster-total created configmap/grafana-dashboard-controller-manager created configmap/grafana-dashboard-k8s-resources-cluster created configmap/grafana-dashboard-k8s-resources-namespace created configmap/grafana-dashboard-k8s-resources-node created configmap/grafana-dashboard-k8s-resources-pod created configmap/grafana-dashboard-k8s-resources-workload created configmap/grafana-dashboard-k8s-resources-workloads-namespace created configmap/grafana-dashboard-kubelet created configmap/grafana-dashboard-namespace-by-pod created configmap/grafana-dashboard-namespace-by-workload created configmap/grafana-dashboard-node-cluster-rsrc-use created configmap/grafana-dashboard-node-rsrc-use created configmap/grafana-dashboard-nodes created configmap/grafana-dashboard-persistentvolumesusage created configmap/grafana-dashboard-pod-total created configmap/grafana-dashboard-prometheus-remote-write created configmap/grafana-dashboard-prometheus created configmap/grafana-dashboard-proxy created configmap/grafana-dashboard-scheduler created configmap/grafana-dashboard-workload-total created configmap/grafana-dashboards created deployment.apps/grafana created service/grafana created serviceaccount/grafana created servicemonitor.monitoring.coreos.com/grafana created prometheusrule.monitoring.coreos.com/kube-prometheus-rules created clusterrole.rbac.authorization.k8s.io/kube-state-metrics created clusterrolebinding.rbac.authorization.k8s.io/kube-state-metrics created deployment.apps/kube-state-metrics created prometheusrule.monitoring.coreos.com/kube-state-metrics-rules created service/kube-state-metrics created serviceaccount/kube-state-metrics created 
servicemonitor.monitoring.coreos.com/kube-state-metrics created prometheusrule.monitoring.coreos.com/kubernetes-monitoring-rules created servicemonitor.monitoring.coreos.com/kube-apiserver created servicemonitor.monitoring.coreos.com/coredns created servicemonitor.monitoring.coreos.com/kube-controller-manager created servicemonitor.monitoring.coreos.com/kube-scheduler created servicemonitor.monitoring.coreos.com/kubelet created clusterrole.rbac.authorization.k8s.io/node-exporter created clusterrolebinding.rbac.authorization.k8s.io/node-exporter created daemonset.apps/node-exporter created prometheusrule.monitoring.coreos.com/node-exporter-rules created service/node-exporter created serviceaccount/node-exporter created servicemonitor.monitoring.coreos.com/node-exporter created apiservice.apiregistration.k8s.io/v1beta1.metrics.k8s.io created clusterrole.rbac.authorization.k8s.io/prometheus-adapter created clusterrole.rbac.authorization.k8s.io/system:aggregated-metrics-reader created clusterrolebinding.rbac.authorization.k8s.io/prometheus-adapter created clusterrolebinding.rbac.authorization.k8s.io/resource-metrics:system:auth-delegator created clusterrole.rbac.authorization.k8s.io/resource-metrics-server-resources created configmap/adapter-config created deployment.apps/prometheus-adapter created rolebinding.rbac.authorization.k8s.io/resource-metrics-auth-reader created service/prometheus-adapter created serviceaccount/prometheus-adapter created servicemonitor.monitoring.coreos.com/prometheus-adapter created clusterrole.rbac.authorization.k8s.io/prometheus-k8s created clusterrolebinding.rbac.authorization.k8s.io/prometheus-k8s created prometheusrule.monitoring.coreos.com/prometheus-operator-rules created servicemonitor.monitoring.coreos.com/prometheus-operator created prometheus.monitoring.coreos.com/k8s created prometheusrule.monitoring.coreos.com/prometheus-k8s-prometheus-rules created rolebinding.rbac.authorization.k8s.io/prometheus-k8s-config created 
rolebinding.rbac.authorization.k8s.io/prometheus-k8s created rolebinding.rbac.authorization.k8s.io/prometheus-k8s created rolebinding.rbac.authorization.k8s.io/prometheus-k8s created role.rbac.authorization.k8s.io/prometheus-k8s-config created role.rbac.authorization.k8s.io/prometheus-k8s created role.rbac.authorization.k8s.io/prometheus-k8s created role.rbac.authorization.k8s.io/prometheus-k8s created service/prometheus-k8s created serviceaccount/prometheus-k8s created servicemonitor.monitoring.coreos.com/prometheus-k8s created unable to recognize \u0026quot;manifests/alertmanager-podDisruptionBudget.yaml\u0026quot;: no matches for kind \u0026quot;PodDisruptionBudget\u0026quot; in version \u0026quot;policy/v1\u0026quot; unable to recognize \u0026quot;manifests/prometheus-adapter-podDisruptionBudget.yaml\u0026quot;: no matches for kind \u0026quot;PodDisruptionBudget\u0026quot; in version \u0026quot;policy/v1\u0026quot; unable to recognize \u0026quot;manifests/prometheus-podDisruptionBudget.yaml\u0026quot;: no matches for kind \u0026quot;PodDisruptionBudget\u0026quot; in version \u0026quot;policy/v1\u0026quot; Provide external access for Grafana, Prometheus, and Alertmanager, by running the following commands:\n$ kubectl patch svc grafana -n monitoring --type=json -p \u0026#39;[{\u0026#34;op\u0026#34;: \u0026#34;replace\u0026#34;, \u0026#34;path\u0026#34;: \u0026#34;/spec/type\u0026#34;, \u0026#34;value\u0026#34;: \u0026#34;NodePort\u0026#34; },{\u0026#34;op\u0026#34;: \u0026#34;replace\u0026#34;, \u0026#34;path\u0026#34;: \u0026#34;/spec/ports/0/nodePort\u0026#34;, \u0026#34;value\u0026#34;: 32100 }]\u0026#39; $ kubectl patch svc prometheus-k8s -n monitoring --type=json -p \u0026#39;[{\u0026#34;op\u0026#34;: \u0026#34;replace\u0026#34;, \u0026#34;path\u0026#34;: \u0026#34;/spec/type\u0026#34;, \u0026#34;value\u0026#34;: \u0026#34;NodePort\u0026#34; },{\u0026#34;op\u0026#34;: \u0026#34;replace\u0026#34;, \u0026#34;path\u0026#34;: 
\u0026#34;/spec/ports/0/nodePort\u0026#34;, \u0026#34;value\u0026#34;: 32101 }]\u0026#39; $ kubectl patch svc alertmanager-main -n monitoring --type=json -p \u0026#39;[{\u0026#34;op\u0026#34;: \u0026#34;replace\u0026#34;, \u0026#34;path\u0026#34;: \u0026#34;/spec/type\u0026#34;, \u0026#34;value\u0026#34;: \u0026#34;NodePort\u0026#34; },{\u0026#34;op\u0026#34;: \u0026#34;replace\u0026#34;, \u0026#34;path\u0026#34;: \u0026#34;/spec/ports/0/nodePort\u0026#34;, \u0026#34;value\u0026#34;: 32102 }]\u0026#39; Note: This assigns port 32100 to Grafana, 32101 to Prometheus, and 32102 to Alertmanager.\nThe output will look similar to the following:\nservice/grafana patched service/prometheus-k8s patched service/alertmanager-main patched Verify that the Prometheus, Grafana, and Alertmanager pods are running in the monitoring namespace and the respective services have the exports configured correctly:\n$ kubectl get pods,services -o wide -n monitoring The output should look similar to the following:\npod/alertmanager-main-0 2/2 Running 0 40s 10.244.1.29 worker-node1 \u0026lt;none\u0026gt; \u0026lt;none\u0026gt; pod/alertmanager-main-1 2/2 Running 0 40s 10.244.2.68 worker-node2 \u0026lt;none\u0026gt; \u0026lt;none\u0026gt; pod/alertmanager-main-2 2/2 Running 0 40s 10.244.1.28 worker-node1 \u0026lt;none\u0026gt; \u0026lt;none\u0026gt; pod/grafana-f8cd57fcf-zpjh2 1/1 Running 0 40s 10.244.2.69 worker-node2 \u0026lt;none\u0026gt; \u0026lt;none\u0026gt; pod/kube-state-metrics-587bfd4f97-zw9zj 3/3 Running 0 38s 10.244.1.30 worker-node1 \u0026lt;none\u0026gt; \u0026lt;none\u0026gt; pod/node-exporter-2cgrm 2/2 Running 0 38s 10.196.54.36 master-node \u0026lt;none\u0026gt; \u0026lt;none\u0026gt; pod/node-exporter-fpl7f 2/2 Running 0 38s 10.247.95.26 worker-node1 \u0026lt;none\u0026gt; \u0026lt;none\u0026gt; pod/node-exporter-kvvnr 2/2 Running 0 38s 10.250.40.59 worker-node2 \u0026lt;none\u0026gt; \u0026lt;none\u0026gt; pod/prometheus-adapter-69b8496df6-9vfdp 1/1 Running 0 38s 10.244.2.70 
worker-node2 \u0026lt;none\u0026gt; \u0026lt;none\u0026gt; pod/prometheus-k8s-0 2/2 Running 0 37s 10.244.2.71 worker-node2 \u0026lt;none\u0026gt; \u0026lt;none\u0026gt; pod/prometheus-k8s-1 2/2 Running 0 37s 10.244.1.31 worker-node1 \u0026lt;none\u0026gt; \u0026lt;none\u0026gt; pod/prometheus-operator-7649c7454f-g5b4l 2/2 Running 0 47s 10.244.2.67 worker-node2 \u0026lt;none\u0026gt; \u0026lt;none\u0026gt; NAME TYPE CLUSTER-IP EXTERNAL-IP PORT(S) AGE SELECTOR service/alertmanager-main NodePort 10.105.76.223 \u0026lt;none\u0026gt; 9093:32102/TCP 41s alertmanager=main,app=alertmanager service/alertmanager-operated ClusterIP None \u0026lt;none\u0026gt; 9093/TCP,9094/TCP,9094/UDP 40s app=alertmanager service/grafana NodePort 10.107.86.157 \u0026lt;none\u0026gt; 3000:32100/TCP 40s app=grafana service/kube-state-metrics ClusterIP None \u0026lt;none\u0026gt; 8443/TCP,9443/TCP 40s app.kubernetes.io/name=kube-state-metrics service/node-exporter ClusterIP None \u0026lt;none\u0026gt; 9100/TCP 39s app.kubernetes.io/name=node-exporter service/prometheus-adapter ClusterIP 10.102.244.224 \u0026lt;none\u0026gt; 443/TCP 39s name=prometheus-adapter service/prometheus-k8s NodePort 10.100.241.34 \u0026lt;none\u0026gt; 9090:32101/TCP 39s app=prometheus,prometheus=k8s service/prometheus-operated ClusterIP None \u0026lt;none\u0026gt; 9090/TCP 39s app=prometheus service/prometheus-operator ClusterIP None \u0026lt;none\u0026gt; 8443/TCP 47s app.kubernetes.io/component=controller,app.kubernetes.io/name=prometheus-operator Deploy WebLogic Monitoring Exporter Generate the WebLogic Monitoring Exporter deployment package. 
The wls-exporter.war package need to be updated and created for each listening port (Administration Server and Managed Servers) in the domain.\n Set the below environment values and run the script get-wls-exporter.sh to generate the required WAR files at ${WORKDIR}/kubernetes/monitoring-service/scripts/wls-exporter-deploy:\n$ cd $WORKDIR/kubernetes/monitoring-service/scripts $ export adminServerPort=7001 $ export wlsMonitoringExporterTosoaCluster=true $ export soaManagedServerPort=8001 $ export wlsMonitoringExporterTooimCluster=true $ export oimManagedServerPort=14000 $ sh get-wls-exporter.sh The output will look similar to the following:\n % Total % Received % Xferd Average Speed Time Time Time Current Dload Upload Total Spent Left Speed 100 655 100 655 0 0 1159 0 --:--:-- --:--:-- --:--:-- 1159 100 2196k 100 2196k 0 0 1430k 0 0:00:01 0:00:01 --:--:-- 8479k created $WORKDIR/kubernetes/monitoring-service/scripts/wls-exporter-deploy dir domainNamespace is empty, setting to default oimcluster domainUID is empty, setting to default oimcluster weblogicCredentialsSecretName is empty, setting to default \u0026quot;oimcluster-domain-credentials\u0026quot; adminServerPort is empty, setting to default \u0026quot;7001\u0026quot; soaClusterName is empty, setting to default \u0026quot;soa_cluster\u0026quot; oimClusterName is empty, setting to default \u0026quot;oim_cluster\u0026quot; created /tmp/ci-NEZy7NOfoz /tmp/ci-NEZy7NOfoz $WORKDIR/kubernetes/monitoring-service/scripts in temp dir adding: WEB-INF/weblogic.xml (deflated 61%) adding: config.yml (deflated 60%) $WORKDIR/kubernetes/monitoring-service/scripts created /tmp/ci-J7QJ4Nc1lo /tmp/ci-J7QJ4Nc1lo $WORKDIR/kubernetes/monitoring-service/scripts in temp dir adding: WEB-INF/weblogic.xml (deflated 61%) adding: config.yml (deflated 60%) $WORKDIR/kubernetes/monitoring-service/scripts created /tmp/ci-f4GbaxM2aJ /tmp/ci-f4GbaxM2aJ $WORKDIR/kubernetes/monitoring-service/scripts in temp dir adding: WEB-INF/weblogic.xml (deflated 
61%) adding: config.yml (deflated 60%) $WORKDIR/kubernetes/monitoring-service/scripts Deploy the WebLogic Monitoring Exporter WAR files into the Oracle Access Management domain:\n$ cd $WORKDIR/kubernetes/monitoring-service/scripts $ kubectl cp wls-exporter-deploy \u0026lt;domain_namespace\u0026gt;/\u0026lt;domain_uid\u0026gt;-adminserver:/u01/oracle $ kubectl cp deploy-weblogic-monitoring-exporter.py \u0026lt;domain_namespace\u0026gt;/\u0026lt;domain_uid\u0026gt;-adminserver:/u01/oracle/wls-exporter-deploy $ kubectl exec -it -n \u0026lt;domain_namespace\u0026gt; \u0026lt;domain_uid\u0026gt;-adminserver -- /u01/oracle/oracle_common/common/bin/wlst.sh /u01/oracle/wls-exporter-deploy/deploy-weblogic-monitoring-exporter.py -domainName \u0026lt;domain_uid\u0026gt; -adminServerName AdminServer -adminURL \u0026lt;domain_uid\u0026gt;-adminserver:7001 -username weblogic -password \u0026lt;password\u0026gt; -oimClusterName oim_cluster -wlsMonitoringExporterTooimCluster true -soaClusterName soa_cluster -wlsMonitoringExporterTosoaCluster true For example:\n$ cd $WORKDIR/kubernetes/monitoring-service/scripts $ kubectl cp wls-exporter-deploy oigns/governancedomain-adminserver:/u01/oracle $ kubectl cp deploy-weblogic-monitoring-exporter.py oigns/governancedomain-adminserver:/u01/oracle/wls-exporter-deploy $ kubectl exec -it -n oigns governancedomain-adminserver -- /u01/oracle/oracle_common/common/bin/wlst.sh /u01/oracle/wls-exporter-deploy/deploy-weblogic-monitoring-exporter.py -domainName governancedomain -adminServerName AdminServer -adminURL governancedomain-adminserver:7001 -username weblogic -password \u0026lt;password\u0026gt; -oimClusterName oim_cluster -wlsMonitoringExporterTooimCluster true -soaClusterName soa_cluster -wlsMonitoringExporterTosoaCluster true The output will look similar to the following:\nInitializing WebLogic Scripting Tool (WLST) ... 
Welcome to WebLogic Server Administration Scripting Shell Type help() for help on available commands Connecting to t3://accessdomain-adminserver:7001 with userid weblogic ... Successfully connected to Admin Server \u0026quot;AdminServer\u0026quot; that belongs to domain \u0026quot;accessdomain\u0026quot;. Warning: An insecure protocol was used to connect to the server. To ensure on-the-wire security, the SSL port or Admin port should be used instead. Deploying ......... Deploying application from /u01/oracle/wls-exporter-deploy/wls-exporter-adminserver.war to targets AdminServer (upload=true) ... \u0026lt;Nov 18, 2021 10:35:44 AM GMT\u0026gt; \u0026lt;Info\u0026gt; \u0026lt;J2EE Deployment SPI\u0026gt; \u0026lt;BEA-260121\u0026gt; \u0026lt;Initiating deploy operation for application, wls-exporter-adminserver [archive: /u01/oracle/wls-exporter-deploy/wls-exporter-adminserver.war], to AdminServer .\u0026gt; .Completed the deployment of Application with status completed Current Status of your Deployment: Deployment command type: deploy Deployment State : completed Deployment Message : no message Starting application wls-exporter-adminserver. \u0026lt;Nov 18, 2021 10:35:56 AM GMT\u0026gt; \u0026lt;Info\u0026gt; \u0026lt;J2EE Deployment SPI\u0026gt; \u0026lt;BEA-260121\u0026gt; \u0026lt;Initiating start operation for application, wls-exporter-adminserver [archive: null], to AdminServer .\u0026gt; .Completed the start of Application with status completed Current Status of your Deployment: Deployment command type: start Deployment State : completed Deployment Message : no message Deploying ......... Deploying application from /u01/oracle/wls-exporter-deploy/wls-exporter-soa.war to targets soa_cluster (upload=true) ... 
\u0026lt;Nov 18, 2021 10:35:59 AM GMT\u0026gt; \u0026lt;Info\u0026gt; \u0026lt;J2EE Deployment SPI\u0026gt; \u0026lt;BEA-260121\u0026gt; \u0026lt;Initiating deploy operation for application, wls-exporter-soa [archive: /u01/oracle/wls-exporter-deploy/wls-exporter-soa.war], to soa_cluster .\u0026gt; ..Completed the deployment of Application with status completed Current Status of your Deployment: Deployment command type: deploy Deployment State : completed Deployment Message : no message Starting application wls-exporter-soa. \u0026lt;Nov 18, 2021 10:36:12 AM GMT\u0026gt; \u0026lt;Info\u0026gt; \u0026lt;J2EE Deployment SPI\u0026gt; \u0026lt;BEA-260121\u0026gt; \u0026lt;Initiating start operation for application, wls-exporter-soa [archive: null], to soa_cluster .\u0026gt; .Completed the start of Application with status completed Current Status of your Deployment: Deployment command type: start Deployment State : completed Deployment Message : no message Deploying ......... Deploying application from /u01/oracle/wls-exporter-deploy/wls-exporter-oim.war to targets oim_cluster (upload=true) ... \u0026lt;Nov 18, 2021 10:36:15 AM GMT\u0026gt; \u0026lt;Info\u0026gt; \u0026lt;J2EE Deployment SPI\u0026gt; \u0026lt;BEA-260121\u0026gt; \u0026lt;Initiating deploy operation for application, wls-exporter-oim [archive: /u01/oracle/wls-exporter-deploy/wls-exporter-oim.war], to oim_cluster .\u0026gt; .Completed the deployment of Application with status completed Current Status of your Deployment: Deployment command type: deploy Deployment State : completed Deployment Message : no message Starting application wls-exporter-oim. 
\u0026lt;Nov 18, 2021 10:36:24 AM GMT\u0026gt; \u0026lt;Info\u0026gt; \u0026lt;J2EE Deployment SPI\u0026gt; \u0026lt;BEA-260121\u0026gt; \u0026lt;Initiating start operation for application, wls-exporter-oim [archive: null], to oim_cluster .\u0026gt; .Completed the start of Application with status completed Current Status of your Deployment: Deployment command type: start Deployment State : completed Deployment Message : no message Disconnected from weblogic server: AdminServer Exiting WebLogic Scripting Tool. \u0026lt;Nov 18, 2021 10:36:27 AM GMT\u0026gt; \u0026lt;Warning\u0026gt; \u0026lt;JNDI\u0026gt; \u0026lt;BEA-050001\u0026gt; \u0026lt;WLContext.close() was called in a different thread than the one in which it was created.\u0026gt; Configure Prometheus Operator Prometheus enables you to collect metrics from the WebLogic Monitoring Exporter. The Prometheus Operator identifies the targets using service discovery. To get the WebLogic Monitoring Exporter end point discovered as a target, you must create a service monitor pointing to the service.\nThe exporting of metrics from wls-exporter requires basicAuth, so a Kubernetes Secret is created with the user name and password that are base64 encoded. This Secret is used in the ServiceMonitor deployment. The wls-exporter-ServiceMonitor.yaml has basicAuth with credentials as username: weblogic and password: \u0026lt;password\u0026gt; in base64 encoded.\n Run the following command to get the base64 encoded version of the weblogic password:\n$ echo -n \u0026#34;\u0026lt;password\u0026gt;\u0026#34; | base64 The output will look similar to the following:\nV2VsY29tZTE= Update the $WORKDIR/kubernetes/monitoring-service/manifests/wls-exporter-ServiceMonitor.yaml and change the password: value to the value returned above. Also change any reference to the namespace and weblogic.domainName: values to match your OIG namespace and domain name. 
For example:\napiVersion: v1 kind: Secret metadata: name: basic-auth namespace: oigns data: password: V2VsY29tZTE= user: d2VibG9naWM= type: Opaque --- apiVersion: monitoring.coreos.com/v1 kind: ServiceMonitor metadata: name: wls-exporter namespace: oigns labels: k8s-app: wls-exporter release: monitoring spec: namespaceSelector: matchNames: - oigns selector: matchLabels: weblogic.domainName: governancedomain endpoints: - basicAuth: password: name: basic-auth key: password username: name: basic-auth key: user port: default relabelings: - action: labelmap regex: __meta_kubernetes_service_label_(.+) interval: 10s honorLabels: true path: /wls-exporter/metrics Update the $WORKDIR/kubernetes/monitoring-service/manifests/prometheus-roleSpecific-domain-namespace.yaml and change the namespace to match your OIG namespace. For example:\napiVersion: rbac.authorization.k8s.io/v1 items: - apiVersion: rbac.authorization.k8s.io/v1 kind: Role metadata: name: prometheus-k8s namespace: oigns rules: - apiGroups: - \u0026quot;\u0026quot; resources: - services - endpoints - pods verbs: - get - list - watch kind: RoleList Update the $WORKDIR/kubernetes/monitoring-service/manifests/prometheus-roleBinding-domain-namespace.yaml and change the namespace to match your OIG namespace. For example:\napiVersion: rbac.authorization.k8s.io/v1 items: - apiVersion: rbac.authorization.k8s.io/v1 kind: RoleBinding metadata: name: prometheus-k8s namespace: oigns roleRef: apiGroup: rbac.authorization.k8s.io kind: Role name: prometheus-k8s subjects: - kind: ServiceAccount name: prometheus-k8s namespace: monitoring kind: RoleBindingList Run the following command to enable Prometheus:\n$ cd $WORKDIR/kubernetes/monitoring-service/manifests $ kubectl apply -f . 
The output will look similar to the following:\nrolebinding.rbac.authorization.k8s.io/prometheus-k8s created role.rbac.authorization.k8s.io/prometheus-k8s created secret/basic-auth created servicemonitor.monitoring.coreos.com/wls-exporter created Prometheus service discovery After the ServiceMonitor is deployed, the wls-exporter should be discovered by Prometheus and be able to collect metrics.\n Access the following URL to view Prometheus service discovery: http://${MASTERNODE-HOSTNAME}:32101/service-discovery\n Click on oigns/wls-exporter/0 and then show more. Verify all the targets are mentioned.\n Note: It may take several minutes for oigns/wls-exporter/0 to appear, so refresh the page until it does.\nGrafana dashboard Access the Grafana dashboard with the following URL: http://${MASTERNODE-HOSTNAME}:32100 and login with admin/admin. Change your password when prompted.\n Import the Grafana dashboard by navigating on the left hand menu to Create \u0026gt; Import. Copy the content from $WORKDIR/kubernetes/monitoring-service/config/weblogic-server-dashboard-import.json and paste. Then click Load and Import. The dashboard should be displayed.\n Cleanup To clean up a manual installation:\n Run the following commands:\n$ cd $WORKDIR/kubernetes/monitoring-service/manifests/ $ kubectl delete -f . 
Delete the deployments:\n$ cd $WORKDIR/kubernetes/monitoring-service/scripts/ $ kubectl cp undeploy-weblogic-monitoring-exporter.py \u0026lt;domain_namespace\u0026gt;/\u0026lt;domain_uid\u0026gt;-adminserver:/u01/oracle/wls-exporter-deploy $ kubectl exec -it -n \u0026lt;domain_namespace\u0026gt; \u0026lt;domain_uid\u0026gt;-adminserver -- /u01/oracle/oracle_common/common/bin/wlst.sh /u01/oracle/wls-exporter-deploy/undeploy-weblogic-monitoring-exporter.py -domainName \u0026lt;domain_uid\u0026gt; -adminServerName AdminServer -adminURL \u0026lt;domain_uid\u0026gt;-adminserver:7001 -username weblogic -password \u0026lt;password\u0026gt; -oimClusterName oim_cluster -wlsMonitoringExporterTooimCluster true -soaClusterName soa_cluster -wlsMonitoringExporterTosoaCluster true Delete Prometheus:\n $ cd $WORKDIR/kubernetes/monitoring-service/kube-prometheus $ kubectl delete -f manifests $ kubectl delete -f manifests/setup " +}, +{ + "uri": "/fmw-kubernetes/21.4.2/oud/troubleshooting/", + "title": "Troubleshooting", + "tags": [], + "description": "How to Troubleshoot issues.", + "content": " Check the Status of a Namespace View POD Logs View Pod Description Check the Status of a Namespace To check the status of objects in a namespace use the following command:\n$ kubectl --namespace \u0026lt;namespace\u0026gt; get nodes,pod,service,secret,pv,pvc,ingress -o wide Output will be similar to the following:\n$ kubectl --namespace oudns get nodes,pod,service,secret,pv,pvc,ingress -o wide NAME READY STATUS RESTARTS AGE IP NODE NOMINATED NODE READINESS GATES pod/oud-ds-rs-0 1/1 Running 0 8m44s 10.244.0.195 \u0026lt;Worker Node\u0026gt; \u0026lt;none\u0026gt; \u0026lt;none\u0026gt; pod/oud-ds-rs-1 1/1 Running 0 8m44s 10.244.0.194 \u0026lt;Worker Node\u0026gt; \u0026lt;none\u0026gt; \u0026lt;none\u0026gt; pod/oud-ds-rs-2 0/1 Running 0 8m44s 10.244.0.193 \u0026lt;Worker Node\u0026gt; \u0026lt;none\u0026gt; \u0026lt;none\u0026gt; NAME TYPE CLUSTER-IP EXTERNAL-IP PORT(S) AGE SELECTOR 
service/oud-ds-rs-0 ClusterIP 10.99.232.83 \u0026lt;none\u0026gt; 1444/TCP,1888/TCP,1898/TCP 8m44s kubernetes.io/instance=oud-ds-rs,app.kubernetes.io/name=oud-ds-rs,oud/instance=oud-ds-rs-0 service/oud-ds-rs-1 ClusterIP 10.100.186.42 \u0026lt;none\u0026gt; 1444/TCP,1888/TCP,1898/TCP 8m45s app.kubernetes.io/instance=oud-ds-rs,app.kubernetes.io/name=oud-ds-rs,oud/instance=oud-ds-rs-1 service/oud-ds-rs-2 ClusterIP 10.104.55.53 \u0026lt;none\u0026gt; 1444/TCP,1888/TCP,1898/TCP 8m45s app.kubernetes.io/instance=oud-ds-rs,app.kubernetes.io/name=oud-ds-rs,oud/instance=oud-ds-rs-2 service/oud-ds-rs-http-0 ClusterIP 10.102.116.145 \u0026lt;none\u0026gt; 1080/TCP,1081/TCP 8m45s app.kubernetes.io/instance=oud-ds-rs,app.kubernetes.io/name=oud-ds-rs,oud/instance=oud-ds-rs-0 service/oud-ds-rs-http-1 ClusterIP 10.111.103.84 \u0026lt;none\u0026gt; 1080/TCP,1081/TCP 8m44s app.kubernetes.io/instance=oud-ds-rs,app.kubernetes.io/name=oud-ds-rs,oud/instance=oud-ds-rs-1 service/oud-ds-rs-http-2 ClusterIP 10.105.53.24 \u0026lt;none\u0026gt; 1080/TCP,1081/TCP 8m45s app.kubernetes.io/instance=oud-ds-rs,app.kubernetes.io/name=oud-ds-rs,oud/instance=oud-ds-rs-2 service/oud-ds-rs-lbr-admin ClusterIP 10.98.39.206 \u0026lt;none\u0026gt; 1888/TCP,1444/TCP 8m45s app.kubernetes.io/instance=oud-ds-rs,app.kubernetes.io/name=oud-ds-rs service/oud-ds-rs-lbr-http ClusterIP 10.110.77.132 \u0026lt;none\u0026gt; 1080/TCP,1081/TCP 8m45s app.kubernetes.io/instance=oud-ds-rs,app.kubernetes.io/name=oud-ds-rs service/oud-ds-rs-lbr-ldap ClusterIP 10.111.55.122 \u0026lt;none\u0026gt; 1389/TCP,1636/TCP 8m45s app.kubernetes.io/instance=oud-ds-rs,app.kubernetes.io/name=oud-ds-rs service/oud-ds-rs-ldap-0 ClusterIP 10.108.155.81 \u0026lt;none\u0026gt; 1389/TCP,1636/TCP 8m44s app.kubernetes.io/instance=oud-ds-rs,app.kubernetes.io/name=oud-ds-rs,oud/instance=oud-ds-rs-0 service/oud-ds-rs-ldap-1 ClusterIP 10.104.88.44 \u0026lt;none\u0026gt; 1389/TCP,1636/TCP 8m45s 
app.kubernetes.io/instance=oud-ds-rs,app.kubernetes.io/name=oud-ds-rs,oud/instance=oud-ds-rs-1 service/oud-ds-rs-ldap-2 ClusterIP 10.105.253.120 \u0026lt;none\u0026gt; 1389/TCP,1636/TCP 8m45s app.kubernetes.io/instance=oud-ds-rs,app.kubernetes.io/name=oud-ds-rs,oud/instance=oud-ds-rs-2 NAME TYPE DATA AGE secret/default-token-tbjr5 kubernetes.io/service-account-token 3 25d secret/oud-ds-rs-creds opaque 8 8m48s secret/oud-ds-rs-token-cct26 kubernetes.io/service-account-token 3 8m50s secret/sh.helm.release.v1.oud-ds-rs.v1 helm.sh/release.v1 1 8m51s NAME CAPACITY ACCESS MODES RECLAIM POLICY STATUS CLAIM STORAGECLASS REASON AGE persistentvolume/oud-ds-rs-pv 20Gi RWX Retain Bound oudns/oud-ds-rs-pvc manual 8m47s NAME STATUS VOLUME CAPACITY ACCESS MODES STORAGECLASS AGE persistentvolumeclaim/oud-ds-rs-pvc Bound oud-ds-rs-pv 20Gi RWX manual 8m48s NAME HOSTS ADDRESS PORTS AGE ingress.extensions/oud-ds-rs-admin-ingress-nginx oud-ds-rs-admin-0,oud-ds-rs-admin-1,oud-ds-rs-admin-2 + 2 more... 10.229.141.78 80 8m45s ingress.extensions/oud-ds-rs-http-ingress-nginx oud-ds-rs-http-0,oud-ds-rs-http-1,oud-ds-rs-http-2 + 3 more... 
10.229.141.78 80 8m45s Include/exclude elements (nodes,pod,service,secret,pv,pvc,ingress) as required.\nView POD Logs To view logs for a POD use the following command:\n$ kubectl logs \u0026lt;pod\u0026gt; -n \u0026lt;namespace\u0026gt; For example:\n$ kubectl logs oud-ds-rs-0 -n oudns Output will depend on the application running in the POD.\nView Pod Description Details about a POD can be viewed using the kubectl describe command:\n$ kubectl describe pod \u0026lt;pod\u0026gt; -n \u0026lt;namespace\u0026gt; For example:\n$ kubectl describe pod oud-ds-rs-0 -n oudns Name: oud-ds-rs-0 Namespace: oudns Priority: 0 Node: 10.89.73.203/10.89.73.203 Start Time: Wed, 07 Oct 2020 07:30:27 -0700 Labels: app.kubernetes.io/instance=oud-ds-rs app.kubernetes.io/managed-by=Helm app.kubernetes.io/name=oud-ds-rs app.kubernetes.io/version=12.2.1.4.0 helm.sh/chart=oud-ds-rs-0.1 oud/instance=oud-ds-rs-0 Annotations: meta.helm.sh/release-name: oud-ds-rs meta.helm.sh/release-namespace: oudns Status: Running IP: 10.244.1.90 IPs: IP: 10.244.1.90 Containers: oud-ds-rs: Container ID: docker://e3b79a283f56870e6d702cf8c2cc7aafa09a242f7a2cd543d8014a24aa219903 Image: oracle/oud:12.2.1.4.0 Image ID: docker://sha256:8a937042bef357fdeb09ce20d34332b14d1f1afe3ccb9f9b297f6940fdf32a76 Ports: 1444/TCP, 1888/TCP, 1389/TCP, 1636/TCP, 1080/TCP, 1081/TCP, 1898/TCP Host Ports: 0/TCP, 0/TCP, 0/TCP, 0/TCP, 0/TCP, 0/TCP, 0/TCP State: Running Started: Wed, 07 Oct 2020 07:30:28 -0700 Ready: True Restart Count: 0 Liveness: tcp-socket :ldap delay=900s timeout=15s period=30s #success=1 #failure=1 Readiness: exec [/u01/oracle/container-scripts/checkOUDInstance.sh] delay=180s timeout=30s period=60s #success=1 #failure=10 Environment: instanceType: Directory sleepBeforeConfig: 3 OUD_INSTANCE_NAME: oud-ds-rs-0 hostname: oud-ds-rs-0 baseDN: dc=example,dc=com rootUserDN: \u0026lt;set to the key 'rootUserDN' in secret 'oud-ds-rs-creds'\u0026gt; Optional: false rootUserPassword: \u0026lt;set to the key 'rootUserPassword' 
in secret 'oud-ds-rs-creds'\u0026gt; Optional: false adminConnectorPort: 1444 httpAdminConnectorPort: 1888 ldapPort: 1389 ldapsPort: 1636 httpPort: 1080 httpsPort: 1081 replicationPort: 1898 sampleData: 10 Mounts: /u01/oracle/user_projects from oud-ds-rs-pv (rw) /var/run/secrets/kubernetes.io/serviceaccount from oud-ds-rs-token-c4tg4 (ro) Conditions: Type Status Initialized True Ready True ContainersReady True PodScheduled True Volumes: oud-ds-rs-pv: Type: PersistentVolumeClaim (a reference to a PersistentVolumeClaim in the same namespace) ClaimName: oud-ds-rs-pvc ReadOnly: false oud-ds-rs-token-c4tg4: Type: Secret (a volume populated by a Secret) SecretName: oud-ds-rs-token-c4tg4 Optional: false QoS Class: BestEffort Node-Selectors: \u0026lt;none\u0026gt; Tolerations: node.kubernetes.io/not-ready:NoExecute for 300s node.kubernetes.io/unreachable:NoExecute for 300s Events: \u0026lt;none\u0026gt; " +}, +{ + "uri": "/fmw-kubernetes/21.4.2/oudsm/troubleshooting/", + "title": "Troubleshooting", + "tags": [], + "description": "How to Troubleshoot issues.", + "content": " Check the Status of a Namespace View POD Logs View Pod Description Check the Status of a Namespace To check the status of objects in a namespace use the following command:\n$ kubectl --namespace \u0026lt;namespace\u0026gt; get nodes,pod,service,secret,pv,pvc,ingress -o wide Output will be similar to the following:\n$ kubectl --namespace oudns get nodes,pod,service,secret,pv,pvc,ingress -o wide NAME READY STATUS RESTARTS AGE IP NODE NOMINATED NODE READINESS GATES pod/oudsm-1 1/1 Running 0 22h 10.244.0.19 100.102.51.238 \u0026lt;none\u0026gt; \u0026lt;none\u0026gt; pod/oudsm-2 1/1 Running 0 22h 10.244.0.20 100.102.51.238 \u0026lt;none\u0026gt; \u0026lt;none\u0026gt; NAME TYPE CLUSTER-IP EXTERNAL-IP PORT(S) AGE SELECTOR service/oudsm-1 ClusterIP 10.96.108.200 \u0026lt;none\u0026gt; 7001/TCP,7002/TCP 22h app.kubernetes.io/instance=oudsm,app.kubernetes.io/name=oudsm,oudsm/instance=oudsm-1 service/oudsm-2 
ClusterIP 10.96.96.12 \u0026lt;none\u0026gt; 7001/TCP,7002/TCP 22h app.kubernetes.io/instance=oudsm,app.kubernetes.io/name=oudsm,oudsm/instance=oudsm-2 service/oudsm-lbr ClusterIP 10.96.41.201 \u0026lt;none\u0026gt; 7001/TCP,7002/TCP 22h app.kubernetes.io/instance=oudsm,app.kubernetes.io/name=oudsm NAME TYPE DATA AGE secret/default-token-w4jft kubernetes.io/service-account-token 3 32d secret/oudsm-creds opaque 2 22h secret/oudsm-token-ksr4g kubernetes.io/service-account-token 3 22h secret/sh.helm.release.v1.oudsm.v1 helm.sh/release.v1 1 22h secret/sh.helm.release.v1.oudsm.v2 helm.sh/release.v1 1 21h secret/sh.helm.release.v1.oudsm.v3 helm.sh/release.v1 1 19h NAME CAPACITY ACCESS MODES RECLAIM POLICY STATUS CLAIM STORAGECLASS REASON AGE VOLUMEMODE persistentvolume/oudsm-pv 30Gi RWX Retain Bound myoudns/oudsm-pvc manual 22h Filesystem NAME STATUS VOLUME CAPACITY ACCESS MODES STORAGECLASS AGE VOLUMEMODE persistentvolumeclaim/oudsm-pvc Bound oudsm-pv 30Gi RWX manual 22h Filesystem NAME HOSTS ADDRESS PORTS AGE ingress.extensions/oudsm-ingress-nginx oudsm-1,oudsm-2,oudsm + 1 more... 100.102.51.230 80 19h Include/exclude elements (nodes,pod,service,secret,pv,pvc,ingress) as required. 
View POD Logs To view logs for a POD use the following command:\n$ kubectl logs \u0026lt;pod\u0026gt; -n \u0026lt;namespace\u0026gt; For example:\n$ kubectl logs oudsm-1 -n oudns Output will depend on the application running in the POD.\nView Pod Description Details about a POD can be viewed using the kubectl describe command:\n$ kubectl describe pod \u0026lt;pod\u0026gt; -n \u0026lt;namespace\u0026gt; For example:\n$ kubectl describe pod oudsm-1 -n oudns Name: oudsm-1 Namespace: oudns Priority: 0 Node: 10.252.12.103/10.252.12.103 Start Time: Thu, 02 Sep 2021 04:37:41 -0700 Labels: app.kubernetes.io/instance=oudsm app.kubernetes.io/managed-by=Helm app.kubernetes.io/name=oudsm app.kubernetes.io/version=12.2.1.4.0 helm.sh/chart=oudsm-0.1 oudsm/instance=oudsm-1 Annotations: meta.helm.sh/release-name: oudsm meta.helm.sh/release-namespace: oudns Status: Running IP: 10.244.3.33 IPs: IP: 10.244.3.33 Containers: oudsm: Container ID: docker://583080692e2957d2a567350d497f88063ed79dfb3c52e717322 afdafa94d2ca4 Image: oracle/oudsm:12.2.1.4.0-8-ol7-210721.0755 Image ID: docker://sha256:91cbafb6b7f9b2b76a61d8d4df9babef570d3f88f8a0 72eb0966fdec3324cab9 Ports: 7001/TCP, 7002/TCP Host Ports: 0/TCP, 0/TCP State: Running Started: Thu, 02 Sep 2021 04:37:43 -0700 Ready: True Restart Count: 0 Liveness: http-get http://:7001/oudsm delay=1200s timeout=15s period=6 0s #success=1 #failure=3 Readiness: http-get http://:7001/oudsm delay=900s timeout=15s period=30 s #success=1 #failure=3 Environment: DOMAIN_NAME: oudsmdomain-1 ADMIN_USER: \u0026lt;set to the key 'adminUser' in secret 'oudsm-creds'\u0026gt; Optional: false ADMIN_PASS: \u0026lt;set to the key 'adminPass' in secret 'oudsm-creds'\u0026gt; Optional: false ADMIN_PORT: 7001 ADMIN_SSL_PORT: 7002 WLS_PLUGIN_ENABLED: true Mounts: /u01/oracle/user_projects from oudsm-pv (rw) /var/run/secrets/kubernetes.io/serviceaccount from oudsm-token-gvv65 (ro) Conditions: Type Status Initialized True Ready True ContainersReady True PodScheduled True 
Volumes: oudsm-pv: Type: PersistentVolumeClaim (a reference to a PersistentVolumeClaim in the same namespace) ClaimName: oudsm-pvc ReadOnly: false oudsm-token-gvv65: Type: Secret (a volume populated by a Secret) SecretName: oudsm-token-gvv65 Optional: false QoS Class: BestEffort Node-Selectors: \u0026lt;none\u0026gt; Tolerations: node.kubernetes.io/not-ready:NoExecute for 300s node.kubernetes.io/unreachable:NoExecute for 300s Events: \u0026lt;none\u0026gt; " +}, +{ + "uri": "/fmw-kubernetes/21.4.2/soa-domains/adminguide/deploying-composites/", + "title": "Deploy composite applications", + "tags": [], + "description": "Deploy composite applications for Oracle SOA Suite and Oracle Service Bus domains.", + "content": "Learn how to deploy the composite applications for Oracle SOA Suite and Oracle Service Bus domains.\n Deploy using JDeveloper Deploy Oracle SOA Suite and Oracle Service Bus composite applications from Oracle JDeveloper to Oracle SOA Suite in the WebLogic Kubernetes Operator environment.\n Deploy using Maven and Ant Deploy Oracle SOA Suite and Oracle Service Bus composite applications using the Maven and Ant based approach in an Oracle SOA Suite deployment.\n Deploy using composites in a persistent volume or image Deploy Oracle SOA Suite and Oracle Service Bus composite applications artifacts in a persistent volume or in an image.\n " +}, +{ + "uri": "/fmw-kubernetes/21.4.2/soa-domains/cleanup-domain-setup/", + "title": "Uninstall", + "tags": [], + "description": "Clean up the Oracle SOA Suite domain setup.", + "content": "Learn how to clean up the Oracle SOA Suite domain setup.\nRemove the domain Remove the domain\u0026rsquo;s ingress (for example, Traefik ingress) using Helm:\n$ helm uninstall soa-domain-ingress -n sample-domain1-ns For example:\n$ helm uninstall soainfra-traefik -n soans Remove the domain resources by using the sample delete-weblogic-domain-resources.sh script present at ${WORKDIR}/delete-domain:\n$ cd ${WORKDIR}/delete-domain $ 
./delete-weblogic-domain-resources.sh -d sample-domain1 For example:\n$ cd ${WORKDIR}/delete-domain $ ./delete-weblogic-domain-resources.sh -d soainfra Use kubectl to confirm that the server pods and domain resource are deleted:\n$ kubectl get pods -n sample-domain1-ns $ kubectl get domains -n sample-domain1-ns For example:\n$ kubectl get pods -n soans $ kubectl get domains -n soans Drop the RCU schemas Follow these steps to drop the RCU schemas created for Oracle SOA Suite domains.\nRemove the domain namespace Configure the installed ingress load balancer (for example, Traefik) to stop managing the ingresses in the domain namespace:\n$ helm upgrade traefik traefik/traefik \\ --namespace traefik \\ --reuse-values \\ --set \u0026#34;kubernetes.namespaces={traefik}\u0026#34; \\ --wait Configure the operator to stop managing the domain:\n$ helm upgrade sample-weblogic-operator \\ charts/weblogic-operator \\ --namespace sample-weblogic-operator-ns \\ --reuse-values \\ --set \u0026#34;domainNamespaces={}\u0026#34; \\ --wait For example:\n$ cd ${WORKDIR} $ helm upgrade weblogic-kubernetes-operator \\ charts/weblogic-operator \\ --namespace opns \\ --reuse-values \\ --set \u0026#34;domainNamespaces={}\u0026#34; \\ --wait Delete the domain namespace:\n$ kubectl delete namespace sample-domain1-ns For example:\n$ kubectl delete namespace soans Remove the operator Remove the operator:\n$ helm uninstall sample-weblogic-operator -n sample-weblogic-operator-ns For example:\n$ helm uninstall weblogic-kubernetes-operator -n opns Remove the operator\u0026rsquo;s namespace:\n$ kubectl delete namespace sample-weblogic-operator-ns For example:\n$ kubectl delete namespace opns Remove the load balancer Remove the installed ingress based load balancer (for example, Traefik):\n$ helm uninstall traefik -n traefik Remove the Traefik namespace:\n$ kubectl delete namespace traefik Delete the domain home To remove the domain home that is generated using the create-domain.sh script, with 
appropriate privileges manually delete the contents of the storage attached to the domain home persistent volume (PV).\nFor example, for the domain\u0026rsquo;s persistent volume of type host_path:\n$ rm -rf /scratch/k8s_dir/SOA/* " +}, +{ + "uri": "/fmw-kubernetes/21.4.2/oig/manage-oig-domains/delete-domain-home/", + "title": "Delete the OIG domain home", + "tags": [], + "description": "Learn about the steps to cleanup the OIG domain home.", + "content": "Sometimes in production, but most likely in testing environments, you might want to remove the domain home that is generated using the create-domain.sh script.\n Run the following command to delete the domain:\n$ cd $WORKDIR/kubernetes/delete-domain $ ./delete-weblogic-domain-resources.sh -d \u0026lt;domain_uid\u0026gt; For example:\n$ cd $WORKDIR/kubernetes/delete-domain $ ./delete-weblogic-domain-resources.sh -d governancedomain Drop the RCU schemas as follows:\n$ kubectl exec -it helper -n \u0026lt;domain_namespace\u0026gt; -- /bin/bash [oracle@helper ~]$ [oracle@helper ~]$ export CONNECTION_STRING=\u0026lt;db_host.domain\u0026gt;:\u0026lt;db_port\u0026gt;/\u0026lt;service_name\u0026gt; [oracle@helper ~]$ export RCUPREFIX=\u0026lt;rcu_schema_prefix\u0026gt; /u01/oracle/oracle_common/bin/rcu -silent -dropRepository -databaseType ORACLE -connectString $CONNECTION_STRING \\ -dbUser sys -dbRole sysdba -selectDependentsForComponents true -schemaPrefix $RCUPREFIX \\ -component MDS -component IAU -component IAU_APPEND -component IAU_VIEWER -component OPSS \\ -component WLS -component STB -component OIM -component SOAINFRA -component UCSUMS -f \u0026lt; /tmp/pwd.txt For example:\n$ kubectl exec -it helper -n oigns -- /bin/bash [oracle@helper ~]$ export CONNECTION_STRING=mydatabasehost.example.com:1521/orcl.example.com [oracle@helper ~]$ export RCUPREFIX=OIGK8S /u01/oracle/oracle_common/bin/rcu -silent -dropRepository -databaseType ORACLE -connectString $CONNECTION_STRING \\ -dbUser sys -dbRole sysdba 
-selectDependentsForComponents true -schemaPrefix $RCUPREFIX \\ -component MDS -component IAU -component IAU_APPEND -component IAU_VIEWER -component OPSS \\ -component WLS -component STB -component OIM -component SOAINFRA -component UCSUMS -f \u0026lt; /tmp/pwd.txt Delete the contents of the persistent volume:\n$ rm -rf /\u0026lt;workdir\u0026gt;/governancedomainpv/* For example:\n$ rm -rf /scratch/OIGK8S/governancedomainpv/* Delete the WebLogic Kubernetes Operator, by running the following command:\n$ helm delete weblogic-kubernetes-operator -n opns Delete the label from the OIG namespace:\n$ kubectl label namespaces \u0026lt;domain_namespace\u0026gt; weblogic-operator- For example:\n$ kubectl label namespaces oigns weblogic-operator- Delete the service account for the operator:\n$ kubectl delete serviceaccount \u0026lt;sample-kubernetes-operator-sa\u0026gt; -n \u0026lt;domain_namespace\u0026gt; For example:\n$ kubectl delete serviceaccount op-sa -n opns Delete the operator namespace:\n$ kubectl delete namespace \u0026lt;sample-kubernetes-operator-ns\u0026gt; For example:\n$ kubectl delete namespace opns To delete NGINX:\n$ helm delete governancedomain-nginx-designconsole -n \u0026lt;domain_namespace\u0026gt; For example:\n$ helm delete governancedomain-nginx-designconsole -n oigns Then run:\n$ helm delete governancedomain-nginx -n \u0026lt;domain_namespace\u0026gt; For example:\n$ helm delete governancedomain-nginx -n oigns Then run:\n$ helm delete nginx-ingress -n \u0026lt;domain_namespace\u0026gt; For example:\n$ helm delete nginx-ingress -n nginxssl Then delete the NGINX namespace:\n$ kubectl delete namespace \u0026lt;namespace\u0026gt; For example:\n$ kubectl delete namespace nginxssl Delete the OIG namespace:\n$ kubectl delete namespace \u0026lt;domain_namespace\u0026gt; For example:\n$ kubectl delete namespace oigns " +}, +{ + "uri": "/fmw-kubernetes/21.4.2/oam/validate-domain-urls/", + "title": "Validate Domain URLs", + "tags": [], + "description": "Sample 
for validating domain urls.", + "content": "In this section you validate the OAM domain URLs are accessible via the NGINX ingress.\nMake sure you know the master hostname and ingress port for NGINX before proceeding.\nValidate the OAM domain urls via the Ingress Launch a browser and access the following URL\u0026rsquo;s. Login with the weblogic username and password (weblogic/\u0026lt;password\u0026gt;).\n Console or Page URL WebLogic Administration Console https://${MASTERNODE-HOSTNAME}:${MASTERNODE-PORT}/console Oracle Enterprise Manager Console https://${MASTERNODE-HOSTNAME}:${MASTERNODE-PORT}/em Oracle Access Management Console https://${MASTERNODE-HOSTNAME}:${MASTERNODE-PORT}/oamconsole Oracle Access Management Console https://${MASTERNODE-HOSTNAME}:${MASTERNODE-PORT}/access Logout URL https://${MASTERNODE-HOSTNAME}:${MASTERNODE-PORT}/oam/server/logout Note: WebLogic Administration Console and Oracle Enterprise Manager Console should only be used to monitor the servers in the OAM domain. To control the Administration Server and OAM Managed Servers (start/stop) you must use Kubernetes. See Domain Life Cycle for more information.\nThe browser will give certificate errors if you used a self-signed certificate and have not imported it into the browser's Certificate Authority store. If this occurs you can proceed with the connection and ignore the errors.\n" +}, +{ + "uri": "/fmw-kubernetes/21.4.2/oig/validate-domain-urls/", + "title": "Validate domain URLs", + "tags": [], + "description": "Sample for validating domain urls.", + "content": "In this section you validate the OIG domain URLs that are accessible via the NGINX ingress.\nMake sure you know the master hostname and port before proceeding.\nValidate the OIG domain urls via the ingress Launch a browser and access the following URL\u0026rsquo;s. 
Use http or https depending on whether you configured your ingress for non-ssl or ssl.\nLogin to the WebLogic Administration Console and Oracle Enterprise Manager Console with the WebLogic username and password (weblogic/\u0026lt;password\u0026gt;).\nLogin to Oracle Identity Governance with the xelsysadm username and password (xelsysadm/\u0026lt;password\u0026gt;).\n Console or Page URL WebLogic Administration Console https://${MASTERNODE-HOSTNAME}:${MASTERNODE-PORT}/console Oracle Enterprise Manager Console https://${MASTERNODE-HOSTNAME}:${MASTERNODE-PORT}/em Oracle Identity System Administration https://${MASTERNODE-HOSTNAME}:${MASTERNODE-PORT}/sysadmin Oracle Identity Self Service https://${MASTERNODE-HOSTNAME}:${MASTERNODE-PORT}/identity Note: WebLogic Administration Console and Oracle Enterprise Manager Console should only be used to monitor the servers in the OIG domain. To control the Administration Server and OIG Managed Servers (start/stop) you must use Kubernetes. See Domain Life Cycle for more information.\nThe browser will give certificate errors if you used a self-signed certificate and have not imported it into the browser's Certificate Authority store. If this occurs you can proceed with the connection and ignore the errors.\n" +}, +{ + "uri": "/fmw-kubernetes/21.4.2/oudsm/", + "title": "Oracle Unified Directory Services Manager", + "tags": [], + "description": "Oracle Unified Directory Services Manager provides an interface for managing instances of Oracle Unified Directory", + "content": "Oracle Unified Directory Services Manager is an interface for managing instances of Oracle Unified Directory. Oracle Unified Directory Services Manager enables you to configure the structure of the directory, define objects in the directory, add and configure users, groups, and other entries. 
Oracle Unified Directory Services Manager is also the interface you use to manage entries, schema, security, and other directory features.\nThis project supports deployment of Oracle Unified Directory Services Manager images based on the 12cPS4 (12.2.1.4.0) release within a Kubernetes environment. The Oracle Unified Directory Services Manager Image refers to binaries for Oracle Unified Directory Services Manager Release 12.2.1.4.0.\nImage: oracle/oudsm:12.2.1.4.0\nFollow the instructions in this guide to set up Oracle Unified Directory Services Manager on Kubernetes.\nGetting started For detailed information about deploying Oracle Unified Directory Services Manager, start at Prerequisites and follow this documentation sequentially.\nIf performing an Enterprise Deployment, refer to the Enterprise Deployment Guide for Oracle Identity and Access Management in a Kubernetes Cluster instead.\nCurrent release The current production release for Oracle Unified Directory Services Manager 12c PS4 (12.2.1.4.0) deployment on Kubernetes is 21.4.2.\n" +}, +{ + "uri": "/fmw-kubernetes/21.4.2/soa-domains/faq/", + "title": "Frequently Asked Questions", + "tags": [], + "description": "This section describes known issues for Oracle SOA Suite domains deployment on Kubernetes. Also, provides answers to frequently asked questions.", + "content": "Overriding tuning parameters is not supported using configuration overrides The WebLogic Kubernetes Operator enables you to override some of the domain configuration using configuration overrides (also called situational configuration). See supported overrides. Overriding the tuning parameters such as MaxMessageSize and PAYLOAD, for Oracle SOA Suite domains is not supported using the configuration overrides feature. 
However, you can override them using the following steps:\n Specify the new value using the environment variable K8S_REFCONF_OVERRIDES in serverPod.env section in domain.yaml configuration file (example path: \u0026lt;domain-creation-output-directory\u0026gt;/weblogic-domains/soainfra/domain.yaml) based on the servers to which the changes are to be applied.\nFor example, to override the value at the Administration Server pod level:\nspec: adminServer: serverPod: env: - name: K8S_REFCONF_OVERRIDES value: \u0026#34;-Dweblogic.MaxMessageSize=78787878\u0026#34; - name: USER_MEM_ARGS value: \u0026#39;-Djava.security.egd=file:/dev/./urandom -Xms512m -Xmx1024m \u0026#39; serverStartState: RUNNING For example, to override the value at a specific cluster level (soa_cluster or osb_cluster):\n- clusterName: soa_cluster serverService: precreateService: true serverStartState: \u0026#34;RUNNING\u0026#34; serverPod: env: - name: K8S_REFCONF_OVERRIDES value: \u0026#34;-Dsoa.payload.threshold.kb=102410\u0026#34; Note: When multiple system properties are specified for serverPod.env.value, make sure each system property is separated by a space.\n Apply the updated domain.yaml file:\n$ kubectl apply -f domain.yaml Note: The server pod(s) will be automatically restarted (rolling restart).\n Deployments in the WebLogic Server Administration Console may display unexpected error In an Oracle SOA Suite environment deployed using the operator, accessing Deployments from the WebLogic Server Administration Console home page may display the error message Unexpected error encountered while obtaining monitoring information for applications. This error does not have any functional impact and can be ignored. 
You can verify that the applications are in Active state from the Control tab in Summary of deployments page.\nEnterprise Manager Console may display ADF_FACES-30200 error In an Oracle SOA Suite environment deployed using the operator, the Enterprise Manager Console may intermittently display the following error when the domain servers are restarted:\nADF_FACES-30200: For more information, please see the server\u0026#39;s error log for an entry beginning with: The UIViewRoot is null. Fatal exception during PhaseId: RESTORE_VIEW 1. You can refresh the Enterprise Manager Console URL to successfully log in to the Console.\nConfigure the external URL access for Oracle SOA Suite composite applications For Oracle SOA Suite composite applications to access the external URLs over the internet (if your cluster is behind a http proxy server), you must configure the following proxy parameters for Administration Server and Managed Server pods.\n-Dhttp.proxyHost=www-your-proxy.com -Dhttp.proxyPort=proxy-port -Dhttps.proxyHost=www-your-proxy.com -Dhttps.proxyPort=proxy-port -Dhttp.nonProxyHosts=\u0026#34;localhost|soainfra-adminserver|soainfra-soa-server1|soainfra-osb-server1|...soainfra-soa-serverN|*.svc.cluster.local|*.your.domain.com|/var/run/docker.sock\u0026#34; To do this, edit the domain.yaml configuration file and append the proxy parameters to the spec.serverPod.env.JAVA_OPTIONS environment variable value.\nFor example:\nserverPod: env: - name: JAVA_OPTIONS value: -Dweblogic.StdoutDebugEnabled=false -Dweblogic.ssl.Enabled=true -Dweblogic.security.SSL.ignoreHostnameVerification=true -Dhttp.proxyHost=www-your-proxy.com -Dhttp.proxyPort=proxy-port -Dhttps.proxyHost=www-your-proxy.com -Dhttps.proxyPort=proxy-port -Dhttp.nonProxyHosts=\u0026#34;localhost|soainfra-adminserver|soainfra-soa-server1|soainfra-osb-server1|...soainfra-soa-serverN|*.svc.cluster.local|*.your.domain.com|/var/run/docker.sock\u0026#34; - name: USER_MEM_ARGS value: 
\u0026#39;-Djava.security.egd=file:/dev/./urandom -Xms256m -Xmx1024m \u0026#39; volumeMounts: Note: The -Dhttp.nonProxyHosts parameter must have the pod names of the Administration Server and each Managed Server. For example: soainfra-adminserver, soainfra-soa-server1, soainfra-osb-server1, and so on.\n Apply the updated domain.yaml file:\n$ kubectl apply -f domain.yaml Note: The server pod(s) will be automatically restarted (rolling restart).\n Configure the external access for the Oracle Enterprise Scheduler WebServices WSDL URLs In an Oracle SOA Suite domain deployed including the Oracle Enterprise Scheduler (ESS) component, the following ESS WebServices WSDL URLs shown in the table format in the ess/essWebServicesWsdl.jsp page are not reachable outside the Kubernetes cluster.\nESSWebService EssAsyncCallbackService EssWsJobAsyncCallbackService Follow these steps to configure the external access for the Oracle Enterprise Scheduler WebServices WSDL URLs:\n Log in to the Administration Console URL of the domain.\nFor example: http://\u0026lt;LOADBALANCER-HOST\u0026gt;:\u0026lt;port\u0026gt;/console In the Home Page, click Clusters. Then click the soa_cluster. Click the HTTP tab and then click Lock \u0026amp; Edit in the Change Center panel. Update the following values: Frontend Host: host name of the load balancer. For example, domain1.example.com. Frontend HTTP Port: load balancer port. For example, 30305. Frontend HTTPS Port: load balancer https port. For example, 30443. Click Save. Click Activate Changes in the Change Center panel. Restart the servers in the SOA cluster. 
Note: Do not restart servers from the Administration Console.\n Missing gif images in Oracle Service Bus console pipeline configuration page In an Oracle SOA Suite domain environment upgraded to the release 21.1.2, some gif images are not rendered in the Oracle Service Bus console pipeline configuration page, as their corresponding url paths are not exposed via the Ingress path rules in the earlier releases (for Non-SSL and SSL termination). To resolve this issue, perform the following steps to apply the latest ingress configuration:\n$ cd ${WORKDIR} $ helm upgrade \u0026lt;helm_release_for_ingress\u0026gt; \\ charts/ingress-per-domain \\ --namespace \u0026lt;domain_namespace\u0026gt; \\ --reuse-values Note: helm_release_for_ingress is the ingress name used in the corresponding helm install command for the ingress installation.\n For example, to upgrade the NGINX based ingress configuration:\n$ cd ${WORKDIR} $ helm upgrade soa-nginx-ingress \\ charts/ingress-per-domain \\ --namespace soans \\ --reuse-values WebLogic Kubernetes Operator FAQs See the general frequently asked questions for using the WebLogic Kubernetes Operator.\n" +}, +{ + "uri": "/fmw-kubernetes/21.4.2/soa-domains/adminguide/persisting-soa-adapters-customizations/", + "title": "Persist adapter customizations", + "tags": [], + "description": "Persist the customizations done for Oracle SOA Suite adapters.", + "content": "The lifetime for any customization done in a file on a server pod is up to the lifetime of that pod. 
The changes are not persisted once the pod goes down or is restarted.\nFor example, the following configuration updates DbAdapter.rar to create a new connection instance and creates data source CoffeeShop on the Administration Console for the same with jdbc/CoffeeShopDS.\nFile location: /u01/oracle/soa/soa/connectors/DbAdapter.rar\n\u0026lt;connection-instance\u0026gt; \u0026lt;jndi-name\u0026gt;eis/DB/CoffeeShop\u0026lt;/jndi-name\u0026gt; \u0026lt;connection-properties\u0026gt; \u0026lt;properties\u0026gt; \u0026lt;property\u0026gt; \u0026lt;name\u0026gt;XADataSourceName\u0026lt;/name\u0026gt; \u0026lt;value\u0026gt;jdbc/CoffeeShopDS\u0026lt;/value\u0026gt; \u0026lt;/property\u0026gt; \u0026lt;property\u0026gt; \u0026lt;name\u0026gt;DataSourceName\u0026lt;/name\u0026gt; \u0026lt;value\u0026gt;\u0026lt;/value\u0026gt; \u0026lt;/property\u0026gt; \u0026lt;property\u0026gt; \u0026lt;name\u0026gt;PlatformClassName\u0026lt;/name\u0026gt; \u0026lt;value\u0026gt;org.eclipse.persistence.platform.database.Oracle10Platform\u0026lt;/value\u0026gt; \u0026lt;/property\u0026gt; \u0026lt;/properties\u0026gt; \u0026lt;/connection-properties\u0026gt; \u0026lt;/connection-instance\u0026gt; If you need to persist the customizations for any of the adapter files under the SOA Oracle Home in the server pod, use one of the following methods.\nMethod 1: Customize the Adapter file using the WebLogic Administration Console: Log in to the WebLogic Administration Console, and go to Deployments \u0026gt; ABC.rar \u0026gt; Configuration \u0026gt; Outbound Connection Pools.\n Click New to create a new connection, then provide a new connection name, and click Finish.\n Go back to the new connection, update the properties as required, and save.\n Under Deployments, select ABC.rar, then Update.\nThis step asks for the Plan.xml location. This location by default will be in ${ORACLE_HOME}/soa/soa which is not under Persistent Volume (PV). 
Therefore, provide the domain\u0026rsquo;s PV location such as {DOMAIN_HOME}/soainfra/servers.\nNow the Plan.xml will be persisted under this location for each Managed Server.\n Method 2: Customize the Adapter file on the Worker Node: Copy ABC.rar from the server pod to a PV path:\n$ kubectl cp \u0026lt;namespace\u0026gt;/\u0026lt;SOA Managed Server pod name\u0026gt;:\u0026lt;full path of .rar file\u0026gt; \u0026lt;destination path inside PV\u0026gt; For example:\n$ kubectl cp soans/soainfra-soa-server1:/u01/oracle/soa/soa/connectors/ABC.rar ${DockerVolume}/domains/soainfra/servers/ABC.rar or do a normal file copy between these locations after entering (using kubectl exec) in to the Managed Server pod.\n Unrar ABC.rar.\n Update the new connection details in the weblogic-ra.xml file under META_INF.\n In the WebLogic Administration Console, under Deployments, select ABC.rar, then Update.\n Select the ABC.rar path as the new location, which is ${DOMAIN_HOME}/user_projects/domains/soainfra/servers/ABC.rar and click Update.\n Verify that the plan.xml or updated .rar should be persisted in the PV.\n " +}, +{ + "uri": "/fmw-kubernetes/21.4.2/oam/post-install-config/", + "title": "Post Install Configuration", + "tags": [], + "description": "Post install configuration.", + "content": "Follow these post install configuration steps.\n Create a Server Overrides File Removing OAM Server from WebLogic Server 12c Default Coherence Cluster WebLogic Server Tuning Enable Virtualization Modify oamconfig.properties Create a Server Overrides File Navigate to the following directory:\n$ cd $WORKDIR/kubernetes/create-access-domain/domain-home-on-pv/output/weblogic-domains/accessdomain Create a setUserOverrides.sh with the following contents:\nDERBY_FLAG=false JAVA_OPTIONS=\u0026quot;${JAVA_OPTIONS} -Djava.net.preferIPv4Stack=true\u0026quot; MEM_ARGS=\u0026quot;-Xms8192m -Xmx8192m\u0026quot; Copy the setUserOverrides.sh file to the Administration Server pod:\n$ chmod 755 
setUserOverrides.sh $ kubectl cp setUserOverrides.sh oamns/accessdomain-adminserver:/u01/oracle/user_projects/domains/accessdomain/bin/setUserOverrides.sh Where oamns is the OAM namespace and accessdomain is the DOMAIN_NAME/UID.\n Stop the OAM domain using the following command:\n$ kubectl -n \u0026lt;domain_namespace\u0026gt; patch domains \u0026lt;domain_uid\u0026gt; --type=\u0026#39;json\u0026#39; -p=\u0026#39;[{\u0026#34;op\u0026#34;: \u0026#34;replace\u0026#34;, \u0026#34;path\u0026#34;: \u0026#34;/spec/serverStartPolicy\u0026#34;, \u0026#34;value\u0026#34;: \u0026#34;NEVER\u0026#34; }]\u0026#39; For example:\n$ kubectl -n oamns patch domains accessdomain --type=\u0026#39;json\u0026#39; -p=\u0026#39;[{\u0026#34;op\u0026#34;: \u0026#34;replace\u0026#34;, \u0026#34;path\u0026#34;: \u0026#34;/spec/serverStartPolicy\u0026#34;, \u0026#34;value\u0026#34;: \u0026#34;NEVER\u0026#34; }]\u0026#39; The output will look similar to the following:\ndomain.weblogic.oracle/accessdomain patched Check that all the pods are stopped:\n$ kubectl get pods -n \u0026lt;domain_namespace\u0026gt; For example:\n$ kubectl get pods -n oamns The output will look similar to the following:\nNAME READY STATUS RESTARTS AGE accessdomain-adminserver 1/1 Terminating 0 27m accessdomain-create-oam-infra-domain-job-7c9r9 0/1 Completed 0 4h29m accessdomain-oam-policy-mgr1 1/1 Terminating 0 24m accessdomain-oam-policy-mgr2 1/1 Terminating 0 24m accessdomain-oam-server1 1/1 Terminating 0 24m accessdomain-oam-server2 1/1 Terminating 0 24m helper 1/1 Running 0 4h44m nginx-ingress-ingress-nginx-controller-76fb7678f-k8rhq 1/1 Running 0 108m The Administration Server pods and Managed Server pods will move to a STATUS of Terminating. 
After a few minutes, run the command again and the pods should have disappeared:\nNAME READY STATUS RESTARTS AGE accessdomain-create-oam-infra-domain-job-7c9r9 0/1 Completed 0 4h30m helper 1/1 Running 0 4h45m nginx-ingress-ingress-nginx-controller-76fb7678f-k8rhq 1/1 Running 0 109m Start the domain using the following command:\n$ kubectl -n \u0026lt;domain_namespace\u0026gt; patch domains \u0026lt;domain_uid\u0026gt; --type=\u0026#39;json\u0026#39; -p=\u0026#39;[{\u0026#34;op\u0026#34;: \u0026#34;replace\u0026#34;, \u0026#34;path\u0026#34;: \u0026#34;/spec/serverStartPolicy\u0026#34;, \u0026#34;value\u0026#34;: \u0026#34;IF_NEEDED\u0026#34; }]\u0026#39; For example:\n$ kubectl -n oamns patch domains accessdomain --type=\u0026#39;json\u0026#39; -p=\u0026#39;[{\u0026#34;op\u0026#34;: \u0026#34;replace\u0026#34;, \u0026#34;path\u0026#34;: \u0026#34;/spec/serverStartPolicy\u0026#34;, \u0026#34;value\u0026#34;: \u0026#34;IF_NEEDED\u0026#34; }]\u0026#39; Run the following kubectl command to view the pods:\n$ kubectl get pods -n \u0026lt;domain_namespace\u0026gt; For example:\n$ kubectl get pods -n oamns The output will look similar to the following:\nNAME READY STATUS RESTARTS AGE accessdomain-create-oam-infra-domain-job-7c9r9 0/1 Completed 0 4h30m accessdomain-introspector-mckp2 1/1 Running 0 8s helper 1/1 Running 0 4h46m nginx-ingress-ingress-nginx-controller-76fb7678f-k8rhq 1/1 Running 0 110m The Administration Server pod will start followed by the OAM Managed Servers pods. 
This process will take several minutes, so keep executing the command until all the pods are running with READY status 1/1:\nNAME READY STATUS RESTARTS AGE accessdomain-adminserver 1/1 Running 0 5m38s accessdomain-create-oam-infra-domain-job-7c9r9 0/1 Completed 0 4h37m accessdomain-oam-policy-mgr1 1/1 Running 0 2m51s accessdomain-oam-policy-mgr2 1/1 Running 0 2m51s accessdomain-oam-server1 1/1 Running 0 2m50s accessdomain-oam-server2 1/1 Running 0 2m50s helper 1/1 Running 0 4h52m nginx-ingress-ingress-nginx-controller-76fb7678f-k8rhq 1/1 Running 0 116m Removing OAM Server from WebLogic Server 12c Default Coherence Cluster Exclude all Oracle Access Management (OAM) clusters (including Policy Manager and OAM runtime server) from the default WebLogic Server 12c coherence cluster by using the WebLogic Server Administration Console.\nFrom 12.2.1.3.0 onwards, OAM server-side session management uses the database and does not require coherence cluster to be established. In some environments, warnings and errors are observed due to default coherence cluster initialized by WebLogic. To avoid or fix these errors, exclude all of the OAM clusters from default WebLogic Server coherence cluster using the following steps:\n Login to the WebLogic Server Console at https://${MASTERNODE-HOSTNAME}:${MASTERNODE-PORT}/console. Click Lock \u0026amp; Edit. In Domain Structure, expand Environment and select Coherence Clusters. Click defaultCoherenceCluster and select the Members tab. From Servers and Clusters, deselect all OAM clusters (oam_cluster and policy_cluster). Click Save. Click Activate changes. WebLogic Server Tuning For production environments, the following WebLogic Server tuning parameters must be set:\nAdd Minimum Thread constraint to worker manager \u0026ldquo;OAPOverRestWM\u0026rdquo; Login to the WebLogic Server Console at https://${MASTERNODE-HOSTNAME}:${MASTERNODE-PORT}/console. Click Lock \u0026amp; Edit. In Domain Structure, click Deployments. 
On the Deployments page click Next until you see oam_server. Expand oam_server by clicking on the + icon, then click /iam/access/binding. Click the Configuration tab, followed by the Workload tab. Click wm/OAPOverRestWM Under Application Scoped Work Managed Components, click New. In Create a New Work Manager Component, select Minimum Threads Constraint and click Next. In Minimum Threads Constraint Properties enter the Count as 400 and click Finish. In the Save Deployment Plan change the Path to the value /u01/oracle/user_projects/domains/accessdomain/Plan.xml, where accessdomain is your domain_UID. Click OK and then Activate Changes. Remove Max Thread Constraint and Capacity Constraint Repeat steps 1-7 above. Under Application Scoped Work Managed Components select the check box for Capacity and MaxThreadsCount. Click Delete. In the Delete Work Managed Components screen, click OK to delete. Click on Release Configuration and then Log Out. oamDS DataSource Tuning Login to the WebLogic Server Console at https://${MASTERNODE-HOSTNAME}:${MASTERNODE-PORT}/console. Click Lock \u0026amp; Edit. In Domain Structure, Expand Services and click Data Sources. Click on oamDS. In Settings for oamDS, select the Configuration tab, and then the Connection Pool tab. Change Initial Capacity, Maximum Capacity, and Minimum Capacity to 800 and click Save. Click Activate Changes. Enable Virtualization Log in to Oracle Enterprise Manager Fusion Middleware Control at https://${MASTERNODE-HOSTNAME}:${MASTERNODE-PORT}/em Click WebLogic Domain \u0026gt; Security \u0026gt; Security Provider Configuration. Expand Security Store Provider. Expand Identity Store Provider. Click Configure. Add a custom property. Select virtualize property with value true and click OK. Click OK again to persist the change. 
Modify oamconfig.properties Navigate to the following directory and change permissions for the oamconfig_modify.sh:\n$ cd $WORKDIR/kubernetes/create-access-domain/domain-home-on-pv/common $ chmod 777 oamconfig_modify.sh For example:\n$ cd $WORKDIR/kubernetes/create-access-domain/domain-home-on-pv/common $ chmod 777 oamconfig_modify.sh Edit the oamconfig.properties and change the OAM_NAMESPACE and LBR_HOST to match the values for your OAM Kubernetes environment. For example:\n#Below are only the sample values, please modify them as per your setup # The name space where OAM servers are created OAM_NAMESPACE='oamns' # Define the INGRESS CONTROLLER used. INGRESS=\u0026quot;nginx\u0026quot; # Define the INGRESS CONTROLLER name used during installation. INGRESS_NAME=\u0026quot;nginx-ingress\u0026quot; # FQDN of the LBR Host i.e the host from where you access oam console LBR_HOST=\u0026quot;masternode.example.com\u0026quot; Run the oamconfig_modify.sh script as follows:\n$ ./oamconfig_modify.sh \u0026lt;OAM_ADMIN_USER\u0026gt;:\u0026lt;OAM_ADMIN_PASSWORD\u0026gt; where:\nOAM_ADMIN_USER is the OAM administrator username\nOAM_ADMIN_PASSWORD is the OAM administrator password\nFor example:\n$ ./oamconfig_modify.sh weblogic:\u0026lt;password\u0026gt; Note: Make sure port 30540 is free before running the command.\nThe output will look similar to the following:\nLBR_PROTOCOL: https domainUID: accessdomain OAM_SERVER: accessdomain-oam-server OAM_NAMESPACE: oamns INGRESS: nginx INGRESS_NAME: nginx-ingress ING_TYPE : NodePort LBR_HOST: masternode.example.com LBR_PORT: 31051 Started Executing Command % Total % Received % Xferd Average Speed Time Time Time Current Dload Upload Total Spent Left Speed 100 764k 0 764k 0 0 221k 0 --:--:-- 0:00:03 --:--:-- 221k new_cluster_id: a52fc-masternode service/accessdomain-oamoap-service created accessdomain-oamoap-service NodePort 10.100.202.44 \u0026lt;none\u0026gt; 5575:30540/TCP 1s nginx-ingress-ingress-nginx-controller NodePort 10.101.132.251 
\u0026lt;none\u0026gt; 80:32371/TCP,443:31051/TCP 144m HTTP/1.1 100 Continue HTTP/1.1 201 Created Date: Mon, 01 Nov 2021 16:59:12 GMT Content-Type: text/plain Content-Length: 76 Connection: keep-alive X-ORACLE-DMS-ECID: 9234b1a0-83b4-4100-9875-aa00e3f5db27-0000035f X-ORACLE-DMS-RID: 0 Set-Cookie: JSESSIONID=pSXccMR6t8B5QoyaAlOuZYSmhtseX4C4jx-0tnkmNyer8L1mOLET!402058795; path=/; HttpOnly Set-Cookie: _WL_AUTHCOOKIE_JSESSIONID=X1iqH-mtDNGyFx5ZCXMK; path=/; secure; HttpOnly Strict-Transport-Security: max-age=15724800; includeSubDomains https://masternode.example.com:31051/iam/admin/config/api/v1/config?path=%2F $WORKDIR/kubernetes/create-access-domain/domain-home-on-pv/common/output/oamconfig_modify.xml executed successfully --------------------------------------------------------------------------- Initializing WebLogic Scripting Tool (WLST) ... Welcome to WebLogic Server Administration Scripting Shell Type help() for help on available commands Connecting to t3://accessdomain-adminserver:7001 with userid weblogic ... Successfully connected to Admin Server \u0026quot;AdminServer\u0026quot; that belongs to domain \u0026quot;accessdomain\u0026quot;. Warning: An insecure protocol was used to connect to the server. To ensure on-the-wire security, the SSL port or Admin port should be used instead. Location changed to domainRuntime tree. This is a read-only tree with DomainMBean as the root MBean. For more help, use help('domainRuntime') Exiting WebLogic Scripting Tool. Please wait for some time for the server to restart pod \u0026quot;accessdomain-oam-server1\u0026quot; deleted pod \u0026quot;accessdomain-oam-server2\u0026quot; deleted Waiting continuously at an interval of 10 secs for servers to start.. Waiting continuously at an interval of 10 secs for servers to start.. Waiting continuously at an interval of 10 secs for servers to start.. Waiting continuously at an interval of 10 secs for servers to start.. 
Waiting continuously at an interval of 10 secs for servers to start.. ... Waiting continuously at an interval of 10 secs for servers to start.. Waiting continuously at an interval of 10 secs for servers to start.. accessdomain-oam-server1 1/1 Running 0 4m37s accessdomain-oam-server2 1/1 Running 0 4m36s OAM servers started successfully The script will delete the accessdomain-oam-server1 and accessdomain-oam-server2 pods and then create new ones. Check the pods are running again by issuing the following command:\n$ kubectl get pods -n \u0026lt;domain_namespace\u0026gt; For example:\n$ kubectl get pods -n oamns The output will look similar to the following:\nNAME READY STATUS RESTARTS AGE accessdomain-adminserver 1/1 Running 0 43m accessdomain-create-oam-infra-domain-job-7c9r9 0/1 Completed 0 5h14m accessdomain-oam-policy-mgr1 1/1 Running 0 40m accessdomain-oam-policy-mgr2 1/1 Running 0 40m accessdomain-oam-server1 0/1 Running 0 8m3s accessdomain-oam-server2 0/1 Running 0 8m2s helper 0/1 Running 0 5h29m nginx-ingress-ingress-nginx-controller-76fb7678f-k8rhq 1/1 Running 0 154m The accessdomain-oam-server1 and accessdomain-oam-server2 are started, but currently have a READY status of 0/1. This means oam_server1 and oam_server2 are not currently running but are in the process of starting. 
The servers will take several minutes to start so keep executing the command until READY shows 1/1:\nNAME READY STATUS RESTARTS AGE accessdomain-adminserver 1/1 Running 0 49m accessdomain-create-oam-infra-domain-job-7c9r9 0/1 Completed 0 5h21m accessdomain-oam-policy-mgr1 1/1 Running 0 46m accessdomain-oam-policy-mgr2 1/1 Running 0 46m accessdomain-oam-server1 1/1 Running 0 14m accessdomain-oam-server2 1/1 Running 0 14m helper 1/1 Running 0 5h36m nginx-ingress-ingress-nginx-controller-76fb7678f-k8rhq 1/1 Running 0 160m " +}, +{ + "uri": "/fmw-kubernetes/21.4.2/oig/post-install-config/", + "title": "Post install configuration", + "tags": [], + "description": "Post install configuration.", + "content": "Follow these post install configuration steps.\n a. Post Install Tasks Perform post install tasks.\n b. Install and configure connectors Install and Configure Connectors.\n " +}, +{ + "uri": "/fmw-kubernetes/21.4.2/soa-domains/appendix/", + "title": "Appendix", + "tags": [], + "description": "", + "content": "This section provides information on miscellaneous tasks related to Oracle SOA Suite domains deployment on Kubernetes.\n Domain resource sizing Describes the resource sizing information for Oracle SOA Suite domains setup on Kubernetes cluster.\n Quick start deployment on-premise Describes how to quickly get an Oracle SOA Suite domain instance running (using the defaults, nothing special) for development and test purposes.\n Security hardening Review resources for the Docker and Kubernetes cluster hardening.\n " +}, +{ + "uri": "/fmw-kubernetes/21.4.2/soa-domains/adminguide/performing-wlst-operations/", + "title": "Perform WLST operations", + "tags": [], + "description": "Perform WLST administration operations using a helper pod running in the same Kubernetes cluster as the Oracle SOA Suite domain.", + "content": "You can use the WebLogic Scripting Tool (WLST) to manage a domain running in a Kubernetes cluster. 
Some of the many ways to do this are provided here.\nIf the Administration Server was configured to expose a T3 channel using exposeAdminT3Channel when creating the domain, refer to Use WLST.\nIf you do not want to expose additional ports and perform WLST administration operations using the existing Kubernetes services created by the WebLogic Server Kubernetes operator, then follow this documentation. Here we will be creating and using a helper pod in the same Kubernetes cluster as the Oracle SOA Suite domain to perform WLST operations.\n Note: To avoid any misconfigurations, Oracle recommends that you do not use the Administration Server pod directly for WLST operations.\n Create a Kubernetes helper pod Perform WLST operations Sample WLST operations Create a Kubernetes helper pod Before creating a Kubernetes helper pod, make sure that the Oracle SOA Suite Docker image is available on the node, or you can create an image pull secret so that the pod can pull the Docker image on the host where it gets created.\n Create an image pull secret to pull image soasuite:12.2.1.4 by the helper pod.\nNote: Skip this step if you are not using an image pull secret.\n$ kubectl create secret docker-registry \u0026lt;secret-name\u0026gt; --namespace soans \\ --docker-server=\u0026lt;docker-registry-name\u0026gt; \\ --docker-username=\u0026lt;docker-user\u0026gt; \\ --docker-password=\u0026lt;docker-password\u0026gt; \\ --docker-email=\u0026lt;email-id\u0026gt; For example:\n$ kubectl create secret docker-registry image-secret --namespace soans \\ --docker-server=your-registry.com \\ --docker-username=xxxxxx \\ --docker-password=xxxxxxx \\ --docker-email=my@company.com Create a helper pod.\nFor Kubernetes 1.18.10+, 1.19.7+, and 1.20.6+:\n$ kubectl run helper \\ --image \u0026lt;image_name\u0026gt; \\ --namespace \u0026lt;domain_namespace\u0026gt; \\ --overrides='{ \u0026quot;apiVersion\u0026quot;: \u0026quot;v1\u0026quot;, \u0026quot;spec\u0026quot;: { 
\u0026quot;imagePullSecrets\u0026quot;: [{\u0026quot;name\u0026quot;: \u0026quot;\u0026lt;secret-name\u0026gt;\u0026quot;}] } }' \\ -- sleep infinity For Kubernetes 1.16.15+, and 1.17.13+:\n$ kubectl run helper --generator=run-pod/v1 \\ --image \u0026lt;image_name\u0026gt; \\ --namespace \u0026lt;domain_namespace\u0026gt; \\ --overrides='{ \u0026quot;apiVersion\u0026quot;: \u0026quot;v1\u0026quot;, \u0026quot;spec\u0026quot;: { \u0026quot;imagePullSecrets\u0026quot;: [{\u0026quot;name\u0026quot;: \u0026quot;\u0026lt;secret-name\u0026gt;\u0026quot;}] } }' \\ -- sleep infinity For example:\n$ kubectl run helper \\ --image soasuite:12.2.1.4 \\ --namespace soans \\ --overrides='{ \u0026quot;apiVersion\u0026quot;: \u0026quot;v1\u0026quot;, \u0026quot;spec\u0026quot;: { \u0026quot;imagePullSecrets\u0026quot;: [{\u0026quot;name\u0026quot;: \u0026quot;image-secret\u0026quot;}] } }' \\ -- sleep infinity Note: If you are not using the image pull secret, remove --overrides='{ \u0026quot;apiVersion\u0026quot;: \u0026quot;v1\u0026quot;, \u0026quot;spec\u0026quot;: { \u0026quot;imagePullSecrets\u0026quot;: [{\u0026quot;name\u0026quot;: \u0026quot;\u0026lt;secret-name\u0026gt;\u0026quot;}] } }' .\n Perform WLST operations Once the Kubernetes helper pod is deployed, you can exec into the pod, connect to servers using t3 or t3s and perform WLST operations. By default, t3s is not enabled for the Administration Server or Managed Servers. 
If you enabled SSL with sslEnabled when creating the domain, then you can use t3s to perform WLST operations.\nInteractive mode Start a bash shell in the helper pod:\n$ kubectl exec -it helper -n \u0026lt;domain_namespace\u0026gt; -- /bin/bash For example:\n$ kubectl exec -it helper -n soans -- /bin/bash This opens a bash shell in the running helper pod:\n[oracle@helper oracle]$ Invoke WLST:\n[oracle@helper oracle]$ cd $ORACLE_HOME/oracle_common/common/bin [oracle@helper bin]$ ./wlst.sh The output will look similar to the following:\n[oracle@helper bin]$ ./wlst.sh Initializing WebLogic Scripting Tool (WLST) ... Jython scans all the jar files it can find at first startup. Depending on the system, this process may take a few minutes to complete, and WLST may not return a prompt right away. Welcome to WebLogic Server Administration Scripting Shell Type help() for help on available commands wls:/offline\u0026gt; Connect using t3:\na. To connect to the Administration Server or Managed Servers using t3, you can use the Kubernetes services created by the WebLogic Server Kubernetes operator:\nwls:/offline\u0026gt; connect('weblogic','\u0026lt;password\u0026gt;','t3://\u0026lt;domainUID\u0026gt;-\u0026lt;WebLogic Server Name\u0026gt;:\u0026lt;Server Port\u0026gt;') For example, if the domainUID is soainfra, Administration Server name is AdminServer, and Administration Server port is 7001, then you can connect to the Administration Server using t3:\nwls:/offline\u0026gt; connect('weblogic','\u0026lt;password\u0026gt;','t3://soainfra-adminserver:7001') The output will look similar to the following:\nwls:/offline\u0026gt; connect('weblogic','\u0026lt;password\u0026gt;','t3://soainfra-adminserver:7001') Connecting to t3://soainfra-adminserver:7001 with userid weblogic ... Successfully connected to Admin Server \u0026quot;AdminServer\u0026quot; that belongs to domain \u0026quot;soainfra\u0026quot;. Warning: An insecure protocol was used to connect to the server. 
To ensure on-the-wire security, the SSL port or Admin port should be used instead. wls:/soainfra/serverConfig/\u0026gt; b. To connect a WebLogic Server cluster (SOA or Oracle Service Bus) using t3, you can use the Kubernetes services created by the WebLogic Server Kubernetes operator:\nwls:/offline\u0026gt; connect('weblogic','\u0026lt;password\u0026gt;','t3://\u0026lt;domainUID\u0026gt;-cluster-\u0026lt;Cluster name\u0026gt;:\u0026lt;Managed Server Port\u0026gt;') For example, if the domainUID is soainfra, SOA cluster name is soa-cluster, and SOA Managed Server port is 8001, then you can connect to SOA Cluster using t3:\nwls:/offline\u0026gt; connect('weblogic','\u0026lt;password\u0026gt;','t3://soainfra-cluster-soa-cluster:8001') The output will look similar to the following:\nwls:/offline\u0026gt; connect('weblogic','\u0026lt;password\u0026gt;','t3://soainfra-cluster-soa-cluster:8001') Connecting to t3://soainfra-cluster-soa-cluster:8001 with userid weblogic ... Successfully connected to Managed Server \u0026quot;soa_server1\u0026quot; that belongs to domain \u0026quot;soainfra\u0026quot;. Warning: An insecure protocol was used to connect to the server. To ensure on-the-wire security, the SSL port or Admin port should be used instead. wls:/soainfra/serverConfig/\u0026gt; Connect using t3s.\nIf you enabled SSL with sslEnabled when creating the domain, then you can use t3s to perform WLST operations:\na. Obtain the certificate from the Administration Server to be used for a secured (t3s) connection from the client by exporting the certificate from the Administration Server using WLST commands. Sample commands to export the default demoidentity:\n[oracle@helper oracle]$ cd $ORACLE_HOME/oracle_common/common/bin [oracle@helper bin]$ ./wlst.sh . . wls:/offline\u0026gt; connect('weblogic','\u0026lt;password\u0026gt;','t3://soainfra-adminserver:7001') . . 
wls:/soainfra/serverConfig/\u0026gt; svc = getOpssService(name='KeyStoreService') wls:/soainfra/serverConfig/\u0026gt; svc.exportKeyStoreCertificate(appStripe='system', name='demoidentity', password='DemoIdentityKeyStorePassPhrase', alias='DemoIdetityKeyStorePassPhrase', type='Certificate', filepath='/tmp/cert.txt/') These commands download the certificate for the default demoidentity certificate at /tmp/cert.txt.\nb. Import the certificate to the Java trust store:\n[oracle@helper oracle]$ export JAVA_HOME=/u01/jdk [oracle@helper oracle]$ keytool -import -v -trustcacerts -alias soadomain -file /tmp/cert.txt -keystore $JAVA_HOME/jre/lib/security/cacerts -keypass changeit -storepass changeit c. Connect to WLST and set the required environment variable before connecting using t3s:\n[oracle@helper oracle]$ export WLST_PROPERTIES=\u0026quot;-Dweblogic.security.SSL.ignoreHostnameVerification=true\u0026quot; [oracle@helper oracle]$ cd $ORACLE_HOME/oracle_common/common/bin [oracle@helper bin]$ ./wlst.sh d. Access t3s for the Administration Server.\nFor example, if the domainUID is soainfra, Administration Server name is AdminServer, and Administration Server SSL port is 7002, connect to the Administration Server as follows:\nwls:/offline\u0026gt; connect('weblogic','\u0026lt;password\u0026gt;','t3s://soainfra-adminserver:7002') e. Access t3s for the SOA cluster.\nFor example, if the domainUID is soainfra, SOA cluster name is soa-cluster, and SOA Managed Server SSL port is 8002, connect to the SOA cluster as follows:\nwls:/offline\u0026gt; connect('weblogic','\u0026lt;password\u0026gt;','t3s://soainfra-cluster-soa-cluster:8002') Script mode In script mode, scripts contain WLST commands in a text file with a .py file extension (for example, mywlst.py). 
Before invoking WLST using the script file, you must copy the .py file into the helper pod.\nTo copy the .py file into the helper pod using WLST operations in script mode:\n Create a .py file containing all the WLST commands.\n Copy the .py file into the helper pod:\n$ kubectl cp \u0026lt;filename\u0026gt;.py \u0026lt;domain namespace\u0026gt;/helper:\u0026lt;directory\u0026gt; For example:\n$ kubectl cp mywlst.py soans/helper:/u01/oracle Run wlst.sh on the .py file by exec into the helper pod:\n$ kubectl exec -it helper -n \u0026lt;domain_namespace\u0026gt; -- /bin/bash [oracle@helper oracle]$ cd $ORACLE_HOME/oracle_common/common/bin [oracle@helper oracle]$ ./wlst.sh \u0026lt;directory\u0026gt;/\u0026lt;filename\u0026gt;.py Note: Refer to Interactive mode for details on how to connect using t3 or t3s.\nSample WLST operations For a full list of WLST operations, refer to WebLogic Server WLST Online and Offline Command Reference.\nDisplay servers $ kubectl exec -it helper -n soans -- /bin/bash [oracle@helper oracle]$ cd $ORACLE_HOME/oracle_common/common/bin [oracle@helper bin]$ ./wlst.sh Initializing WebLogic Scripting Tool (WLST) ... Jython scans all the jar files it can find at first startup. Depending on the system, this process may take a few minutes to complete, and WLST may not return a prompt right away. Welcome to WebLogic Server Administration Scripting Shell Type help() for help on available commands wls:/offline\u0026gt; connect('weblogic','Welcome1','t3://soainfra-adminserver:7001') Connecting to t3://soainfra-adminserver:7001 with userid weblogic ... Successfully connected to Admin Server \u0026quot;AdminServer\u0026quot; that belongs to domain \u0026quot;soainfra\u0026quot;. Warning: An insecure protocol was used to connect to the server. To ensure on-the-wire security, the SSL port or Admin port should be used instead. 
wls:/soainfra/serverConfig/\u0026gt; cd('/Servers') wls:/soainfra/serverConfig/Servers\u0026gt; ls() dr-- AdminServer dr-- osb_server1 dr-- osb_server2 dr-- osb_server3 dr-- osb_server4 dr-- osb_server5 dr-- soa_server1 dr-- soa_server2 dr-- soa_server3 dr-- soa_server4 dr-- soa_server5 wls:/soainfra/serverConfig/Servers\u0026gt; " +}, +{ + "uri": "/fmw-kubernetes/21.4.2/oig/configure-design-console/", + "title": "Configure Design Console", + "tags": [], + "description": "Configure Design Console.", + "content": "Configure an Ingress to allow Design Console to connect to your Kubernetes cluster.\n a. Using Design Console with NGINX(non-SSL) Configure Design Console with NGINX(non-SSL).\n b. Using Design Console with NGINX(SSL) Configure Design Console with NGINX(SSL).\n " +}, +{ + "uri": "/fmw-kubernetes/21.4.2/oam/validate-sso-using-webgate/", + "title": "Validate a Basic SSO Flow using WebGate Registration ", + "tags": [], + "description": "Sample for validating a basic SSO flow using WebGate registration.", + "content": "In this section you validate single-sign on works to the OAM Kubernetes cluster via Oracle WebGate. The instructions below assume you have a running Oracle HTTP Server (for example ohs_k8s) and Oracle WebGate installed on an independent server. The instructions also assume basic knowledge of how to register a WebGate agent.\nNote: At present Oracle HTTP Server and Oracle WebGate are not supported on a Kubernetes cluster.\nUpdate the OAM Hostname and Port for the Loadbalancer If using an NGINX ingress with no load balancer, change {LOADBALANCER-HOSTNAME}:${LOADBALANCER-PORT} to {MASTERNODE-HOSTNAME}:${MASTERNODE-PORT} when referenced below.\n Launch a browser and access the OAM console (https://${LOADBALANCER-HOSTNAME}:${LOADBALANCER-PORT}/oamconsole). 
Login with the weblogic username and password (weblogic/\u0026lt;password\u0026gt;)\n Navigate to Configuration → Settings ( View ) → Access Manager.\n Under Load Balancing modify the OAM Server Host and OAM Server Port, to point to the Loadbalancer HTTP endpoint (e.g loadbalancer.example.com and \u0026lt;port\u0026gt; respectively). In the OAM Server Protocol drop down list select https.\n Under WebGate Traffic Load Balancer modify the OAM Server Host and OAM Server Port, to point to the Loadbalancer HTTP endpoint (e.g loadbalancer.example.com and \u0026lt;port\u0026gt; respectively). In the OAM Server Protocol drop down list select https.\n Click Apply.\n Register a WebGate Agent In all the examples below, change the directory path as appropriate for your installation.\n Run the following command on the server with Oracle HTTP Server and WebGate installed:\n$ cd /scratch/export/home/oracle/product/middleware/webgate/ohs/tools/deployWebGate $ ./deployWebGateInstance.sh -w /scratch/export/home/oracle/admin/domains/oam_domain/config/fmwconfig/components/OHS/ohs_k8s -oh /scratch/export/home/oracle/product/middleware -ws ohs The output will look similar to the following:\nCopying files from WebGate Oracle Home to WebGate Instancedir Run the following command to update the OHS configuration files appropriately:\n$ export LD_LIBRARY_PATH=$LD_LIBRARY_PATH:/scratch/export/home/oracle/product/middleware/lib $ cd /scratch/export/home/oracle/product/middleware/webgate/ohs/tools/setup/InstallTools/ $ ./EditHttpConf -w /scratch/export/home/oracle/admin/domains/oam_domain/config/fmwconfig/components/OHS/ohs_k8s -oh /scratch/export/home/oracle/product/middleware The output will look similar to the following:\nThe web server configuration file was successfully updated /scratch/export/home/oracle/admin/domains/oam_domain/config/fmwconfig/components/OHS/ohs_k8s/httpd.conf has been backed up as 
/scratch/export/home/oracle/admin/domains/oam_domain/config/fmwconfig/components/OHS/ohs_k8s/httpd.conf.ORIG Launch a browser, and access the OAM console. Navigate to Application Security → Quick Start Wizards → SSO Agent Registration. Register the agent in the usual way, download the configuration zip file and copy to the OHS WebGate server, for example: /scratch/export/home/oracle/admin/domains/oam_domain/config/fmwconfig/components/OHS/ohs_k8/webgate/config. Extract the zip file.\n Copy the Certificate Authority (CA) certificate (cacert.pem) for the load balancer/ingress certificate to the same directory e.g: /scratch/export/home/oracle/admin/domains/oam_domain/config/fmwconfig/components/OHS/ohs_k8/webgate/config.\nIf you used a self signed certificate for the ingress, instead copy the self signed certificate (e.g: /scratch/ssl/tls.crt) to the above directory. Rename the certificate to cacert.pem.\n Restart Oracle HTTP Server.\n Access the configured OHS e.g http://ohs.example.com:7778, and check you are redirected to the SSO login page. Login and make sure you are redirected successfully to the home page.\n Changing WebGate agent to use OAP Note: This section should only be followed if you need to change the OAM/WebGate Agent communication from HTTPS to OAP.\nTo change the WebGate agent to use OAP:\n In the OAM Console click Application Security and then Agents.\n Search for the agent you want modify and select it.\n In the User Defined Parameters change:\na) OAMServerCommunicationMode from HTTPS to OAP. For example OAMServerCommunicationMode=OAP\nb) OAMRestEndPointHostName=\u0026lt;hostname\u0026gt; to the {$MASTERNODE-HOSTNAME}. 
For example OAMRestEndPointHostName=masternode.example.com\n In the Server Lists section click Add to a add new server with the following values:\n Access Server: oam_server Host Name: \u0026lt;{$MASTERNODE-HOSTNAME}\u0026gt; Host Port: \u0026lt;oamoap-service NodePort\u0026gt; Note: To find the value for Host Port run the following:\n$ kubectl describe svc accessdomain-oamoap-service -n oamns The output will look similar to the following:\nName: accessdomain-oamoap-service Namespace: oamns Labels: \u0026lt;none\u0026gt; Annotations: \u0026lt;none\u0026gt; Selector: weblogic.clusterName=oam_cluster Type: NodePort IP Families: \u0026lt;none\u0026gt; IP: 10.100.202.44 IPs: 10.100.202.44 Port: \u0026lt;unset\u0026gt; 5575/TCP TargetPort: 5575/TCP NodePort: \u0026lt;unset\u0026gt; 30540/TCP Endpoints: 10.244.5.21:5575,10.244.6.76:5575 Session Affinity: None External Traffic Policy: Cluster Events: \u0026lt;none\u0026gt; In the example above the NodePort is 30540.\n Delete all servers in Server Lists except for the one just created, and click Apply.\n Click Download to download the webgate zip file. Copy the zip file to the desired WebGate.\n " +}, +{ + "uri": "/fmw-kubernetes/21.4.2/oam/manage-oam-domains/", + "title": "Manage OAM Domains", + "tags": [], + "description": "This document provides steps to manage the OAM domain.", + "content": "Important considerations for Oracle Access Management domains in Kubernetes.\n a. Domain Life Cycle Learn about the domain life cycle of an OAM domain.\n b. WLST Administration Operations Describes the steps for WLST administration using helper pod running in the same Kubernetes Cluster as OAM Domain.\n c. Logging and Visualization Describes the steps for logging and visualization with Elasticsearch and Kibana.\n d. Monitoring an OAM domain Describes the steps for Monitoring the OAM domain.\n e. 
Delete the OAM domain home Learn about the steps to clean up the OAM domain home.\n " +}, +{ + "uri": "/fmw-kubernetes/21.4.2/oig/manage-oig-domains/", + "title": "Manage OIG domains", + "tags": [], + "description": "This document provides steps to manage the OIG domain.", + "content": "Important considerations for Oracle Identity Governance domains in Kubernetes.\n Domain life cycle Learn about the domain life cycle of an OIG domain.\n WLST administration operations Describes the steps for WLST administration using helper pod running in the same Kubernetes Cluster as OIG Domain.\n Running OIG utilities Describes the steps for running OIG utilities in Kubernetes.\n Logging and visualization Describes the steps for logging and visualization with Elasticsearch and Kibana.\n Monitoring an OIG domain Describes the steps for Monitoring the OIG domain and Publishing the logs to Elasticsearch.\n Delete the OIG domain home Learn about the steps to clean up the OIG domain home.\n " +}, +{ + "uri": "/fmw-kubernetes/21.4.2/soa-domains/troubleshooting/", + "title": "Troubleshooting", + "tags": [], + "description": "Describes common issues that may occur during Oracle SOA Suite deployment on Kubernetes and the steps to troubleshoot them.", + "content": "This document describes common issues that may occur during the deployment of Oracle SOA Suite on Kubernetes and the steps to troubleshoot them. Also refer to the FAQs page for frequent issues and steps to resolve them.\n WebLogic Kubernetes Operator installation failure RCU schema creation failure Domain creation failure Common domain creation issues Server pods not started after applying domain configuration file Ingress controller not serving the domain urls WebLogic Kubernetes Operator installation failure If the WebLogic Kubernetes Operator installation failed with timing out:\n Check the status of the operator Helm release using the command helm ls -n \u0026lt;operator-namespace\u0026gt;. 
Check if the operator pod is successfully created in the operator namespace. Describe the operator pod using kubectl describe pod \u0026lt;operator-pod-name\u0026gt; -n \u0026lt;operator-namespace\u0026gt; to identify any obvious errors. RCU schema creation failure When creating the RCU schema using create-rcu-schema.sh, the possible causes for RCU schema creation failure are:\n Database is not up and running Incorrect database connection URL used Invalid database credentials used Schema prefix already exists Make sure that all the above causes are reviewed and corrected as needed.\nAlso drop the existing schema with the same prefix before rerunning the create-rcu-schema.sh with correct values.\nDomain creation failure If the Oracle SOA Suite domain creation fails when running create-domain.sh, perform the following steps to diagnose the issue:\n Run the following command to diagnose the create domain job:\n$ kubectl logs jobs/\u0026lt;domain_job\u0026gt; -n \u0026lt;domain_namespace\u0026gt; For example:\n$ kubectl logs jobs/soainfra-create-soa-infra-domain-job -n soans Also run:\n$ kubectl describe pod \u0026lt;domain_job\u0026gt; -n \u0026lt;domain_namespace\u0026gt; For example:\n$ kubectl describe pod soainfra-create-soa-infra-domain-job-mcc6v -n soans Use the output to diagnose the problem and resolve the issue.\n Clean up the failed domain creation:\n Delete the failed domain creation job in the domain namespace using the command kubectl delete job \u0026lt;domain-creation-job-name\u0026gt; -n \u0026lt;domain-namespace\u0026gt;. Delete the contents of the domain home directory Drop the existing RCU schema Recreate the domain:\n Recreate the RCU schema Make sure the Persistent Volume and Persistent Volume Claim used for the domain are created with correct permissions and bound together. 
Rerun the create domain script Common domain creation issues A common domain creation issue is error Failed to build JDBC Connection object in the create domain job logs.\n Click here to see the error stack trace: Configuring the Service Table DataSource... fmwDatabase jdbc:oracle:thin:@orclcdb.soainfra-domain-ns-293-10202010:1521/orclpdb1 Getting Database Defaults... Error: getDatabaseDefaults() failed. Do dumpStack() to see details. Error: runCmd() failed. Do dumpStack() to see details. Problem invoking WLST - Traceback (innermost last): File \u0026quot;/u01/weblogic/..2021_10_20_20_29_37.256759996/createSOADomain.py\u0026quot;, line 943, in ? File \u0026quot;/u01/weblogic/..2021_10_20_20_29_37.256759996/createSOADomain.py\u0026quot;, line 75, in createSOADomain File \u0026quot;/u01/weblogic/..2021_10_20_20_29_37.256759996/createSOADomain.py\u0026quot;, line 695, in extendSoaB2BDomain File \u0026quot;/u01/weblogic/..2021_10_20_20_29_37.256759996/createSOADomain.py\u0026quot;, line 588, in configureJDBCTemplates File \u0026quot;/tmp/WLSTOfflineIni956349269221112379.py\u0026quot;, line 267, in getDatabaseDefaults File \u0026quot;/tmp/WLSTOfflineIni956349269221112379.py\u0026quot;, line 19, in command Failed to build JDBC Connection object: at com.oracle.cie.domain.script.jython.CommandExceptionHandler.handleException(CommandExceptionHandler.java:69) at com.oracle.cie.domain.script.jython.WLScriptContext.handleException(WLScriptContext.java:3085) at com.oracle.cie.domain.script.jython.WLScriptContext.runCmd(WLScriptContext.java:738) at sun.reflect.GeneratedMethodAccessor152.invoke(Unknown Source) at sun.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) at java.lang.reflect.Method.invoke(Method.java:498) com.oracle.cie.domain.script.jython.WLSTException: com.oracle.cie.domain.script.jython.WLSTException: Got exception when auto configuring the schema component(s) with data obtained from shadow table: Failed to build JDBC Connection 
object: ERROR: /u01/weblogic/create-domain-script.sh failed. This error is reported when there is an issue with database schema access during domain creation. The possible causes are:\n Incorrect schema name specified in create-domain-inputs.yaml. RCU schema credentials specified in the secret soainfra-rcu-credentials are different from the credentials specified while creating the RCU schema using create-rcu-schema.sh. To resolve these possible causes, check that the schema name and credentials used during the domain creation are the same as when the RCU schema was created.\nServer pods not started after applying domain configuration file This issue usually happens when the WebLogic Kubernetes Operator is not configured to manage the domain namespace. You can verify the configuration by running the command helm get values \u0026lt;operator-release\u0026gt; -n \u0026lt;operator-namespace\u0026gt; and checking the values under the domainNamespaces section.\nFor example:\n$ helm get values weblogic-kubernetes-operator -n opns USER-SUPPLIED VALUES: domainNamespaces: - soans image: ghcr.io/oracle/weblogic-kubernetes-operator:3.3.0 javaLoggingLevel: FINE serviceAccount: op-sa $ If you don\u0026rsquo;t see the domain namespace value under the domainNamespaces section, run the helm upgrade command in the operator namespace with appropriate values to configure the operator to manage the domain namespace.\n$ helm upgrade --reuse-values --namespace opns --set \u0026quot;domainNamespaces={soans}\u0026quot; --wait weblogic-kubernetes-operator charts/weblogic-operator Ingress controller not serving the domain URLs To diagnose this issue:\n Verify that the Ingress controller is installed successfully.\nFor example, to verify the Traefik Ingress controller status, run the following command: $ helm list -n traefik NAME NAMESPACE REVISION UPDATED STATUS CHART APP VERSION traefik-operator traefik 2 2021-10-27 11:24:29.317003398 +0000 UTC deployed traefik-9.1.1 2.2.8 $ Verify that the 
Ingress controller is setup to monitor the domain namespace.\nFor example, to verify the Traefik Ingress controller manages the soans domain namespace, run the following command and check the values under namespaces section. $ helm get values traefik-operator -n traefik USER-SUPPLIED VALUES: kubernetes: namespaces: - traefik - soans $ Verify that the Ingress chart is installed correctly in domain namespace. For example, run the following command: $ helm list -n soans NAME NAMESPACE REVISION UPDATED STATUS CHART APP VERSION soainfra-traefik soans 1 2021-10-27 11:24:31.7572791 +0000 UTC deployed ingress-per-domain-0.1.0 1.0 $ Verify that the Ingress URL paths and hostnames are configured correctly by running the following commands: Click here to see the sample commands and output $ kubectl get ingress soainfra-traefik -n soans NAME CLASS HOSTS ADDRESS PORTS AGE soainfra-traefik \u0026lt;none\u0026gt; \u0026lt;Hostname\u0026gt; 80 20h $ $ kubectl describe ingress soainfra-traefik -n soans Name: soainfra-traefik Namespace: soans Address: Default backend: default-http-backend:80 (\u0026lt;error: endpoints \u0026quot;default-http-backend\u0026quot; not found\u0026gt;) Rules: Host Path Backends ---- ---- -------- \u0026lt;Hostname\u0026gt; /console soainfra-adminserver:7001 (10.244.0.123:7001) /em soainfra-adminserver:7001 (10.244.0.123:7001) /weblogic/ready soainfra-adminserver:7001 (10.244.0.123:7001) /soa-infra soainfra-cluster-soa-cluster:8001 (10.244.0.126:8001,10.244.0.127:8001) /soa/composer soainfra-cluster-soa-cluster:8001 (10.244.0.126:8001,10.244.0.127:8001) /integration/worklistapp soainfra-cluster-soa-cluster:8001 (10.244.0.126:8001,10.244.0.127:8001) /EssHealthCheck soainfra-cluster-soa-cluster:8001 (10.244.0.126:8001,10.244.0.127:8001) Annotations: kubernetes.io/ingress.class: traefik meta.helm.sh/release-name: soainfra-traefik meta.helm.sh/release-namespace: soans Events: \u0026lt;none\u0026gt; $ " +}, +{ + "uri": 
"/fmw-kubernetes/21.4.2/oam/create-or-update-image/", + "title": "Create or update an image", + "tags": [], + "description": "Create or update an Oracle Access Management (OAM) container image used for deploying OAM domains. An OAM container image can be created using the WebLogic Image Tool or using the Dockerfile approach.", + "content": "As described in Prepare Your Environment you can obtain or build OAM container images in the following ways:\n Download the latest prebuilt OAM container image from My Oracle Support by referring to the document ID 2723908.1. This image is prebuilt by Oracle and includes Oracle Access Management 12.2.1.4.0 and the latest PSU.\n Build your own OAM image using the WebLogic Image Tool or by using the dockerfile, scripts and base images from Oracle Container Registry (OCR). You can also build your own image by using only the dockerfile and scripts. Building the OAM Image.\n If you have access to the My Oracle Support (MOS), and there is a need to build a new image with an interim or one off patch, it is recommended to use the WebLogic Image Tool to build an Oracle Access Management image for production deployments.\nCreate or update an Oracle Access Management image using the WebLogic Image Tool Using the WebLogic Image Tool, you can create a new Oracle Access Management image with PSU\u0026rsquo;s and interim patches or update an existing image with one or more interim patches.\n Recommendations:\n Use create for creating a new Oracle Access Management image containing the Oracle Access Management binaries, bundle patch and interim patches. This is the recommended approach if you have access to the OAM patches because it optimizes the size of the image. Use update for patching an existing Oracle Access Management image with a single interim patch. Note that the patched image size may increase considerably due to additional image layers introduced by the patch application tool. 
Create an image Set up the WebLogic Image Tool Prerequisites Set up the WebLogic Image Tool Validate setup WebLogic Image Tool build directory WebLogic Image Tool cache Prerequisites Verify that your environment meets the following prerequisites:\n Docker client and daemon on the build machine, with minimum Docker version 18.03.1.ce. Bash version 4.0 or later, to enable the command complete feature. JAVA_HOME environment variable set to the appropriate JDK location e.g: /scratch/export/oracle/product/jdk Set up the WebLogic Image Tool To set up the WebLogic Image Tool:\n Create a working directory and change to it:\n$ mkdir \u0026lt;workdir\u0026gt; $ cd \u0026lt;workdir\u0026gt; For example:\n$ mkdir /scratch/imagetool-setup $ cd /scratch/imagetool-setup Download the latest version of the WebLogic Image Tool from the releases page.\n$ wget https://github.com/oracle/weblogic-image-tool/releases/download/release-X.X.X/imagetool.zip where X.X.X is the latest release referenced on the releases page.\n Unzip the release ZIP file in the imagetool-setup directory.\n$ unzip imagetool.zip Execute the following commands to set up the WebLogic Image Tool:\n$ cd \u0026lt;workdir\u0026gt;/imagetool-setup/imagetool/bin $ source setup.sh For example:\n$ cd /scratch/imagetool-setup/imagetool/bin $ source setup.sh Validate setup To validate the setup of the WebLogic Image Tool:\n Enter the following command to retrieve the version of the WebLogic Image Tool:\n$ imagetool --version Enter imagetool then press the Tab key to display the available imagetool commands:\n$ imagetool \u0026lt;TAB\u0026gt; cache create help rebase update WebLogic Image Tool build directory The WebLogic Image Tool creates a temporary Docker context directory, prefixed by wlsimgbuilder_temp, every time the tool runs. Under normal circumstances, this context directory will be deleted. However, if the process is aborted or the tool is unable to remove the directory, it is safe for you to delete it manually. 
By default, the WebLogic Image Tool creates the Docker context directory under the user\u0026rsquo;s home directory. If you prefer to use a different directory for the temporary context, set the environment variable WLSIMG_BLDDIR:\n$ export WLSIMG_BLDDIR=\u0026#34;/path/to/build/dir\u0026#34; WebLogic Image Tool cache The WebLogic Image Tool maintains a local file cache store. This store is used to look up where the Java, WebLogic Server installers, and WebLogic Server patches reside in the local file system. By default, the cache store is located in the user\u0026rsquo;s $HOME/cache directory. Under this directory, the lookup information is stored in the .metadata file. All automatically downloaded patches also reside in this directory. You can change the default cache store location by setting the environment variable WLSIMG_CACHEDIR:\n$ export WLSIMG_CACHEDIR=\u0026#34;/path/to/cachedir\u0026#34; Set up additional build scripts Creating an Oracle Access Management Docker image using the WebLogic Image Tool requires additional container scripts for Oracle Access Management domains.\n Clone the docker-images repository to set up those scripts. 
In these steps, this directory is DOCKER_REPO:\n$ cd \u0026lt;workdir\u0026gt;/imagetool-setup $ git clone https://github.com/oracle/docker-images.git For example:\n$ cd /scratch/imagetool-setup $ git clone https://github.com/oracle/docker-images.git Note: If you want to create the image continue with the following steps, otherwise to update the image see update an image.\n Create an image After setting up the WebLogic Image Tool, follow these steps to use the WebLogic Image Tool to create a new Oracle Access Management image.\nDownload the Oracle Access Management installation binaries and patches You must download the required Oracle Access Management installation binaries and patches as listed below from the Oracle Software Delivery Cloud and save them in a directory of your choice.\nThe installation binaries and patches required are:\n Oracle Identity and Access Management 12.2.1.4.0\n fmw_12.2.1.4.0_idm.jar Oracle Fusion Middleware 12c Infrastructure 12.2.1.4.0\n fmw_12.2.1.4.0_infrastructure.jar OAM and FMW Infrastructure Patches:\n View document ID 2723908.1 on My Oracle Support. In the Container Image Download/Patch Details section, locate the Oracle Access Management (OAM) table. For the latest PSU click the README link in the Documentation column. In the README, locate the \u0026ldquo;Installed Software\u0026rdquo; section. All the patch numbers to be download are listed here. Download all these individual patches from My Oracle Support. Oracle JDK v8\n jdk-8uXXX-linux-x64.tar.gz where XXX is the JDK version referenced in the README above. 
Update required build files The following files in the code repository location \u0026lt;imagetool-setup-location\u0026gt;/docker-images/OracleAccessManagement/imagetool/12.2.1.4.0 are used for creating the image:\n additionalBuildCmds.txt buildArgs Edit the \u0026lt;workdir\u0026gt;/imagetool-setup/docker-images/OracleAccessManagement/imagetool/12.2.1.4.0/buildArgs file and change %DOCKER_REPO%, %JDK_VERSION% and %BUILDTAG% appropriately.\nFor example:\ncreate --jdkVersion=8u301 --type oam --version=12.2.1.4.0 --tag=oam-latestpsu:12.2.1.4.0 --pull --installerResponseFile /scratch/imagetool-setup/docker-images/OracleFMWInfrastructure/dockerfiles/12.2.1.4/install.file,/scratch/imagetool-setup/docker-images/OracleAccessManagement/dockerfiles/12.2.1.4.0/install/iam.response --additionalBuildCommands /scratch/imagetool-setup/docker-images/OracleAccessManagement/imagetool/12.2.1.4.0/additionalBuildCmds.txt --additionalBuildFiles /scratch/imagetool-setup/docker-images/OracleAccessManagement/dockerfiles/12.2.1.4.0/container-scripts Edit the \u0026lt;workdir\u0026gt;/imagetool-setup/docker-images/OracleFMWInfrastructure/dockerfiles/12.2.1.4/install.file and under the GENERIC section add the line INSTALL_TYPE=\u0026quot;Fusion Middleware Infrastructure\u0026quot;. For example:\n[GENERIC] INSTALL_TYPE=\u0026quot;Fusion Middleware Infrastructure\u0026quot; DECLINE_SECURITY_UPDATES=true SECURITY_UPDATES_VIA_MYORACLESUPPORT=false Create the image Add a JDK package to the WebLogic Image Tool cache. For example:\n$ imagetool cache addInstaller --type jdk --version 8uXXX --path \u0026lt;download location\u0026gt;/jdk-8uXXX-linux-x64.tar.gz where XXX is the JDK version downloaded\n Add the downloaded installation binaries to the WebLogic Image Tool cache. 
For example:\n$ imagetool cache addInstaller --type fmw --version 12.2.1.4.0 --path \u0026lt;download location\u0026gt;/fmw_12.2.1.4.0_infrastructure.jar $ imagetool cache addInstaller --type OAM --version 12.2.1.4.0 --path \u0026lt;download location\u0026gt;/fmw_12.2.1.4.0_idm.jar Add the downloaded OPatch patch to the WebLogic Image Tool cache. For example:\n$ imagetool cache addEntry --key 28186730_13.9.4.2.7 --value \u0026lt;download location\u0026gt;/p28186730_139427_Generic.zip Add the rest of the downloaded product patches to the WebLogic Image Tool cache:\n$ imagetool cache addEntry --key \u0026lt;patch\u0026gt;_12.2.1.4.0 --value \u0026lt;download location\u0026gt;/p\u0026lt;patch\u0026gt;_122140_Generic.zip For example:\n$ imagetool cache addEntry --key 32971905_12.2.1.4.0 --value \u0026lt;download location\u0026gt;/p32971905_122140_Generic.zip $ imagetool cache addEntry --key 20812896_12.2.1.4.0 --value \u0026lt;download location\u0026gt;/p20812896_122140_Generic.zip $ imagetool cache addEntry --key 32880070_12.2.1.4.0 --value \u0026lt;download location\u0026gt;/p32880070_122140_Generic.zip $ imagetool cache addEntry --key 33059296_12.2.1.4.0 --value \u0026lt;download location\u0026gt;/p33059296_122140_Generic.zip $ imagetool cache addEntry --key 32905339_12.2.1.4.0 --value \u0026lt;download location\u0026gt;/p32905339_122140_Generic.zip $ imagetool cache addEntry --key 33084721_12.2.1.4.0 --value \u0026lt;download location\u0026gt;/p33084721_122140_Generic.zip $ imagetool cache addEntry --key 31544353_12.2.1.4.0 --value \u0026lt;download location\u0026gt;/p31544353_122140_Linux-x86-64.zip $ imagetool cache addEntry --key 32957281_12.2.1.4.0 --value \u0026lt;download location\u0026gt;/p32957281_122140_Generic.zip $ imagetool cache addEntry --key 33093748_12.2.1.4.0 --value \u0026lt;download location\u0026gt;/p33093748_122140_Generic.zip Edit the \u0026lt;workdir\u0026gt;/imagetool-setup/docker-images/OracleAccessManagement/imagetool/12.2.1.4.0/buildArgs 
file and append the product patches and opatch patch as follows:\n--patches 32971905_12.2.1.4.0,20812896_12.2.1.4.0,32880070_12.2.1.4.0,33059296_12.2.1.4.0,32905339_12.2.1.4.0,33084721_12.2.1.4.0,31544353_12.2.1.4.0,32957281_12.2.1.4.0,33093748_12.2.1.4.0 --opatchBugNumber=28186730_13.9.4.2.7 An example buildArgs file is now as follows:\ncreate --jdkVersion=8u301 --type oam --version=12.2.1.4.0 --tag=oam-latestpsu:12.2.1.4.0 --pull --installerResponseFile /scratch/imagetool-setup/docker-images/OracleFMWInfrastructure/dockerfiles/12.2.1.4/install.file,/scratch/imagetool-setup/docker-images/OracleAccessManagement/dockerfiles/12.2.1.4.0/install/iam.response --additionalBuildCommands /scratch/imagetool-setup/docker-images/OracleAccessManagement/imagetool/12.2.1.4.0/additionalBuildCmds.txt --additionalBuildFiles /scratch/imagetool-setup/docker-images/OracleAccessManagement/dockerfiles/12.2.1.4.0/container-scripts --patches 32971905_12.2.1.4.0,20812896_12.2.1.4.0,32880070_12.2.1.4.0,33059296_12.2.1.4.0,32905339_12.2.1.4.0,33084721_12.2.1.4.0,31544353_12.2.1.4.0,32957281_12.2.1.4.0,33093748_12.2.1.4.0 --opatchBugNumber=28186730_13.9.4.2.7 Note: In the buildArgs file:\n --jdkVersion value must match the --version value used in the imagetool cache addInstaller command for --type jdk. --version value must match the --version value used in the imagetool cache addInstaller command for --type OAM. --pull always pulls the latest base Linux image oraclelinux:7-slim from the Docker registry. 
Refer to this page for the complete list of options available with the WebLogic Image Tool create command.\n Create the Oracle Access Management image:\n$ imagetool @\u0026lt;absolute path to buildargs file\u0026gt; Note: Make sure that the absolute path to the buildargs file is prepended with a @ character, as shown in the example above.\n For example:\n$ imagetool @\u0026lt;imagetool-setup-location\u0026gt;/docker-images/OracleAccessManagement/imagetool/12.2.1.4.0/buildArgs Check the created image using the docker images command:\n$ docker images | grep oam The output will look similar to the following:\noam-latestpsu 12.2.1.4.0 ad732fc7c16b About a minute ago 3.35GB Update an image The steps below show how to update an existing Oracle Access Management image with an interim patch. In the examples below the image oracle/oam:12.2.1.4.0 is updated with an interim patch.\n$ docker images REPOSITORY TAG IMAGE ID CREATED SIZE oracle/oam 12.2.1.4.0 b051804ba15f 3 months ago 3.34GB Set up the WebLogic Image Tool.\n Download the required interim patch and latest Opatch (28186730) from My Oracle Support. and save them in a directory of your choice.\n Add the OPatch patch to the WebLogic Image Tool cache, for example:\n$ imagetool cache addEntry --key 28186730_13.9.4.2.7 --value \u0026lt;downloaded-patches-location\u0026gt;/p28186730_139427_Generic.zip Execute the imagetool cache addEntry command for each patch to add the required patch(es) to the WebLogic Image Tool cache. For example, to add patch p32701831_12214210607_Generic.zip:\n$ imagetool cache addEntry --key=32701831_12.2.1.4.210607 --value \u0026lt;downloaded-patches-location\u0026gt;/p32701831_12214210607_Generic.zip Provide the following arguments to the WebLogic Image Tool update command:\n –-fromImage - Identify the image that needs to be updated. In the example below, the image to be updated is oracle/oam:12.2.1.4.0. –-patches - Multiple patches can be specified as a comma-separated list. 
--tag - Specify the new tag to be applied for the image being built. Refer here for the complete list of options available with the WebLogic Image Tool update command.\n Note: The WebLogic Image Tool cache should have the latest OPatch zip. The WebLogic Image Tool will update the OPatch if it is not already updated in the image.\n For example:\n$ imagetool update --fromImage oracle/oam:12.2.1.4.0 --tag=oracle/oam-new:12.2.1.4.0 --patches=32701831_12.2.1.4.210607 --opatchBugNumber=28186730_13.9.4.2.7 Note: If the command fails because the files in the image being upgraded are not owned by oracle:oracle, then add the parameter --chown \u0026lt;userid\u0026gt;:\u0026lt;groupid\u0026gt; to correspond with the values returned in the error.\n Check the built image using the docker images command:\n$ docker images | grep oam The output will look similar to the following:\nREPOSITORY TAG IMAGE ID CREATED SIZE oracle/oam-new 12.2.1.4.0 78ccd1ad67eb 5 minutes ago 3.8GB oracle/oam 12.2.1.4.0 b051804ba15f 3 months ago 3.34GB " +}, +{ + "uri": "/fmw-kubernetes/21.4.2/oig/create-or-update-image/", + "title": "Create or update an image", + "tags": [], + "description": "Create or update an Oracle Identity Governance (OIG) container image used for deploying OIG domains. An OIG container image can be created using the WebLogic Image Tool or using the Dockerfile approach.", + "content": "As described in Prepare Your Environment you can obtain or build OIG container images in the following ways:\n Download the latest prebuilt OIG container image from My Oracle Support by referring to the document ID 2723908.1. This image is prebuilt by Oracle and includes Oracle Identity Governance 12.2.1.4.0 and the latest PSU.\n Build your own OIG image using the WebLogic Image Tool or by using the dockerfile, scripts and base images from Oracle Container Registry (OCR). You can also build your own image by using only the dockerfile and scripts. 
Building the OIG Image.\n If you have access to the My Oracle Support (MOS), and there is a need to build a new image with an interim or one off patch, it is recommended to use the WebLogic Image Tool to build an Oracle Identity Governance image for production deployments.\nCreate or update an Oracle Identity Governance image using the WebLogic Image Tool Using the WebLogic Image Tool, you can create a new Oracle Identity Governance image with PSU\u0026rsquo;s and interim patches or update an existing image with one or more interim patches.\n Recommendations:\n Use create for creating a new Oracle Identity Governance image containing the Oracle Identity Governance binaries, bundle patch and interim patches. This is the recommended approach if you have access to the OIG patches because it optimizes the size of the image. Use update for patching an existing Oracle Identity Governance image with a single interim patch. Note that the patched image size may increase considerably due to additional image layers introduced by the patch application tool. Create an image Set up the WebLogic Image Tool Prerequisites Set up the WebLogic Image Tool Validate setup WebLogic Image Tool build directory WebLogic Image Tool cache Prerequisites Verify that your environment meets the following prerequisites:\n Docker client and daemon on the build machine, with minimum Docker version 18.03.1.ce. Bash version 4.0 or later, to enable the command complete feature. 
JAVA_HOME environment variable set to the appropriate JDK location e.g: /scratch/export/oracle/product/jdk Set up the WebLogic Image Tool To set up the WebLogic Image Tool:\n Create a working directory and change to it:\n$ mkdir \u0026lt;workdir\u0026gt; $ cd \u0026lt;workdir\u0026gt; For example:\n$ mkdir /scratch/imagetool-setup $ cd /scratch/imagetool-setup Download the latest version of the WebLogic Image Tool from the releases page.\n$ wget https://github.com/oracle/weblogic-image-tool/releases/download/release-X.X.X/imagetool.zip where X.X.X is the latest release referenced on the releases page.\n Unzip the release ZIP file in the imagetool-setup directory.\n$ unzip imagetool.zip Execute the following commands to set up the WebLogic Image Tool:\n$ cd \u0026lt;workdir\u0026gt;/imagetool-setup/imagetool/bin $ source setup.sh For example:\n$ cd /scratch/imagetool-setup/imagetool/bin $ source setup.sh Validate setup To validate the setup of the WebLogic Image Tool:\n Enter the following command to retrieve the version of the WebLogic Image Tool:\n$ imagetool --version Enter imagetool then press the Tab key to display the available imagetool commands:\n$ imagetool \u0026lt;TAB\u0026gt; cache create help rebase update WebLogic Image Tool build directory The WebLogic Image Tool creates a temporary Docker context directory, prefixed by wlsimgbuilder_temp, every time the tool runs. Under normal circumstances, this context directory will be deleted. However, if the process is aborted or the tool is unable to remove the directory, it is safe for you to delete it manually. By default, the WebLogic Image Tool creates the Docker context directory under the user\u0026rsquo;s home directory. If you prefer to use a different directory for the temporary context, set the environment variable WLSIMG_BLDDIR:\n$ export WLSIMG_BLDDIR=\u0026#34;/path/to/build/dir\u0026#34; WebLogic Image Tool cache The WebLogic Image Tool maintains a local file cache store. 
This store is used to look up where the Java, WebLogic Server installers, and WebLogic Server patches reside in the local file system. By default, the cache store is located in the user\u0026rsquo;s $HOME/cache directory. Under this directory, the lookup information is stored in the .metadata file. All automatically downloaded patches also reside in this directory. You can change the default cache store location by setting the environment variable WLSIMG_CACHEDIR:\n$ export WLSIMG_CACHEDIR=\u0026#34;/path/to/cachedir\u0026#34; Set up additional build scripts Creating an Oracle Identity Governance Docker image using the WebLogic Image Tool requires additional container scripts for Oracle Identity Governance domains.\n Clone the docker-images repository to set up those scripts. In these steps, this directory is DOCKER_REPO:\n$ cd \u0026lt;workdir\u0026gt;/imagetool-setup $ git clone https://github.com/oracle/docker-images.git For example:\n$ cd /scratch/imagetool-setup $ git clone https://github.com/oracle/docker-images.git Note: If you want to create the image continue with the following steps, otherwise to update the image see update an image.\n Create an image After setting up the WebLogic Image Tool, follow these steps to use the WebLogic Image Tool to create a new Oracle Identity Governance image.\nDownload the Oracle Identity Governance installation binaries and patches You must download the required Oracle Identity Governance installation binaries and patches as listed below from the Oracle Software Delivery Cloud and save them in a directory of your choice.\nThe installation binaries and patches required are:\n Oracle Identity and Access Management 12.2.1.4.0\n fmw_12.2.1.4.0_idm.jar Oracle Fusion Middleware 12c Infrastructure 12.2.1.4.0\n fmw_12.2.1.4.0_infrastructure.jar Oracle SOA Suite for Oracle Middleware 12.2.1.4.0\n fmw_12.2.1.4.0_soa.jar Oracle Service Bus 12.2.1.4.0\n fmw_12.2.1.4.0_osb.jar OIG and FMW Infrastructure Patches:\n View document ID 
2723908.1 on My Oracle Support. In the Container Image Download/Patch Details section, locate the Oracle Identity Governance (OIG) table. For the latest PSU click the README link in the Documentation column. In the README, locate the \u0026ldquo;Installed Software\u0026rdquo; section. All the patch numbers to be download are listed here. Download all these individual patches from My Oracle Support. Oracle JDK v8\n jdk-8uXXX-linux-x64.tar.gz where XXX is the JDK version referenced in the README above. Update required build files The following files in the code repository location \u0026lt;imagetool-setup-location\u0026gt;/docker-images/OracleIdentityGovernance/imagetool/12.2.1.4.0 are used for creating the image:\n additionalBuildCmds.txt buildArgs . Edit the \u0026lt;workdir\u0026gt;/imagetool-setup/docker-images/OracleIdentityGovernance/imagetool/12.2.1.4.0/buildArgs file and change %DOCKER_REPO%, %JDK_VERSION% and %BUILDTAG% appropriately.\nFor example:\ncreate --jdkVersion=8u311 --type oig --chown oracle:root --version=12.2.1.4.0 --tag=oig-latestpsu:12.2.1.4.0 --pull --installerResponseFile /scratch/imagetool-setup/docker-images/OracleFMWInfrastructure/dockerfiles/12.2.1.4/install.file,/scratch/imagetool-setup/docker-images/OracleSOASuite/dockerfiles/12.2.1.4.0/install/soasuite.response,/scratch/imagetool-setup/docker-images/OracleSOASuite/dockerfiles/12.2.1.4.0/install/osb.response,/scratch/imagetool-setup/docker-images/OracleIdentityGovernance/dockerfiles/12.2.1.4.0/idmqs.response --additionalBuildCommands /scratch/imagetool-setup/docker-images/OracleIdentityGovernance/imagetool/12.2.1.4.0/additionalBuildCmds.txt --additionalBuildFiles /scratch/imagetool-setup/docker-images/OracleIdentityGovernance/dockerfiles/12.2.1.4.0/container-scripts Edit the \u0026lt;workdir\u0026gt;/imagetool-setup/docker-images/OracleFMWInfrastructure/dockerfiles/12.2.1.4.0/install.file and under the GENERIC section add the line INSTALL_TYPE=\u0026quot;Fusion Middleware 
Infrastructure\u0026rdquo;. For example:\n[GENERIC] INSTALL_TYPE=\u0026quot;Fusion Middleware Infrastructure\u0026quot; DECLINE_SECURITY_UPDATES=true SECURITY_UPDATES_VIA_MYORACLESUPPORT=false Create the image Add a JDK package to the WebLogic Image Tool cache. For example:\n$ imagetool cache addInstaller --type jdk --version 8uXXX --path \u0026lt;download location\u0026gt;/jdk-8uXXX-linux-x64.tar.gz where XXX is the JDK version downloaded\n Add the downloaded installation binaries to the WebLogic Image Tool cache. For example:\n$ imagetool cache addInstaller --type fmw --version 12.2.1.4.0 --path \u0026lt;download location\u0026gt;/fmw_12.2.1.4.0_infrastructure.jar $ imagetool cache addInstaller --type soa --version 12.2.1.4.0 --path \u0026lt;download location\u0026gt;/fmw_12.2.1.4.0_soa.jar $ imagetool cache addInstaller --type osb --version 12.2.1.4.0 --path \u0026lt;download location\u0026gt;/fmw_12.2.1.4.0_osb.jar $ imagetool cache addInstaller --type idm --version 12.2.1.4.0 --path \u0026lt;download location\u0026gt;/fmw_12.2.1.4.0_idm.jar Add the downloaded OPatch patch to the WebLogic Image Tool cache. 
For example:\n$ imagetool cache addEntry --key 28186730_13.9.4.2.7 --value \u0026lt;download location\u0026gt;/p28186730_139427_Generic.zip Add the rest of the downloaded product patches to the WebLogic Image Tool cache:\n$ imagetool cache addEntry --key \u0026lt;patch\u0026gt;_12.2.1.4.0 --value \u0026lt;download location\u0026gt;/p\u0026lt;patch\u0026gt;_122140_Generic.zip For example:\n$ imagetool cache addEntry --key 33416868_12.2.1.4.0 --value \u0026lt;download location\u0026gt;/p33416868_122140_Generic.zip $ imagetool cache addEntry --key 33453703_12.2.1.4.0 --value \u0026lt;download location\u0026gt;/p33453703_122140_Generic.zip $ imagetool cache addEntry --key 32999272_12.2.1.4.0 --value \u0026lt;download location\u0026gt;/p32999272_122140_Generic.zip $ imagetool cache addEntry --key 33093748_12.2.1.4.0 --value \u0026lt;download location\u0026gt;/p33093748_122140_Generic.zip $ imagetool cache addEntry --key 33281560_12.2.1.4.0 --value \u0026lt;download location\u0026gt;/p33281560_122140_Generic.zip $ imagetool cache addEntry --key 31544353_12.2.1.4.0 --value \u0026lt;download location\u0026gt;/p31544353_122140_Linux-x86-64.zip $ imagetool cache addEntry --key 33313802_12.2.1.4.0 --value \u0026lt;download location\u0026gt;/p33313802_122140_Generic.zip $ imagetool cache addEntry --key 33408307_12.2.1.4.0 --value \u0026lt;download location\u0026gt;/p33408307_122140_Generic.zip $ imagetool cache addEntry --key 33286160_12.2.1.4.0 --value \u0026lt;download location\u0026gt;/p33286160_122140_Generic.zip $ imagetool cache addEntry --key 32880070_12.2.1.4.0 --value \u0026lt;download location\u0026gt;/p32880070_122140_Generic.zip $ imagetool cache addEntry --key 32905339_12.2.1.4.0 --value \u0026lt;download location\u0026gt;/p32905339_122140_Generic.zip $ imagetool cache addEntry --key 32784652_12.2.1.4.0 --value \u0026lt;download location\u0026gt;/p32784652_122140_Generic.zip Edit the 
\u0026lt;workdir\u0026gt;/imagetool-setup/docker-images/OracleIdentityGovernance/imagetool/12.2.1.4.0/buildArgs file and append the product patches and opatch patch as follows:\n--patches 33416868_12.2.1.4.0,33453703_12.2.1.4.0,32999272_12.2.1.4.0,33093748_12.2.1.4.0,33281560_12.2.1.4.0,31544353_12.2.1.4.0,33313802_12.2.1.4.0,33408307_12.2.1.4.0,33286160_12.2.1.4.0,32880070_12.2.1.4.0,32905339_12.2.1.4.0,32784652_12.2.1.4.0 --opatchBugNumber=28186730_13.9.4.2.7 An example buildArgs file is now as follows:\ncreate --jdkVersion=8u301 --type oig --version=12.2.1.4.0 --tag=oig-latestpsu:12.2.1.4.0 --pull --installerResponseFile /scratch/imagetool-setup/docker-images/OracleFMWInfrastructure/dockerfiles/12.2.1.4/install.file,/scratch/docker-images/OracleSOASuite/dockerfiles/12.2.1.4.0/install/soasuite.response,/scratch/docker-images/OracleSOASuite/dockerfiles/12.2.1.4.0/install/osb.response,/scratch/docker-images/OracleIdentityGovernance/dockerfiles/12.2.1.4.0/idmqs.response --additionalBuildCommands /scratch/imagetool-setup/docker-images/OracleIdentityGovernance/imagetool/12.2.1.4.0/additionalBuildCmds.txt --additionalBuildFiles /scratch/imagetool-setup/docker-images/OracleIdentityGovernance/dockerfiles/12.2.1.4.0/container-scripts --patches 33416868_12.2.1.4.0,33453703_12.2.1.4.0,32999272_12.2.1.4.0,33093748_12.2.1.4.0,33281560_12.2.1.4.0,31544353_12.2.1.4.0,33313802_12.2.1.4.0,33408307_12.2.1.4.0,33286160_12.2.1.4.0,32880070_12.2.1.4.0,32905339_12.2.1.4.0,32784652_12.2.1.4.0 --opatchBugNumber=28186730_13.9.4.2.7 Note: In the buildArgs file:\n --jdkVersion value must match the --version value used in the imagetool cache addInstaller command for --type jdk. --version value must match the --version value used in the imagetool cache addInstaller command for --type idm. --pull always pulls the latest base Linux image oraclelinux:7-slim from the Docker registry. 
Refer to this page for the complete list of options available with the WebLogic Image Tool create command.\n Create the Oracle Identity Governance image:\n$ imagetool @\u0026lt;absolute path to buildargs file\u0026gt; Note: Make sure that the absolute path to the buildargs file is prepended with a @ character, as shown in the example above.\n For example:\n$ imagetool @\u0026lt;imagetool-setup-location\u0026gt;/docker-images/OracleIdentityGovernance/imagetool/12.2.1.4.0/buildArgs Check the created image using the docker images command:\n$ docker images | grep oig The output will look similar to the following:\noig-latestpsu 12.2.1.4.0 e391ed154bcb 50 seconds ago 4.43GB Update an image The steps below show how to update an existing Oracle Identity Governance image with an interim patch. In the examples below the image oracle/oig:12.2.1.4.0 is updated with an interim patch.\n$ docker images REPOSITORY TAG IMAGE ID CREATED SIZE oracle/oig 12.2.1.4.0 298fdb98e79c 3 months ago 4.42GB Set up the WebLogic Image Tool.\n Download the required interim patch and latest Opatch (28186730) from My Oracle Support and save them in a directory of your choice.\n Add the OPatch patch to the WebLogic Image Tool cache, for example:\n$ imagetool cache addEntry --key 28186730_13.9.4.2.7 --value \u0026lt;downloaded-patches-location\u0026gt;/p28186730_139427_Generic.zip Execute the imagetool cache addEntry command for each patch to add the required patch(es) to the WebLogic Image Tool cache. For example, to add patch p33165837_12214210708_Generic.zip:\n$ imagetool cache addEntry --key=33165837_12.2.1.4.210708 --value \u0026lt;downloaded-patches-location\u0026gt;/p33165837_12214210708_Generic.zip Provide the following arguments to the WebLogic Image Tool update command:\n --fromImage - Identify the image that needs to be updated. In the example below, the image to be updated is oracle/oig:12.2.1.4.0. --patches - Multiple patches can be specified as a comma-separated list. 
--tag - Specify the new tag to be applied for the image being built. Refer here for the complete list of options available with the WebLogic Image Tool update command.\n Note: The WebLogic Image Tool cache should have the latest OPatch zip. The WebLogic Image Tool will update the OPatch if it is not already updated in the image.\n For example:\n$ imagetool update --fromImage oracle/oig:12.2.1.4.0 --tag=oracle/oig-new:12.2.1.4.0 --patches=33165837_12.2.1.4.210708 --opatchBugNumber=28186730_13.9.4.2.7 Note: If the command fails because the files in the image being upgraded are not owned by oracle:oracle, then add the parameter --chown \u0026lt;userid\u0026gt;:\u0026lt;groupid\u0026gt; to correspond with the values returned in the error.\n Check the built image using the docker images command:\n$ docker images | grep oig The output will look similar to the following:\nREPOSITORY TAG IMAGE ID CREATED SIZE oracle/oig-new 12.2.1.4.0 0c8381922e95 16 seconds ago 4.91GB oracle/oig 12.2.1.4.0 298fdb98e79c 3 months ago 4.42GB " +}, +{ + "uri": "/fmw-kubernetes/21.4.2/oam/patch-and-upgrade/", + "title": "Patch and Upgrade", + "tags": [], + "description": "This document provides steps to patch or upgrade an OAM image, WebLogic Kubernetes Operator or Kubernetes Cluster.", + "content": "Patch an existing OAM image, or upgrade the WebLogic Kubernetes Operator release.\n a. Patch an image Instructions on how to update your OAM Kubernetes cluster with a new OAM Docker image.\n b. Upgrade an operator release Instructions on how to update the WebLogic Kubernetes Operator version.\n " +}, +{ + "uri": "/fmw-kubernetes/21.4.2/oig/patch-and-upgrade/", + "title": "Patch and upgrade", + "tags": [], + "description": "This document provides steps to patch or upgrade an OIG image, or WebLogic Kubernetes Operator.", + "content": "Patch an existing Oracle OIG image, or upgrade the WebLogic Kubernetes Operator release.\n a. 
Patch an image Instructions on how to update your OIG Kubernetes cluster with a new OIG docker image.\n b. Upgrade an operator release Instructions on how to update the WebLogic Kubernetes Operator version.\n " +}, +{ + "uri": "/fmw-kubernetes/21.4.2/oam/troubleshooting/", + "title": "Troubleshooting", + "tags": [], + "description": "How to Troubleshoot domain creation failure.", + "content": "Domain creation failure If the OAM domain creation fails when running create-domain.sh, run the following to diagnose the issue:\n Run the following command to diagnose the create domain job:\n$ kubectl logs \u0026lt;domain_job\u0026gt; -n \u0026lt;domain_namespace\u0026gt; For example:\n$ kubectl logs accessdomain-create-fmw-infra-sample-domain-job-c6vfb -n oamns Also run:\n$ kubectl describe pod \u0026lt;domain_job\u0026gt; -n \u0026lt;domain_namespace\u0026gt; For example:\n$ kubectl describe pod accessdomain-create-fmw-infra-sample-domain-job-c6vfb -n oamns Using the output you should be able to diagnose the problem and resolve the issue.\nClean down the failed domain creation by following steps 1-3 in Delete the OAM domain home. Then follow RCU schema creation onwards to recreate the RCU schema, kubernetes secrets for domain and RCU, the persistent volume and the persistent volume claim. 
Then execute the OAM domain creation steps again.\n If any of the above commands return the following error:\nFailed to start container \u0026quot;create-fmw-infra-sample-domain-job\u0026quot;: Error response from daemon: error while creating mount source path '/scratch/OAMK8S/accessdomainpv ': mkdir /scratch/OAMK8S/accessdomainpv : permission denied then there is a permissions error on the directory for the PV and PVC and the following should be checked:\na) The directory has 777 permissions: chmod -R 777 \u0026lt;workdir\u0026gt;/accessdomainpv.\nb) If it does have the permissions, check if an oracle user exists and the uid and gid equal 1000.\nCreate the oracle user if it doesn\u0026rsquo;t exist and set the uid and gid to 1000.\nc) Edit the $WORKDIR/kubernetes/create-weblogic-domain-pv-pvc/create-pv-pvc-inputs.yaml and add a slash to the end of the directory for the weblogicDomainStoragePath parameter:\nweblogicDomainStoragePath: /scratch/OAMK8S/accessdomainpv/ Clean down the failed domain creation by following steps 1-3 in Delete the OAM domain home. Then follow RCU schema creation onwards to recreate the RCU schema, kubernetes secrets for domain and RCU, the persistent volume and the persistent volume claim. 
Then execute the OAM domain creation steps again.\n " +}, +{ + "uri": "/fmw-kubernetes/21.4.2/oig/troubleshooting/", + "title": "Troubleshooting", + "tags": [], + "description": "Sample for creating an OIG domain home on an existing PV or PVC, and the domain resource YAML file for deploying the generated OIG domain.", + "content": "Domain creation failure If the OIG domain creation fails when running create-domain.sh, run the following to diagnose the issue:\n Run the following command to diagnose the create domain job:\n$ kubectl logs \u0026lt;job_name\u0026gt; -n \u0026lt;domain_namespace\u0026gt; For example:\n$ kubectl logs governancedomain-create-fmw-infra-sample-domain-job-9wqzb -n oigns Also run:\n$ kubectl describe pod \u0026lt;job_domain\u0026gt; -n \u0026lt;domain_namespace\u0026gt; For example:\n$ kubectl describe pod governancedomain-create-fmw-infra-sample-domain-job-9wqzb -n oigns Using the output you should be able to diagnose the problem and resolve the issue.\nClean down the failed domain creation by following steps 1-3 in Delete the OIG domain home. Then follow RCU schema creation onwards to recreate the RCU schema, kubernetes secrets for domain and RCU, the persistent volume and the persistent volume claim. 
Then execute the OIG domain creation steps again.\n If any of the above commands return the following error:\nFailed to start container \u0026quot;create-fmw-infra-sample-domain-job\u0026quot;: Error response from daemon: error while creating mount source path '/scratch/OIGK8S/governancedomainpv ': mkdir /scratch/OIGK8S/governancedomainpv : permission denied then there is a permissions error on the directory for the PV and PVC and the following should be checked:\na) The directory has 777 permissions: chmod -R 777 \u0026lt;workdir\u0026gt;/governancedomainpv.\nb) If it does have the permissions, check if an oracle user exists and the uid and gid equal 1000, for example:\n$ uid=1000(oracle) gid=1000(spg) groups=1000(spg),59968(oinstall),8500(dba),100(users),1007(cgbudba) Create the oracle user if it doesn\u0026rsquo;t exist and set the uid and gid to 1000.\nc) Edit the $WORKDIR/kubernetes/create-weblogic-domain-pv-pvc/create-pv-pvc-inputs.yaml and add a slash to the end of the directory for the weblogicDomainStoragePath parameter:\nweblogicDomainStoragePath: /scratch/OIGK8S/governancedomainpv/ Clean down the failed domain creation by following steps 1-3 in Delete the OIG domain home. Then follow RCU schema creation onwards to recreate the RCU schema, kubernetes secrets for domain and RCU, the persistent volume and the persistent volume claim. 
Then execute the OIG domain creation steps again.\n " +}, +{ + "uri": "/fmw-kubernetes/21.4.2/oid/create-oid-instances-helm/oid/", + "title": "Helm Chart: oid", + "tags": [], + "description": "This document provides details of the oid Helm chart.", + "content": " Introduction Deploy oid Helm Chart Ingress Controller Setup Ingress with NGINX Configuration Parameters Introduction This Helm chart is provided for the deployment of Oracle Internet Directory instances on Kubernetes.\nBased on the configuration, this chart deploys the following objects in the specified namespace of a Kubernetes cluster.\n Service Account Secret Persistent Volume and Persistent Volume Claim Pod(s)/Container(s) for Oracle Internet Directory Instances Services for interfaces exposed through Oracle Internet Directory Instances Ingress configuration Create Kubernetes Namespace Create a Kubernetes namespace to provide a scope for other objects such as pods and services that you create in the environment. To create your namespace, issue the following command:\n$ kubectl create ns oidns namespace/oidns created Deploy OID Helm Chart Create Oracle Internet Directory instances along with Kubernetes objects in a specified namespace using the oid Helm Chart.\nThe deployment can be initiated by running the following Helm command with reference to the oid Helm Chart, along with configuration parameters according to your environment. Before deploying the Helm chart, the namespace should be created. 
Objects to be created by the Helm chart will be created inside the specified namespace.\n$ helm install --namespace \u0026lt;namespace\u0026gt; \\ \u0026lt;Configuration Parameters\u0026gt; \\ \u0026lt;deployment/release name\u0026gt; \\ \u0026lt;Helm Chart Path/Name\u0026gt; Configuration Parameters (override values in chart) can be passed on with --set arguments on the command line and/or with -f / --values arguments when referring to files.\nNote: Example files in the sections below provide values which allow the user to override the default values provided by the Helm chart.\n Navigate to the helm directory for OID under the working directory where the code was cloned.\n Create a file oidoverride.yaml file with the following contents:\n image: repository: oracle/oid tag: 12.2.1.4.0 pullPolicy: IfNotPresent oidConfig: realmDN: dc=oid,dc=example,dc=com domainName: oid_domain orcladminPassword: \u0026lt;password\u0026gt; dbUser: sys dbPassword: \u0026lt;password\u0026gt; dbschemaPassword: \u0026lt;password\u0026gt; rcuSchemaPrefix: OIDK8S rcuDatabaseURL: oiddb.example.com:1521/oiddb.example.com sslwalletPassword: welcome2 persistence: type: networkstorage networkstorage: nfs: path: /scratch/shared/oid_user_projects server: \u0026lt;NFS IP address \u0026gt; odsm: adminUser: weblogic adminPassword: welcome3 where: /scratch/shared/oid_user_projects is the hostpath where the pv and pvc will be created.\nCreate the OID instances To setup a single pod:\nhelm install --namespace oidns --values oidoverride.yaml oid oid --set replicaCount=0 To setup multiple pods increase replicaCount:\nhelm install --namespace oidns --values oidoverride.yaml oid oid --set replicaCount=2 Confirm that the pods and services are running: To setup a single pod:\nkubectl get all --namespace oidns Output should be similar to the following:\n NAME READY STATUS RESTARTS AGE IP NODE NOMINATED NODE READINESS GATES pod/oidhost1 1/1 Running 0 3h34m 10.244.0.137 myoidhost \u0026lt;none\u0026gt; 
\u0026lt;none\u0026gt; pod/oidhost2 1/1 Running 0 3h34m 10.244.0.138 myoidhost \u0026lt;none\u0026gt; \u0026lt;none\u0026gt; pod/oidhost3 1/1 Running 0 3h34m 10.244.0.136 myoidhost \u0026lt;none\u0026gt; \u0026lt;none\u0026gt; NAME TYPE CLUSTER-IP EXTERNAL-IP PORT(S) AGE SELECTOR service/oid-lbr-ldap ClusterIP 10.103.103.151 \u0026lt;none\u0026gt; 3060/TCP,3131/TCP 3h34m app.kubernetes.io/instance=oid,app.kubernetes.io/name=oid service/oidhost1 ClusterIP 10.108.25.249 \u0026lt;none\u0026gt; 3060/TCP,3131/TCP,7001/TCP,7002/TCP 3h34m app.kubernetes.io/instance=oid,app.kubernetes.io/name=oid,oid/instance=oidhost1 service/oidhost2 ClusterIP 10.99.99.62 \u0026lt;none\u0026gt; 3060/TCP,3131/TCP 3h34m app.kubernetes.io/instance=oid,app.kubernetes.io/name=oid,oid/instance=oidhost2 service/oidhost3 ClusterIP 10.107.13.174 \u0026lt;none\u0026gt; 3060/TCP,3131/TCP 3h34m app.kubernetes.io/instance=oid,app.kubernetes.io/name=oid,oid/instance=oidhost3 Examples Example where configuration parameters are passed with --set argument: $ helm install --namespace oidns \\ --set oidConfig.rootUserPassword=Oracle123,persistence.filesystem.hostPath.path=/scratch/shared/oid_user_projects \\ oid oid For more details about the helm command and parameters, please execute helm --help and helm install --help. In this example, it is assumed that the command is executed from the directory containing the \u0026lsquo;oid\u0026rsquo; helm chart directory (OracleInternetDirectory/kubernetes/helm/). Example where configuration parameters are passed with --values argument: $ helm install --namespace oidns \\ --values oid-values-override.yaml \\ oid oid For more details about the helm command and parameters, please execute helm --help and helm install --help. In this example, it is assumed that the command is executed from the directory containing the \u0026lsquo;oid\u0026rsquo; helm chart directory (OracleInternetDirectory/kubernetes/helm/). 
The --values argument passes a file path/name which overrides values in the chart. oid-values-override.yaml\nimage: repository: oracle/oid tag: 12.2.1.4.0 pullPolicy: IfNotPresent oidConfig: realmDN: dc=oid,dc=example,dc=com domainName: oid_domain orcladminPassword: \u0026lt;password\u0026gt; dbUser: sys dbPassword: \u0026lt;password\u0026gt; dbschemaPassword: \u0026lt;password\u0026gt; rcuSchemaPrefix: OIDK8S rcuDatabaseURL: oiddb.example.com:1521/oiddb.example.com sslwalletPassword: welcome2 persistence: type: filesystem filesystem: hostPath: path: /scratch/shared/oid_user_projects odsm: adminUser: weblogic adminPassword: welcome3 Example to scale-up through Helm Chart based deployment: In this example, we are setting replicaCount value to 3. If initially, the replicaCount value was 2, we will observe a new Oracle Internet Directory pod with associated services brought up by Kubernetes. So overall, 4 pods will be running now.\nWe have two ways to achieve our goal:\n$ helm upgrade --namespace oidns \\ --set replicaCount=3 \\ oid oid OR\n$ helm upgrade --namespace oidns \\ --values oid-values-override.yaml \\ oid oid oid-values-override.yaml\nreplicaCount: 3 For more details about the helm command and parameters, please execute helm --help and helm install --help. In this example, it is assumed that the command is executed from the directory containing the \u0026lsquo;oid\u0026rsquo; helm chart directory (OracleInternetDirectory/kubernetes/helm/). Example to apply new Oracle Internet Directory patch through Helm Chart based deployment: In this example, we will apply PSU2020July-20200730 patch on earlier running Oracle Internet Directory version. 
If we describe pod we will observe that the container is up with new version.\nWe have two ways to achieve our goal:\n$ helm upgrade --namespace oidns \\ --set image.repository=oracle/oid,image.tag=12.2.1.4.0-PSU2020July-20200730 \\ oid oid --reuse-values OR\n$ helm upgrade --namespace oidns \\ --values oid-values-override.yaml \\ oid oid For more details about the helm command and parameters, please execute helm --help and helm install --help. In this example, it is assumed that the command is executed from the directory containing the \u0026lsquo;oid\u0026rsquo; helm chart directory (OracleInternetDirectory/kubernetes/helm/). oid-values-override.yaml\nimage: repository: oracle/oid tag: 12.2.1.4.0-PSU2020July-20200730 Example for using NFS as PV Storage: $ helm install --namespace oidns \\ --values oid-values-override-nfs.yaml \\ oid oid For more details about the helm command and parameters, please execute helm --help and helm install --help. In this example, it is assumed that the command is executed from the directory containing the \u0026lsquo;oid\u0026rsquo; helm chart directory (OracleInternetDirectory/kubernetes/helm/). The --values argument passes a file path/name which overrides values in the chart. 
oid-values-override-nfs.yaml\nimage: repository: oracle/oid tag: 12.2.1.4.0 pullPolicy: IfNotPresent oidConfig: realmDN: dc=oid,dc=example,dc=com domainName: oid_domain orcladminPassword: \u0026lt;password\u0026gt; dbUser: sys dbPassword: \u0026lt;password\u0026gt; dbschemaPassword: \u0026lt;password\u0026gt; rcuSchemaPrefix: OIDK8S rcuDatabaseURL: oiddb.example.com:1521/oiddb.example.com sslwalletPassword: welcome2 persistence: type: networkstorage networkstorage: nfs: path: /scratch/shared/oid_user_projects server: \u0026lt;NFS IP address \u0026gt; odsm: adminUser: weblogic adminPassword: welcome3 Example for using PV type of your choice: $ helm install --namespace oidns \\ --values oid-values-override-pv-custom.yaml \\ oid oid For more details about the helm command and parameters, please execute helm --help and helm install --help. In this example, it is assumed that the command is executed from the directory containing the \u0026lsquo;oid\u0026rsquo; helm chart directory (OracleInternetDirectory/kubernetes/helm/). The --values argument passes a file path/name which overrides values in the chart. oid-values-override-pv-custom.yaml\noidConfig: rootUserPassword: Oracle123 persistence: type: custom custom: nfs: # Path of NFS Share location path: /scratch/shared/oid_user_projects # IP of NFS Server server: \u0026lt;NFS IP address \u0026gt; Under custom:, the configuration of your choice can be specified. This configuration will be used \u0026lsquo;as-is\u0026rsquo; for the PersistentVolume object. 
Check Deployment Output for the helm install/upgrade command Output similar to the following is observed following successful execution of helm install/upgrade command.\nNAME: oid LAST DEPLOYED: Tue Mar 31 01:40:05 2020 NAMESPACE: oidns STATUS: deployed REVISION: 1 TEST SUITE: None Check for the status of objects created through oid helm chart Command:\n$ kubectl --namespace oidns get all Output is similar to the following:\nNAME READY STATUS RESTARTS AGE IP NODE NOMINATED NODE READINESS GATES pod/oidhost1 1/1 Running 0 3h34m 10.244.0.137 myoidhost \u0026lt;none\u0026gt; \u0026lt;none\u0026gt; pod/oidhost2 1/1 Running 0 3h34m 10.244.0.138 myoidhost \u0026lt;none\u0026gt; \u0026lt;none\u0026gt; pod/oidhost3 1/1 Running 0 3h34m 10.244.0.136 myoidhost \u0026lt;none\u0026gt; \u0026lt;none\u0026gt; NAME TYPE CLUSTER-IP EXTERNAL-IP PORT(S) AGE SELECTOR service/oid-lbr-ldap ClusterIP 10.103.103.151 \u0026lt;none\u0026gt; 3060/TCP,3131/TCP 3h34m app.kubernetes.io/instance=oid,app.kubernetes.io/name=oid service/oidhost1 ClusterIP 10.108.25.249 \u0026lt;none\u0026gt; 3060/TCP,3131/TCP,7001/TCP,7002/TCP 3h34m app.kubernetes.io/instance=oid,app.kubernetes.io/name=oid,oid/instance=oidhost1 service/oidhost2 ClusterIP 10.99.99.62 \u0026lt;none\u0026gt; 3060/TCP,3131/TCP 3h34m app.kubernetes.io/instance=oid,app.kubernetes.io/name=oid,oid/instance=oidhost2 service/oidhost3 ClusterIP 10.107.13.174 \u0026lt;none\u0026gt; 3060/TCP,3131/TCP 3h34m app.kubernetes.io/instance=oid,app.kubernetes.io/name=oid,oid/instance=oidhost3 Kubernetes Objects Kubernetes objects created by the Helm chart are detailed in the table below:\n Type Name Example Name Purpose Secret \u0026lt;deployment/release name\u0026gt;-creds oid-creds Secret object for Oracle Internet Directory related critical values like passwords Persistent Volume \u0026lt;deployment/release name\u0026gt;-pv oid-pv Persistent Volume for user_projects mount. 
Persistent Volume Claim \u0026lt;deployment/release name\u0026gt;-pvc oid-pvc Persistent Volume Claim for user_projects mount. Pod \u0026lt;deployment/release name\u0026gt;1 oidhost1 Pod/Container for base Oracle Internet Directory Instance which would be populated first with base configuration (like number of sample entries) Pod \u0026lt;deployment/release name\u0026gt;N oidhost2, oidhost3, \u0026hellip; Pod(s)/Container(s) for Oracle Internet Directory Instances Service \u0026lt;deployment/release name\u0026gt;lbr-ldap oid-lbr-ldap Service for LDAP/LDAPS access load balanced across the base Oracle Internet Directory instances Service \u0026lt;deployment/release name\u0026gt; oidhost1, oidhost2, oidhost3, \u0026hellip; Service for LDAP/LDAPS access for each base Oracle Internet Directory instance Ingress \u0026lt;deployment/release name\u0026gt;-ingress-nginx oid-ingress-nginx Ingress Rules for LDAP/LDAPS access. In the table above the \u0026lsquo;Example Name\u0026rsquo; for each Object is based on the value \u0026lsquo;oid\u0026rsquo; as deployment/release name for the Helm chart installation. Ingress Controller Setup There are two types of Ingress controllers supported by this Helm chart. In the sub-sections below, configuration steps for each Controller are described.\nBy default Ingress configuration only supports HTTP and HTTPS Ports/Communication. To allow LDAP and LDAPS communication over TCP, additional configuration is required at Ingress Controller/Implementation level.\nIngress with NGINX Nginx-ingress controller implementation can be deployed/installed in a Kubernetes environment.\nCreate a Kubernetes Namespace Create a Kubernetes namespace to provide a scope for NGINX objects such as pods and services that you create in the environment. To create your namespace, issue the following command:\n$ kubectl create ns mynginx namespace/mynginx created Command helm install to install nginx-ingress related objects like pod, service, deployment, etc. 
Add repository reference to Helm for retrieving/installing Chart for nginx-ingress implementation.\n$ helm repo add ingress-nginx https://kubernetes.github.io/ingress-nginx To install and configure NGINX Ingress issue the following command:\n$ helm install --namespace mynginx \\ --values nginx-ingress-values-override.yaml \\ lbr-nginx \\ ingress-nginx/ingress-nginx \\ --version=3.34.0 \\ --set controller.admissionWebhooks.enabled=false Where:\n lbr-nginx is your deployment name\n ingress-nginx/ingress-nginx is the chart reference\n For more details about the helm command and parameters, please execute helm --help and helm install --help. The --values argument passes a file path/name which overrides values in the chart.\n Output will be something like this:\nNAME: lbr-nginx LAST DEPLOYED: Thu Aug 26 20:05:41 2021 NAMESPACE: mynginx STATUS: deployed REVISION: 1 TEST SUITE: None NOTES: The ingress-nginx controller has been installed. Get the application URL by running these commands: export HTTP_NODE_PORT=$(kubectl --namespace mynginx get services -o jsonpath=\u0026quot;{.spec.ports[0].nodePort}\u0026quot; lbr-nginx-ingress-nginx-controller) export HTTPS_NODE_PORT=$(kubectl --namespace mynginx get services -o jsonpath=\u0026quot;{.spec.ports[1].nodePort}\u0026quot; lbr-nginx-ingress-nginx-controller) export NODE_IP=$(kubectl --namespace mynginx get nodes -o jsonpath=\u0026quot;{.items[0].status.addresses[1].address}\u0026quot;) echo \u0026quot;Visit http://$NODE_IP:$HTTP_NODE_PORT to access your application via HTTP.\u0026quot; echo \u0026quot;Visit https://$NODE_IP:$HTTPS_NODE_PORT to access your application via HTTPS.\u0026quot; An example Ingress that makes use of the controller: apiVersion: networking.k8s.io/v1beta1 kind: Ingress metadata: annotations: kubernetes.io/ingress.class: nginx name: example namespace: foo spec: rules: - host: www.example.com http: paths: - backend: serviceName: exampleService servicePort: 80 path: / # This section is only required if TLS 
is to be enabled for the Ingress tls: nginx-ingress-values-override.yaml\n# Configuration for additional TCP ports to be exposed through Ingress # Format for each port would be like: # \u0026lt;PortNumber\u0026gt;: \u0026lt;Namespace\u0026gt;/\u0026lt;Service\u0026gt; tcp: # Map 1389 TCP port to LBR LDAP service to get requests handled through any available POD/Endpoint serving LDAP Port 3060: oidns/oid-lbr-ldap:3060 # Map 1636 TCP port to LBR LDAP service to get requests handled through any available POD/Endpoint serving LDAPS Port 3131: oidns/oid-lbr-ldap:3131 3061: oidns/oidhost1:3060 3130: oidns/oidhost1:3131 3062: oidns/oidhost2:3060 3132: oidns/oidhost2:3131 3063: oidns/oidhost3:3060 3133: oidns/oidhost3:3131 3064: oidns/oidhost4:3060 3134: oidns/oidhost4:3131 3065: oidns/oidhost5:3060 3135: oidns/oidhost5:3131 controller: admissionWebhooks: enabled: false extraArgs: # The secret referred to by this flag contains the default certificate to be used when accessing the catch-all server. # If this flag is not provided NGINX will use a self-signed certificate. # If the TLS Secret is in different namespace, name can be mentioned as \u0026lt;namespace\u0026gt;/\u0026lt;tlsSecretName\u0026gt; default-ssl-certificate: oidns/oid-tls-cert service: # controller service external IP addresses # externalIPs: # - \u0026lt; External IP Address \u0026gt; # To configure Ingress Controller Service as LoadBalancer type of Service # Based on the Kubernetes configuration, External LoadBalancer would be linked to the Ingress Controller Service type: NodePort # Configuration for NodePort to be used for Ports exposed through Ingress # If NodePorts are not defined/configured, Node Port would be assigned automatically by Kubernetes # These NodePorts are helpful while accessing services directly through Ingress and without having External Load Balancer. 
# nodePorts: # For HTTP Interface exposed through LoadBalancer/Ingress # http: 30080 # For HTTPS Interface exposed through LoadBalancer/Ingress # https: 30443 #tcp: # For LDAP Interface # 3060: 31389 # For LDAPS Interface # 3131: 31636 The configuration above assumes that you have oid installed with value oid as a deployment/release name. Based on the deployment/release name in your environment, TCP port mapping may be required to be changed/updated. List the ports mapped using the following command:\n$ kubectl get all -n mynginx NAME READY STATUS RESTARTS AGE pod/lbr-nginx-ingress-nginx-controller-8644545f5b-8dgg9 0/1 Running 0 17s NAME TYPE CLUSTER-IP EXTERNAL-IP PORT(S) AGE service/lbr-nginx-ingress-nginx-controller NodePort 10.107.39.198 \u0026lt;none\u0026gt; 80:30450/TCP,443:32569/TCP,3060:30395/TCP,3061:30518/TCP,3062:32540/TCP,3130:32086/TCP,3131:31794/TCP,3132:31089/TCP 17s NAME READY UP-TO-DATE AVAILABLE AGE deployment.apps/lbr-nginx-ingress-nginx-controller 0/1 1 0 17s NAME DESIRED CURRENT READY AGE replicaset.apps/lbr-nginx-ingress-nginx-controller-8644545f5b 1 1 0 17s Validate Service Use an LDAP client to connect to the Oracle Internet Directory service, the Oracle ldapbind client for example:\n$ORACLE_HOME//bin/ldapbind -D cn=orcladmin -w welcome1 -h \u0026lt;hostname_ingress\u0026gt; -p 30395 where:\n -p 30395 : is the port mapping to the LDAP port 3060 (3060:30395) -h \u0026lt;hostname_ingress\u0026gt; : is the hostname where the ingress is running Access Oracle Directory Services Manager (ODSM) via a browser using the service port which maps to HTTPS port 443, in this case 32569 (443:32569 from the previous kubectl command). 
Access the following:\n Oracle WebLogic Server Administration Console : https://oid.example.com:32569/console\nWhen prompted, enter the following credentials from your oidoverride.yaml file.\n Username: [adminUser] Password: [adminPassword] Oracle Directory Services Manager : https://oid.example.com:32569/odsm\nSelect Create a New Connection and, when prompted, enter the following values.\n Server: oid.example.com Port: Ingress mapped port for LDAP or LDAPS, in the example above 3060:30395/TCP or 3131:31794/TCP, namely LDAP:30395, LDAPS:31794 SSL Enabled: select if accessing LDAPS. User Name: cn=orcladmin Password: value of orcladminPassword from your oidoverride.yaml file. Configuration Parameters The following table lists the configurable parameters of the oid chart and its default values.\n Parameter Description Default Value replicaCount Number of base Oracle Internet Directory instances/pods/services to be created. 1 restartPolicyName restartPolicy to be configured for each POD containing Oracle Internet Directory instance OnFailure image.repository Oracle Internet Directory Image Registry/Repository and name. Based on this, the image parameter will be configured for Oracle Internet Directory pods/containers oracle/oid image.tag Oracle Internet Directory Image Tag. 
Based on this, the image parameter will be configured for Oracle Internet Directory pods/containers 12.2.1.4.0 image.pullPolicy policy to pull the image IfNotPresent imagePullSecrets.name name of Secret resource containing private registry credentials regcred nameOverride override the fullname with this name fullnameOverride Overrides the fullname with the provided string serviceAccount.create Specifies whether a service account should be created true serviceAccount.name If not set and create is true, a name is generated using the fullname template oid-\u0026lt; fullname \u0026gt;-token-\u0026lt; randomalphanum \u0026gt; podSecurityContext Security context policies to add to the controller pod securityContext Security context policies to add by default service.type Type of Service to be created for OID Interfaces (like LDAP, HTTP, Admin) ClusterIP service.lbrtype Service Type for loadbalancer services exposing LDAP, HTTP interfaces from available/accessible OID pods ClusterIP ingress.enabled true ingress.nginx.http.host Hostname to be used with Ingress Rules. If not set, hostname would be configured according to fullname. Hosts would be configured as \u0026lt; fullname \u0026gt;-http.\u0026lt; domain \u0026gt;, \u0026lt; fullname \u0026gt;-http-0.\u0026lt; domain \u0026gt;, \u0026lt; fullname \u0026gt;-http-1.\u0026lt; domain \u0026gt;, etc. ingress.nginx.http.domain Domain name to be used with Ingress Rules. In ingress rules, hosts would be configured as \u0026lt; host \u0026gt;.\u0026lt; domain \u0026gt;, \u0026lt; host \u0026gt;-0.\u0026lt; domain \u0026gt;, \u0026lt; host \u0026gt;-1.\u0026lt; domain \u0026gt;, etc. ingress.nginx.http.backendPort http ingress.nginx.http.nginxAnnotations { kubernetes.io/ingress.class: “nginx\u0026rdquo; } ingress.nginx.admin.host Hostname to be used with Ingress Rules. If not set, hostname would be configured according to fullname. 
Hosts would be configured as \u0026lt; fullname \u0026gt;-admin.\u0026lt; domain \u0026gt;, \u0026lt; fullname \u0026gt;-admin-0.\u0026lt; domain \u0026gt;, \u0026lt; fullname \u0026gt;-admin-1.\u0026lt; domain \u0026gt;, etc. ingress.nginx.admin.domain Domain name to be used with Ingress Rules. In ingress rules, hosts would be configured as \u0026lt; host \u0026gt;.\u0026lt; domain \u0026gt;, \u0026lt; host \u0026gt;-0.\u0026lt; domain \u0026gt;, \u0026lt; host \u0026gt;-1.\u0026lt; domain \u0026gt;, etc. ingress.nginx.admin.nginxAnnotations { kubernetes.io/ingress.class: “nginx” nginx.ingress.kubernetes.io/backend-protocol: “https\u0026rdquo;} ingress.ingress.tlsSecret Secret name to use an already created TLS Secret. If such secret is not provided, one would be created with name \u0026lt; fullname \u0026gt;-tls-cert. If the TLS Secret is in different namespace, name can be mentioned as \u0026lt; namespace \u0026gt;/\u0026lt; tlsSecretName \u0026gt; ingress.certCN Subject’s common name (cn) for SelfSigned Cert \u0026lt; fullname \u0026gt; ingress.certValidityDays Validity of Self-Signed Cert in days 365 nodeSelector node labels for pod assignment tolerations node taints to tolerate affinity node/pod affinities persistence.enabled If enabled, it will use the persistent volume. 
if value is false, PV and PVC would not be used and pods would be using the default emptyDir mount volume true persistence.pvname pvname to use an already created Persistent Volume , If blank will use the default name oid-\u0026lt; fullname \u0026gt;-pv persistence.pvcname pvcname to use an already created Persistent Volume Claim , If blank will use default name oid-\u0026lt; fullname \u0026gt;-pvc persistence.type supported values: either filesystem or networkstorage or custom filesystem persistence.filesystem.hostPath.path The path location mentioned should be created and accessible from the local host provided with necessary privileges for the user /scratch/shared/oid_user_projects persistence.networkstorage.nfs.path Path of NFS Share location /scratch/shared/oid_user_projects persistence.networkstorage.nfs.server IP or hostname of NFS Server 0.0.0.0 persistence.custom.* Based on values/data, YAML content would be included in PersistenceVolume Object persistence.accessMode Specifies the access mode of the location provided ReadWriteMany persistence.size Specifies the size of the storage 20Gi persistence.storageClass Specifies the storageclass of the persistence volume. manual persistence.annotations specifies any annotations that will be used { } secret.enabled If enabled it will use the secret created with base64 encoding. if value is false, secret would not be used and input values (through –set, –values, etc.) would be used while creation of pods. true secret.name secret name to use an already created Secret oid-\u0026lt; fullname \u0026gt;-creds secret.type Specifies the type of the secret opaque oidPorts.ldap Port on which Oracle Internet Directory Instance in the container should listen for LDAP Communication. 3060 oidPorts.ldaps Port on which Oracle Internet Directory Instance in the container should listen for LDAPS Communication. 
oidConfig.realmDN BaseDN for OID Instances oidConfig.domainName WebLogic Domain Name oid_domain oidConfig.domainHome WebLogic Domain Home /u01/oracle/user_projects/domains/oid_domain oidConfig.orcladminPassword Password for orcladmin user. Value will be added to Secret and Pod(s) will use the Secret oidConfig.dbUser Value for login into db usually sys. Value would be added to Secret and Pod(s) would be using the Secret oidConfig.dbPassword dbPassword is the SYS password for the database. Value would be added to Secret and Pod(s) would be using the Secret oidConfig.dbschemaPassword Password for DB Schema(s) to be created by RCU. Value would be added to Secret and Pod(s) would be using the Secret oidConfig.rcuSchemaPrefix The schema prefix to use in the database, for example OIDPD. oidConfig.rcuDatabaseURL The database URL. Sample: \u0026lt;db_host.domain\u0026gt;:\u0026lt;db_port\u0026gt;/\u0026lt;service_name\u0026gt; oidConfig.sleepBeforeConfig Based on the value for this parameter, initialization/configuration of each OID additional server (oid)n would be delayed and readiness probes would be configured. This is to make sure that OID additional servers (oid)n are initialized in sequence. 600 oidConfig.sslwalletPassword SSL enabled password to be used for ORAPKI deploymentConfig.startupTime Based on the value for this parameter, initialization/configuration of each OID additional servers (oid)n will be delayed and readiness probes would be configured. initialDelaySeconds would be configured as sleepBeforeConfig + startupTime 480 deploymentConfig.livenessProbeInitialDelay Parameter to decide livenessProbe initialDelaySeconds 900 baseOID Configuration for Base OID instance (oid1) baseOID.envVarsConfigMap Reference to ConfigMap which can contain additional environment variables to be passed on to POD for Base OID Instance baseOID.envVars Environment variables in Yaml Map format. 
This is helpful when it's required to pass environment variables through \u0026ndash;values file. List of env variables which would not be honored from envVars map is same as list of env var names mentioned for envVarsConfigMap additionalOID Configuration for additional OID instances (oidN) additionalOID.envVarsConfigMap Reference to ConfigMap which can contain additional environment variables to be passed on to POD for additional OID Instance additionalOID.envVars List of env variables which would not be honored from envVars map is same as list of env var names mentioned for envVarsConfigMap odsm Parameters/Configurations for ODSM Deployment odsm.adminUser Oracle WebLogic Server Administration User odsm.adminPassword Password for Oracle WebLogic Server Administration User odsm.startupTime Expected startup time. After specified seconds readinessProbe will start 900 odsmPorts Configuration for ODSM Ports odsmPorts.http ODSM HTTP Port 7001 odsmPorts.https ODSM HTTPS Port 7002 " +}, +{ + "uri": "/fmw-kubernetes/21.4.2/oud/create-oud-instances/create-oud-instances-helm/oud-ds-rs/", + "title": "Helm Chart: oud-ds-rs: For deployment of replicated Oracle Unified Directory (DS+RS) instances", + "tags": [], + "description": "This document provides details of the oud-ds-rs Helm chart.", + "content": " Introduction Deploy oud-ds-rs Helm Chart Verify the Replication Ingress Controller Setup Ingress with NGINX Access to Interfaces through Ingress Configuration Parameters Introduction This Helm chart provides for the deployment of replicated Oracle Unified Directory (DS+RS) instances on Kubernetes.\nThis chart can be used to deploy an Oracle Unified Directory instance as a base, with configured sample entries, and multiple replicated Oracle Unified Directory instances/pods/services based on the specified replicaCount.\nBased on the configuration, this chart deploys the following objects in the specified namespace of a Kubernetes cluster.\n Service Account Secret Persistent Volume 
and Persistent Volume Claim Pod(s)/Container(s) for Oracle Unified Directory Instances Services for interfaces exposed through Oracle Unified Directory Instances Ingress configuration Create Kubernetes Namespace Create a Kubernetes namespace to provide a scope for other objects such as pods and services that you create in the environment. To create your namespace issue the following command:\n$ kubectl create ns oudns namespace/oudns created Deploy oud-ds-rs Helm Chart Create or Deploy a group of replicated Oracle Unified Directory instances along with Kubernetes objects in a specified namespace using the oud-ds-rs Helm Chart.\nThe deployment can be initiated by running the following Helm command with reference to the oud-ds-rs Helm Chart, along with configuration parameters according to your environment. Before deploying the Helm chart, the namespace should be created. Objects to be created by the Helm chart will be created inside the specified namespace.\n$ cd \u0026lt;work directory\u0026gt;/fmw-kubernetes/OracleUnifiedDirectory/kubernetes/helm $ helm install --namespace \u0026lt;namespace\u0026gt; \\ \u0026lt;Configuration Parameters\u0026gt; \\ \u0026lt;deployment/release name\u0026gt; \\ \u0026lt;Helm Chart Path/Name\u0026gt; Configuration Parameters (override values in chart) can be passed on with --set arguments on the command line and/or with -f / --values arguments when referring to files.\nNote: Example files in the sections below provide values which allow the user to override the default values provided by the Helm chart.\nExamples Example where configuration parameters are passed with --set argument: $ helm install --namespace oudns \\ --set oudConfig.rootUserPassword=Oracle123,persistence.filesystem.hostPath.path=/scratch/shared/oud_user_projects,image.repository=oracle/oud,image.tag=12.2.1.4.0-PSU2020July-20200730 \\ oud-ds-rs oud-ds-rs For more details about the helm command and parameters, please execute helm --help and helm install --help. 
In this example, it is assumed that the command is executed from the directory containing the \u0026lsquo;oud-ds-rs\u0026rsquo; helm chart directory (\u0026lt;work directory\u0026gt;/OracleUnifiedDirectory/kubernetes/helm/). Example where configuration parameters are passed with --values argument: $ helm install --namespace oudns \\ --values oud-ds-rs-values-override.yaml \\ oud-ds-rs oud-ds-rs For more details about the helm command and parameters, please execute helm --help and helm install --help. In this example, it is assumed that the command is executed from the directory containing the \u0026lsquo;oud-ds-rs\u0026rsquo; helm chart directory (OracleUnifiedDirectory/kubernetes/helm/). The --values argument passes a file path/name which overrides values in the chart. oud-ds-rs-values-override.yaml\noudConfig: rootUserPassword: Oracle123 persistence: type: filesystem filesystem: hostPath: path: /scratch/shared/oud_user_projects Example to scale-up through Helm Chart based deployment: In this example, we are setting replicaCount value to 3. If initially, the replicaCount value was 2, we will observe a new Oracle Unified Directory pod with associated services brought up by Kubernetes. So overall, 4 pods will be running now.\nWe have two ways to achieve our goal:\n$ helm upgrade --namespace oudns \\ --set replicaCount=3 \\ oud-ds-rs oud-ds-rs OR\n$ helm upgrade --namespace oudns \\ --values oud-ds-rs-values-override.yaml \\ oud-ds-rs oud-ds-rs oud-ds-rs-values-override.yaml\nreplicaCount: 3 For more details about the helm command and parameters, please execute helm --help and helm install --help. In this example, it is assumed that the command is executed from the directory containing the \u0026lsquo;oud-ds-rs\u0026rsquo; helm chart directory (OracleUnifiedDirectory/kubernetes/helm/). 
Example to apply new Oracle Unified Directory patch through Helm Chart based deployment: In this example, we will apply PSU2020July-20200730 patch on earlier running Oracle Unified Directory version. If we describe pod we will observe that the container is up with new version.\nWe have two ways to achieve our goal:\n$ helm upgrade --namespace oudns \\ --set image.repository=oracle/oud,image.tag=12.2.1.4.0-PSU2020July-20200730 \\ oud-ds-rs oud-ds-rs OR\n$ helm upgrade --namespace oudns \\ --values oud-ds-rs-values-override.yaml \\ oud-ds-rs oud-ds-rs For more details about the helm command and parameters, please execute helm --help and helm install --help. In this example, it is assumed that the command is executed from the directory containing the \u0026lsquo;oud-ds-rs\u0026rsquo; helm chart directory (OracleUnifiedDirectory/kubernetes/helm/). oud-ds-rs-values-override.yaml\nimage: repository: oracle/oud tag: 12.2.1.4.0-PSU2020July-20200730 Example for using NFS as PV Storage: $ helm install --namespace oudns \\ --values oud-ds-rs-values-override-nfs.yaml \\ oud-ds-rs oud-ds-rs For more details about the helm command and parameters, please execute helm --help and helm install --help. In this example, it is assumed that the command is executed from the directory containing the \u0026lsquo;oud-ds-rs\u0026rsquo; helm chart directory (OracleUnifiedDirectory/kubernetes/helm/). The --values argument passes a file path/name which overrides values in the chart. oud-ds-rs-values-override-nfs.yaml\noudConfig: rootUserPassword: Oracle123 persistence: type: networkstorage networkstorage: nfs: path: /scratch/shared/oud_user_projects server: \u0026lt;NFS IP address \u0026gt; Example for using PV type of your choice: $ helm install --namespace oudns \\ --values oud-ds-rs-values-override-pv-custom.yaml \\ oud-ds-rs oud-ds-rs For more details about the helm command and parameters, please execute helm --help and helm install --help. 
In this example, it is assumed that the command is executed from the directory containing the \u0026lsquo;oud-ds-rs\u0026rsquo; helm chart directory (OracleUnifiedDirectory/kubernetes/helm/). The --values argument passes a file path/name which overrides values in the chart. oud-ds-rs-values-override-pv-custom.yaml\noudConfig: rootUserPassword: Oracle123 persistence: type: custom custom: nfs: # Path of NFS Share location path: /scratch/shared/oud_user_projects # IP of NFS Server server: \u0026lt;NFS IP address \u0026gt; Under custom:, the configuration of your choice can be specified. This configuration will be used \u0026lsquo;as-is\u0026rsquo; for the PersistentVolume object. Check Deployment Output for the helm install/upgrade command Output similar to the following is observed following successful execution of helm install/upgrade command.\nNAME: oud-ds-rs LAST DEPLOYED: Tue Mar 31 01:40:05 2020 NAMESPACE: oudns STATUS: deployed REVISION: 1 TEST SUITE: None Check for the status of objects created through oud-ds-rs helm chart Command:\n$ kubectl --namespace oudns get nodes,pod,service,secret,pv,pvc,ingress -o wide Output is similar to the following:\nNAME READY STATUS RESTARTS AGE IP NODE NOMINATED NODE READINESS GATES pod/oud-ds-rs-0 1/1 Running 0 8m44s 10.244.0.195 \u0026lt;Worker Node\u0026gt; \u0026lt;none\u0026gt; \u0026lt;none\u0026gt; pod/oud-ds-rs-1 1/1 Running 0 8m44s 10.244.0.194 \u0026lt;Worker Node\u0026gt; \u0026lt;none\u0026gt; \u0026lt;none\u0026gt; pod/oud-ds-rs-2 0/1 Running 0 8m44s 10.244.0.193 \u0026lt;Worker Node\u0026gt; \u0026lt;none\u0026gt; \u0026lt;none\u0026gt; NAME TYPE CLUSTER-IP EXTERNAL-IP PORT(S) AGE SELECTOR service/oud-ds-rs-0 ClusterIP 10.99.232.83 \u0026lt;none\u0026gt; 1444/TCP,1888/TCP,1898/TCP 8m44s kubernetes.io/instance=oud-ds-rs,app.kubernetes.io/name=oud-ds-rs,oud/instance=oud-ds-rs-0 service/oud-ds-rs-1 ClusterIP 10.100.186.42 \u0026lt;none\u0026gt; 1444/TCP,1888/TCP,1898/TCP 8m45s 
app.kubernetes.io/instance=oud-ds-rs,app.kubernetes.io/name=oud-ds-rs,oud/instance=oud-ds-rs-1 service/oud-ds-rs-2 ClusterIP 10.104.55.53 \u0026lt;none\u0026gt; 1444/TCP,1888/TCP,1898/TCP 8m45s app.kubernetes.io/instance=oud-ds-rs,app.kubernetes.io/name=oud-ds-rs,oud/instance=oud-ds-rs-2 service/oud-ds-rs-http-0 ClusterIP 10.102.116.145 \u0026lt;none\u0026gt; 1080/TCP,1081/TCP 8m45s app.kubernetes.io/instance=oud-ds-rs,app.kubernetes.io/name=oud-ds-rs,oud/instance=oud-ds-rs-0 service/oud-ds-rs-http-1 ClusterIP 10.111.103.84 \u0026lt;none\u0026gt; 1080/TCP,1081/TCP 8m44s app.kubernetes.io/instance=oud-ds-rs,app.kubernetes.io/name=oud-ds-rs,oud/instance=oud-ds-rs-1 service/oud-ds-rs-http-2 ClusterIP 10.105.53.24 \u0026lt;none\u0026gt; 1080/TCP,1081/TCP 8m45s app.kubernetes.io/instance=oud-ds-rs,app.kubernetes.io/name=oud-ds-rs,oud/instance=oud-ds-rs-2 service/oud-ds-rs-lbr-admin ClusterIP 10.98.39.206 \u0026lt;none\u0026gt; 1888/TCP,1444/TCP 8m45s app.kubernetes.io/instance=oud-ds-rs,app.kubernetes.io/name=oud-ds-rs service/oud-ds-rs-lbr-http ClusterIP 10.110.77.132 \u0026lt;none\u0026gt; 1080/TCP,1081/TCP 8m45s app.kubernetes.io/instance=oud-ds-rs,app.kubernetes.io/name=oud-ds-rs service/oud-ds-rs-lbr-ldap ClusterIP 10.111.55.122 \u0026lt;none\u0026gt; 1389/TCP,1636/TCP 8m45s app.kubernetes.io/instance=oud-ds-rs,app.kubernetes.io/name=oud-ds-rs service/oud-ds-rs-ldap-0 ClusterIP 10.108.155.81 \u0026lt;none\u0026gt; 1389/TCP,1636/TCP 8m44s app.kubernetes.io/instance=oud-ds-rs,app.kubernetes.io/name=oud-ds-rs,oud/instance=oud-ds-rs-0 service/oud-ds-rs-ldap-1 ClusterIP 10.104.88.44 \u0026lt;none\u0026gt; 1389/TCP,1636/TCP 8m45s app.kubernetes.io/instance=oud-ds-rs,app.kubernetes.io/name=oud-ds-rs,oud/instance=oud-ds-rs-1 service/oud-ds-rs-ldap-2 ClusterIP 10.105.253.120 \u0026lt;none\u0026gt; 1389/TCP,1636/TCP 8m45s app.kubernetes.io/instance=oud-ds-rs,app.kubernetes.io/name=oud-ds-rs,oud/instance=oud-ds-rs-2 NAME TYPE DATA AGE secret/default-token-tbjr5 
kubernetes.io/service-account-token 3 25d secret/oud-ds-rs-creds opaque 8 8m48s secret/oud-ds-rs-token-cct26 kubernetes.io/service-account-token 3 8m50s secret/sh.helm.release.v1.oud-ds-rs.v1 helm.sh/release.v1 1 8m51s NAME CAPACITY ACCESS MODES RECLAIM POLICY STATUS CLAIM STORAGECLASS REASON AGE persistentvolume/oud-ds-rs-pv 20Gi RWX Retain Bound oudns/oud-ds-rs-pvc manual 8m47s NAME STATUS VOLUME CAPACITY ACCESS MODES STORAGECLASS AGE persistentvolumeclaim/oud-ds-rs-pvc Bound oud-ds-rs-pv 20Gi RWX manual 8m48s NAME HOSTS ADDRESS PORTS AGE ingress.extensions/oud-ds-rs-admin-ingress-nginx oud-ds-rs-admin-0,oud-ds-rs-admin-1,oud-ds-rs-admin-2 + 2 more... 10.229.141.78 80 8m45s ingress.extensions/oud-ds-rs-http-ingress-nginx oud-ds-rs-http-0,oud-ds-rs-http-1,oud-ds-rs-http-2 + 3 more... 10.229.141.78 80 8m45s Kubernetes Objects Kubernetes objects created by the Helm chart are detailed in the table below:\n Type Name Example Name Purpose Service Account \u0026lt;deployment/release name\u0026gt; oud-ds-rs Kubernetes Service Account for the Helm Chart deployment Secret \u0026lt;deployment/release name\u0026gt;-creds oud-ds-rs-creds Secret object for Oracle Unified Directory related critical values like passwords Persistent Volume \u0026lt;deployment/release name\u0026gt;-pv oud-ds-rs-pv Persistent Volume for user_projects mount. Persistent Volume Claim \u0026lt;deployment/release name\u0026gt;-pvc oud-ds-rs-pvc Persistent Volume Claim for user_projects mount. Persistent Volume \u0026lt;deployment/release name\u0026gt;-pv-config oud-ds-rs-pv-config Persistent Volume for mounting volume in containers for configuration files like ldif, schema, jks, java.security, etc. Persistent Volume Claim \u0026lt;deployment/release name\u0026gt;-pvc-config oud-ds-rs-pvc-config Persistent Volume Claim for mounting volume in containers for configuration files like ldif, schema, jks, java.security, etc. 
Pod \u0026lt;deployment/release name\u0026gt;-0 oud-ds-rs-0 Pod/Container for base Oracle Unified Directory Instance which would be populated first with base configuration (like number of sample entries) Pod \u0026lt;deployment/release name\u0026gt;-N oud-ds-rs-1, oud-ds-rs-2, \u0026hellip; Pod(s)/Container(s) for Oracle Unified Directory Instances - each would have replication enabled against base Oracle Unified Directory instance \u0026lt;deployment/release name\u0026gt;-0 Service \u0026lt;deployment/release name\u0026gt;-0 oud-ds-rs-0 Service for LDAPS Admin, REST Admin and Replication interfaces from base Oracle Unified Directory instance \u0026lt;deployment/release name\u0026gt;-0 Service \u0026lt;deployment/release name\u0026gt;-http-0 oud-ds-rs-http-0 Service for HTTP and HTTPS interfaces from base Oracle Unified Directory instance \u0026lt;deployment/release name\u0026gt;-0 Service \u0026lt;deployment/release name\u0026gt;-ldap-0 oud-ds-rs-ldap-0 Service for LDAP and LDAPS interfaces from base Oracle Unified Directory instance \u0026lt;deployment/release name\u0026gt;-0 Service \u0026lt;deployment/release name\u0026gt;-N oud-ds-rs-1, oud-ds-rs-2, \u0026hellip; Service(s) for LDAPS Admin, REST Admin and Replication interfaces from base Oracle Unified Directory instance \u0026lt;deployment/release name\u0026gt;-N Service \u0026lt;deployment/release name\u0026gt;-http-N oud-ds-rs-http-1, oud-ds-rs-http-2, \u0026hellip; Service(s) for HTTP and HTTPS interfaces from base Oracle Unified Directory instance \u0026lt;deployment/release name\u0026gt;-N Service \u0026lt;deployment/release name\u0026gt;-ldap-N oud-ds-rs-ldap-1, oud-ds-rs-ldap-2, \u0026hellip; Service(s) for LDAP and LDAPS interfaces from base Oracle Unified Directory instance \u0026lt;deployment/release name\u0026gt;-N Service \u0026lt;deployment/release name\u0026gt;-lbr-admin oud-ds-rs-lbr-admin Service for LDAPS Admin, REST Admin and Replication interfaces from all Oracle Unified Directory instances 
Service \u0026lt;deployment/release name\u0026gt;-lbr-http oud-ds-rs-lbr-http Service for HTTP and HTTPS interfaces from all Oracle Unified Directory instances Service \u0026lt;deployment/release name\u0026gt;-lbr-ldap oud-ds-rs-lbr-ldap Service for LDAP and LDAPS interfaces from all Oracle Unified Directory instances Ingress \u0026lt;deployment/release name\u0026gt;-admin-ingress-nginx oud-ds-rs-admin-ingress-nginx Ingress Rules for HTTP Admin interfaces. Ingress \u0026lt;deployment/release name\u0026gt;-http-ingress-nginx oud-ds-rs-http-ingress-nginx Ingress Rules for HTTP (Data/REST) interfaces. In the table above the \u0026lsquo;Example Name\u0026rsquo; for each Object is based on the value \u0026lsquo;oud-ds-rs\u0026rsquo; as deployment/release name for the Helm chart installation. Verify the Replication Once all the PODs created are visible as READY (i.e. 1/1), you can verify your replication across multiple Oracle Unified Directory instances.\nTo verify the replication group, connect to the container and issue an Oracle Unified Directory Administration command to show details. 
You can get the name of the container by issuing the following:\n$ kubectl get pods -n \u0026lt;namespace\u0026gt; -o jsonpath='{.items[*].spec.containers[*].name}' For example:\n$ kubectl get pods -n oudns -o jsonpath='{.items[*].spec.containers[*].name}' oud-ds-rs With the container name you can then connect to the container:\n$ kubectl --namespace \u0026lt;namespace\u0026gt; exec -it -c \u0026lt;containername\u0026gt; \u0026lt;podname\u0026gt; /bin/bash For example:\n$ kubectl --namespace oudns exec -it -c oud-ds-rs oud-ds-rs-0 /bin/bash From the prompt, use the dsreplication command to check the status of your replication group:\n$ cd /u01/oracle/user_projects/oud-ds-rs-0/OUD/bin $ ./dsreplication status --trustAll \\ --hostname oud-ds-rs-0 --port 1444 --adminUID admin \\ --dataToDisplay compat-view --dataToDisplay rs-connections Output will be similar to the following (enter credentials where prompted):\n\u0026gt;\u0026gt;\u0026gt;\u0026gt; Specify Oracle Unified Directory LDAP connection parameters Password for user 'admin': Establishing connections and reading configuration ..... Done. dc=example,dc=com - Replication Enabled ======================================= Server : Entries : M.C. [1] : A.O.M.C. [2] : Port [3] : Encryption [4] : Trust [5] : U.C. 
[6] : Status [7] : ChangeLog [8] : Group ID [9] : Connected To [10] ---------------------:---------:----------:--------------:----------:----------------:-----------:----------:------------:---------------:--------------:------------------------------- oud-ds-rs-0:1444 : 1 : 0 : 0 : 1898 : Disabled : Trusted : -- : Normal : Enabled : 1 : oud-ds-rs-0:1898 : : : : : : : : : : : (GID=1) oud-ds-rs-1:1444 : 1 : 0 : 0 : 1898 : Disabled : Trusted : -- : Normal : Enabled : 1 : oud-ds-rs-1:1898 : : : : : : : : : : : (GID=1) oud-ds-rs-2:1444 : 1 : 0 : 0 : 1898 : Disabled : Trusted : -- : Normal : Enabled : 1 : oud-ds-rs-2:1898 : : : : : : : : : : : (GID=1) Replication Server [11] : RS #1 : RS #2 : RS #3 -------------------------------:-------:-------:------ oud-ds-rs-0:1898 : -- : Yes : Yes (#1) : : : oud-ds-rs-1:1898 : Yes : -- : Yes (#2) : : : oud-ds-rs-2:1898 : Yes : Yes : -- (#3) : : : [1] The number of changes that are still missing on this element (and that have been applied to at least one other server). [2] Age of oldest missing change: the age (in seconds) of the oldest change that has not yet arrived on this element. [3] The replication port used to communicate between the servers whose contents are being replicated. [4] Whether the replication communication initiated by this element is encrypted or not. [5] Whether the directory server is trusted or not. Updates coming from an untrusted server are discarded and not propagated. [6] The number of untrusted changes. These are changes generated on this server while it is untrusted. Those changes are not propagated to the rest of the topology but are effective on the untrusted server. [7] The status of the replication on this element. [8] Whether the external change log is enabled for the base DN on this server or not. [9] The ID of the replication group to which the server belongs. [10] The replication server this server is connected to with its group ID between brackets. 
[11] This table represents the connections between the replication servers. The headers of the columns use a number as identifier for each replication server. See the values of the first column to identify the corresponding replication server for each number. The dsreplication status command can be additionally invoked using the following syntax:\n$ kubectl --namespace \u0026lt;namespace\u0026gt; exec -it -c \u0026lt;containername\u0026gt; \u0026lt;podname\u0026gt; -- \\ /u01/oracle/user_projects/\u0026lt;OUD Instance/Pod Name\u0026gt;/OUD/bin/dsreplication status \\ --trustAll --hostname \u0026lt;OUD Instance/Pod Name\u0026gt; --port 1444 --adminUID admin \\ --dataToDisplay compat-view --dataToDisplay rs-connections For example:\n$ kubectl --namespace oudns exec -it -c oud-ds-rs oud-ds-rs-0 -- \\ /u01/oracle/user_projects/oud-ds-rs-0/OUD/bin/dsreplication status \\ --trustAll --hostname oud-ds-rs-0 --port 1444 --adminUID admin \\ --dataToDisplay compat-view --dataToDisplay rs-connections Ingress Controller Setup There are two types of Ingress controllers supported by this Helm chart. In the sub-sections below, configuration steps for each Controller are described.\nBy default Ingress configuration only supports HTTP and HTTPS Ports/Communication. To allow LDAP and LDAPS communication over TCP, configuration is required at Ingress Controller/Implementation level.\nIngress with NGINX Nginx-ingress controller implementation can be deployed/installed in a Kubernetes environment.\nCreate a Kubernetes Namespace Create a Kubernetes namespace to provide a scope for NGINX objects such as pods and services that you create in the environment. To create your namespace issue the following command:\n$ kubectl create ns mynginx namespace/mynginx created Add Repo reference to Helm for retrieving/installing Chart for nginx-ingress implementation. 
$ helm repo add ingress-nginx https://kubernetes.github.io/ingress-nginx Confirm the charts available by issuing the following command:\n$ helm search repo | grep nginx ingress-nginx/ingress-nginx 4.0.1 1.0.0 Ingress controller for Kubernetes using NGINX a... stable/ingress-nginx 4.0.1 1.0.0 Ingress controller for Kubernetes using NGINX a... Command helm install to install nginx-ingress related objects like pod, service, deployment, etc. To install and configure NGINX Ingress issue the following command:\n$ helm install --namespace mynginx \\ --values nginx-ingress-values-override.yaml \\ lbr-nginx ingress-nginx/ingress-nginx --version=3.34.0 Where:\n lbr-nginx is your deployment name ingress-nginx/ingress-nginx is the chart reference Output will be similar to the following:\n$ helm install --namespace mynginx --values samples/nginx-ingress-values-override.yaml lbr-nginx ingress-nginx/ingress-nginx --version=3.34.0 NAME: lbr-nginx LAST DEPLOYED: Wed Oct 7 08:07:29 2020 NAMESPACE: mynginx STATUS: deployed REVISION: 1 TEST SUITE: None NOTES: The ingress-nginx controller has been installed. It may take a few minutes for the LoadBalancer IP to be available. 
You can watch the status by running 'kubectl --namespace mynginx get services -o wide -w lbr-nginx-ingress-nginx-controller' An example Ingress that makes use of the controller: apiVersion: networking.k8s.io/v1beta1 kind: Ingress metadata: annotations: kubernetes.io/ingress.class: nginx name: example namespace: foo spec: rules: - host: www.example.com http: paths: - backend: serviceName: exampleService servicePort: 80 path: / # This section is only required if TLS is to be enabled for the Ingress tls: - hosts: - www.example.com secretName: example-tls If TLS is enabled for the Ingress, a Secret containing the certificate and key must also be provided: apiVersion: v1 kind: Secret metadata: name: example-tls namespace: foo data: tls.crt: \u0026lt;base64 encoded cert\u0026gt; tls.key: \u0026lt;base64 encoded key\u0026gt; type: kubernetes.io/tls For more details about the helm command and parameters, please execute helm --help and helm install --help. The --values argument passes a file path/name which overrides values in the chart. nginx-ingress-values-override.yaml\n# Configuration for additional TCP ports to be exposed through Ingress # Format for each port would be like: # \u0026lt;PortNumber\u0026gt;: \u0026lt;Namespace\u0026gt;/\u0026lt;Service\u0026gt; tcp: # Map 1389 TCP port to LBR LDAP service to get requests handled through any available POD/Endpoint serving LDAP Port 1389: oudns/oud-ds-rs-lbr-ldap:ldap # Map 1636 TCP port to LBR LDAP service to get requests handled through any available POD/Endpoint serving LDAPS Port 1636: oudns/oud-ds-rs-lbr-ldap:ldaps controller: admissionWebhooks: enabled: false extraArgs: # The secret referred to by this flag contains the default certificate to be used when accessing the catch-all server. # If this flag is not provided NGINX will use a self-signed certificate. 
# If the TLS Secret is in different namespace, name can be mentioned as \u0026lt;namespace\u0026gt;/\u0026lt;tlsSecretName\u0026gt; default-ssl-certificate: oudns/oud-ds-rs-tls-cert service: # controller service external IP addresses # externalIPs: # - \u0026lt; External IP Address \u0026gt; # To configure Ingress Controller Service as LoadBalancer type of Service # Based on the Kubernetes configuration, External LoadBalancer would be linked to the Ingress Controller Service type: LoadBalancer # Configuration for NodePort to be used for Ports exposed through Ingress # If NodePorts are not defined/configured, Node Port would be assigned automatically by Kubernetes # These NodePorts are helpful while accessing services directly through Ingress and without having External Load Balancer. nodePorts: # For HTTP Interface exposed through LoadBalancer/Ingress http: 30080 # For HTTPS Interface exposed through LoadBalancer/Ingress https: 30443 tcp: # For LDAP Interface 1389: 31389 # For LDAPS Interface 1636: 31636 The configuration above assumes that you have oud-ds-rs installed with value oud-ds-rs as a deployment/release name. Based on the deployment/release name in your environment, TCP port mapping may be required to be changed/updated. Optional: Command helm upgrade to update nginx-ingress related objects like pod, service, deployment, etc. If required, an nginx-ingress deployment can be updated/upgraded with following command. 
In this example, nginx-ingress configuration is updated with an additional TCP port and Node Port for accessing the LDAP/LDAPS port of a specific POD.\n$ helm upgrade --namespace mynginx \\ --values /samples/nginx-ingress-values-override.yaml \\ lbr-nginx ingress-nginx/ingress-nginx --version=3.34.0 helm upgrade \u0026ndash;namespace mynginx \u0026ndash;values samples/nginx-ingress-values-override.yaml lbr-nginx ingress-nginx/nginx-ingress \u0026ndash;version=3.34.0\n For more details about the helm command and parameters, please execute helm --help and helm install --help. The --values argument passes a file path/name which overrides values in the chart. nginx-ingress-values-override.yaml\n# Configuration for additional TCP ports to be exposed through Ingress # Format for each port would be like: # \u0026lt;PortNumber\u0026gt;: \u0026lt;Namespace\u0026gt;/\u0026lt;Service\u0026gt; tcp: # Map 1389 TCP port to LBR LDAP service to get requests handled through any available POD/Endpoint serving LDAP Port 1389: oudns/oud-ds-rs-lbr-ldap:ldap # Map 1636 TCP port to LBR LDAP service to get requests handled through any available POD/Endpoint serving LDAPS Port 1636: oudns/oud-ds-rs-lbr-ldap:ldaps # Map specific ports for LDAP and LDAPS communication from individual Services/Pods # To redirect requests on 3890 port to oudns/oud-ds-rs-ldap-0:ldap 3890: oudns/oud-ds-rs-ldap-0:ldap # To redirect requests on 6360 port to oudns/oud-ds-rs-ldaps-0:ldap 6360: oudns/oud-ds-rs-ldap-0:ldaps # To redirect requests on 3891 port to oudns/oud-ds-rs-ldap-1:ldap 3891: oudns/oud-ds-rs-ldap-1:ldap # To redirect requests on 6361 port to oudns/oud-ds-rs-ldaps-1:ldap 6361: oudns/oud-ds-rs-ldap-1:ldaps # To redirect requests on 3892 port to oudns/oud-ds-rs-ldap-2:ldap 3892: oudns/oud-ds-rs-ldap-2:ldap # To redirect requests on 6362 port to oudns/oud-ds-rs-ldaps-2:ldap 6362: oudns/oud-ds-rs-ldap-2:ldaps # Map 1444 TCP port to LBR Admin service to get requests handled through any available 
POD/Endpoint serving Admin LDAPS Port 1444: oudns/oud-ds-rs-lbr-admin:adminldaps # To redirect requests on 4440 port to oudns/oud-ds-rs-0:adminldaps 4440: oudns/oud-ds-rs-0:adminldaps # To redirect requests on 4441 port to oudns/oud-ds-rs-1:adminldaps 4441: oudns/oud-ds-rs-1:adminldaps # To redirect requests on 4442 port to oudns/oud-ds-rs-2:adminldaps 4442: oudns/oud-ds-rs-2:adminldaps controller: admissionWebhooks: enabled: false extraArgs: # The secret referred to by this flag contains the default certificate to be used when accessing the catch-all server. # If this flag is not provided NGINX will use a self-signed certificate. # If the TLS Secret is in different namespace, name can be mentioned as \u0026lt;namespace\u0026gt;/\u0026lt;tlsSecretName\u0026gt; default-ssl-certificate: oudns/oud-ds-rs-tls-cert service: # controller service external IP addresses # externalIPs: # - \u0026lt; External IP Address \u0026gt; # To configure Ingress Controller Service as LoadBalancer type of Service # Based on the Kubernetes configuration, External LoadBalancer would be linked to the Ingress Controller Service type: LoadBalancer # Configuration for NodePort to be used for Ports exposed through Ingress # If NodePorts are not defined/configured, Node Port would be assigned automatically by Kubernetes # These NodePorts are helpful while accessing services directly through Ingress and without having External Load Balancer. 
nodePorts: # For HTTP Interface exposed through LoadBalancer/Ingress http: 30080 # For HTTPS Interface exposed through LoadBalancer/Ingress https: 30443 tcp: # For LDAP Interface referring to LBR LDAP services serving LDAP port 1389: 31389 # For LDAPS Interface referring to LBR LDAP services serving LDAPS port 1636: 31636 # For LDAP Interface from specific service oud-ds-rs-ldap-0 3890: 30890 # For LDAPS Interface from specific service oud-ds-rs-ldap-0 6360: 30360 # For LDAP Interface from specific service oud-ds-rs-ldap-1 3891: 30891 # For LDAPS Interface from specific service oud-ds-rs-ldap-1 6361: 30361 # For LDAP Interface from specific service oud-ds-rs-ldap-2 3892: 30892 # For LDAPS Interface from specific service oud-ds-rs-ldap-2 6362: 30362 # For LDAPS Interface referring to LBR Admin services serving adminldaps port 1444: 31444 # For Admin LDAPS Interface from specific service oud-ds-rs-0 4440: 30440 # For Admin LDAPS Interface from specific service oud-ds-rs-1 4441: 30441 # For Admin LDAPS Interface from specific service oud-ds-rs-2 4442: 30442 The configuration above assumes that you have oud-ds-rs installed with value oud-ds-rs as a deployment/release name. Based on the deployment/release name in your environment, TCP port mapping may be required to be changed/updated. Access to Interfaces through Ingress Using the Helm chart, Ingress objects are also created according to configuration. 
The following table details the rules configured in Ingress object(s) for access to Oracle Unified Directory Interfaces through Ingress.\n Port NodePort Host Example Hostname Path Backend Service:Port Example Service Name:Port http/https 30080/30443 \u0026lt;deployment/release name\u0026gt;-admin-0 oud-ds-rs-admin-0 * \u0026lt;deployment/release name\u0026gt;-0:adminhttps oud-ds-rs-0:adminhttps http/https 30080/30443 \u0026lt;deployment/release name\u0026gt;-admin-N oud-ds-rs-admin-N * \u0026lt;deployment/release name\u0026gt;-N:adminhttps oud-ds-rs-1:adminhttps http/https 30080/30443 \u0026lt;deployment/release name\u0026gt;-admin oud-ds-rs-admin * \u0026lt;deployment/release name\u0026gt;-lbr-admin:adminhttps oud-ds-rs-lbr-admin:adminhttps http/https 30080/30443 * * /rest/v1/admin \u0026lt;deployment/release name\u0026gt;-lbr-admin:adminhttps oud-ds-rs-lbr-admin:adminhttps http/https 30080/30443 \u0026lt;deployment/release name\u0026gt;-http-0 oud-ds-rs-http-0 * \u0026lt;deployment/release name\u0026gt;-http-0:http oud-ds-rs-http-0:http http/https 30080/30443 \u0026lt;deployment/release name\u0026gt;-http-N oud-ds-rs-http-N * \u0026lt;deployment/release name\u0026gt;-http-N:http oud-ds-rs-http-N:http http/https 30080/30443 \u0026lt;deployment/release name\u0026gt;-http oud-ds-rs-http * \u0026lt;deployment/release name\u0026gt;-lbr-http:http oud-ds-rs-lbr-http:http http/https 30080/30443 * * /rest/v1/directory \u0026lt;deployment/release name\u0026gt;-lbr-http:http oud-ds-rs-lbr-http:http http/https 30080/30443 * * /iam/directory \u0026lt;deployment/release name\u0026gt;-lbr-http:http oud-ds-rs-lbr-http:http In the table above, example values are based on the value \u0026lsquo;oud-ds-rs\u0026rsquo; as the deployment/release name for Helm chart installation.The NodePorts mentioned in the table are according to Ingress configuration described in previous section.When External LoadBalancer is not available/configured, Interfaces can be accessed through NodePort on a 
Kubernetes Node.\n For LDAP/LDAPS access (based on the updated/upgraded configuration mentioned in previous section)\n Port NodePort Backend Service:Port Example Service Name:Port 1389 31389 \u0026lt;deployment/release name\u0026gt;-lbr-ldap:ldap oud-ds-rs-lbr-ldap:ldap 1636 31636 \u0026lt;deployment/release name\u0026gt;-lbr-ldap:ldaps oud-ds-rs-lbr-ldap:ldaps 1444 31444 \u0026lt;deployment/release name\u0026gt;-lbr-admin:adminldaps oud-ds-rs-lbr-admin:adminldaps 3890 30890 \u0026lt;deployment/release name\u0026gt;-ldap-0:ldap oud-ds-rs-ldap-0:ldap 6360 30360 \u0026lt;deployment/release name\u0026gt;-ldap-0:ldaps oud-ds-rs-ldap-0:ldaps 3891 30891 \u0026lt;deployment/release name\u0026gt;-ldap-1:ldap oud-ds-rs-ldap-1:ldap 6361 30361 \u0026lt;deployment/release name\u0026gt;-ldap-1:ldaps oud-ds-rs-ldap-1:ldaps 3892 30892 \u0026lt;deployment/release name\u0026gt;-ldap-2:ldap oud-ds-rs-ldap-2:ldap 6362 30362 \u0026lt;deployment/release name\u0026gt;-ldap-2:ldaps oud-ds-rs-ldap-2:ldaps 4440 30440 \u0026lt;deployment/release name\u0026gt;-0:adminldaps oud-ds-rs-0:adminldaps 4441 30441 \u0026lt;deployment/release name\u0026gt;-1:adminldaps oud-ds-rs-1:adminldaps 4442 30442 \u0026lt;deployment/release name\u0026gt;-2:adminldaps oud-ds-rs-2:adminldaps In the table above, example values are based on value \u0026lsquo;oud-ds-rs\u0026rsquo; as the deployment/release name for helm chart installation. The NodePorts mentioned in the table are according to Ingress configuration described in previous section. When external LoadBalancer is not available/configured, Interfaces can be accessed through NodePort on a Kubernetes Node. 
Changes in /etc/hosts to validate hostname based Ingress rules If it is not possible to have a LoadBalancer configuration updated to have host names added for Oracle Unified Directory Interfaces then the following entries can be added in /etc/hosts files on host from where Oracle Unified Directory interfaces will be accessed.\n\u0026lt;IP Address of External LBR or Kubernetes Node\u0026gt;\toud-ds-rs-http oud-ds-rs-http-0 oud-ds-rs-http-1 oud-ds-rs-http-2 oud-ds-rs-http-N \u0026lt;IP Address of External LBR or Kubernetes Node\u0026gt;\toud-ds-rs-admin oud-ds-rs-admin-0 oud-ds-rs-admin-1 oud-ds-rs-admin-2 oud-ds-rs-admin-N In the table above, host names are based on the value \u0026lsquo;oud-ds-rs\u0026rsquo; as the deployment/release name for Helm chart installation. When External LoadBalancer is not available/configured, Interfaces can be accessed through NodePort on Kubernetes Node. Validate access HTTPS/REST API against External LBR Host:Port Note: For commands mentioned in this section you need to have an external IP assigned at Ingress level.\na) Command to invoke Data REST API:\n$curl --noproxy \u0026quot;*\u0026quot; --location \\ --request GET 'https://\u0026lt;External LBR Host\u0026gt;/rest/v1/directory/uid=user.1,ou=People,dc=example,dc=com?scope=sub\u0026amp;attributes=*' \\ --header 'Authorization: Basic \u0026lt;Base64 of userDN:userPassword\u0026gt;' | json_pp | json_pp is used to format output in readable json format on the client side. It can be ignored if you do not have the json_pp library. 
Base64 of userDN:userPassword can be generated using echo -n \u0026quot;userDN:userPassword\u0026quot; | base64 Output:\n{ \u0026#34;msgType\u0026#34; : \u0026#34;urn:ietf:params:rest:schemas:oracle:oud:1.0:SearchResponse\u0026#34;, \u0026#34;totalResults\u0026#34; : 1, \u0026#34;searchResultEntries\u0026#34; : [ { \u0026#34;dn\u0026#34; : \u0026#34;uid=user.1,ou=People,dc=example,dc=com\u0026#34;, \u0026#34;attributes\u0026#34; : { \u0026#34;st\u0026#34; : \u0026#34;OH\u0026#34;, \u0026#34;employeeNumber\u0026#34; : \u0026#34;1\u0026#34;, \u0026#34;postalCode\u0026#34; : \u0026#34;93694\u0026#34;, \u0026#34;description\u0026#34; : \u0026#34;This is the description for Aaren Atp.\u0026#34;, \u0026#34;telephoneNumber\u0026#34; : \u0026#34;+1 390 103 6917\u0026#34;, \u0026#34;homePhone\u0026#34; : \u0026#34;+1 280 375 4325\u0026#34;, \u0026#34;initials\u0026#34; : \u0026#34;ALA\u0026#34;, \u0026#34;objectClass\u0026#34; : [ \u0026#34;top\u0026#34;, \u0026#34;inetorgperson\u0026#34;, \u0026#34;organizationalperson\u0026#34;, \u0026#34;person\u0026#34; ], \u0026#34;uid\u0026#34; : \u0026#34;user.1\u0026#34;, \u0026#34;sn\u0026#34; : \u0026#34;Atp\u0026#34;, \u0026#34;street\u0026#34; : \u0026#34;70110 Fourth Street\u0026#34;, \u0026#34;mobile\u0026#34; : \u0026#34;+1 680 734 6300\u0026#34;, \u0026#34;givenName\u0026#34; : \u0026#34;Aaren\u0026#34;, \u0026#34;mail\u0026#34; : \u0026#34;user.1@maildomain.net\u0026#34;, \u0026#34;l\u0026#34; : \u0026#34;New Haven\u0026#34;, \u0026#34;postalAddress\u0026#34; : \u0026#34;Aaren Atp$70110 Fourth Street$New Haven, OH 93694\u0026#34;, \u0026#34;pager\u0026#34; : \u0026#34;+1 850 883 8888\u0026#34;, \u0026#34;cn\u0026#34; : \u0026#34;Aaren Atp\u0026#34; } } ] } b) Command to invoke Data REST API against specific Oracle Unified Directory Interface:\n$ curl --noproxy \u0026quot;*\u0026quot; --location \\ --request GET 
'https://oud-ds-rs-http-0/rest/v1/directory/uid=user.1,ou=People,dc=example,dc=com?scope=sub\u0026amp;attributes=*' \\ --header 'Authorization: Basic \u0026lt;Base64 of userDN:userPassword\u0026gt;' | json_pp | json_pp is used to format output in readable json format on the client side. It can be ignored if you do not have the json_pp library. Base64 of userDN:userPassword can be generated using echo -n \u0026quot;userDN:userPassword\u0026quot; | base64. For this example, it is assumed that the value \u0026lsquo;oud-ds-rs\u0026rsquo; is used as the deployment/release name for helm chart installation. It is assumed that \u0026lsquo;oud-ds-rs-http-0\u0026rsquo; points to an External LoadBalancer HTTPS/REST API against Kubernetes NodePort for Ingress Controller Service a) Command to invoke Data SCIM API:\n$ curl --noproxy \u0026quot;*\u0026quot; --location \\ --request GET 'https://\u0026lt;Kubernetes Node\u0026gt;:30443/iam/directory/oud/scim/v1/Users' \\ --header 'Authorization: Basic \u0026lt;Base64 of userDN:userPassword\u0026gt;' | json_pp | json_pp is used to format output in readable json format on the client side. It can be ignored if you do not have the json_pp library. Base64 of userDN:userPassword can be generated using echo -n \u0026quot;userDN:userPassword\u0026quot; | base64. 
Output:\n{ \u0026#34;Resources\u0026#34; : [ { \u0026#34;id\u0026#34; : \u0026#34;ad55a34a-763f-358f-93f9-da86f9ecd9e4\u0026#34;, \u0026#34;userName\u0026#34; : [ { \u0026#34;value\u0026#34; : \u0026#34;user.0\u0026#34; } ], \u0026#34;schemas\u0026#34; : [ \u0026#34;urn:ietf:params:scim:schemas:core:2.0:User\u0026#34;, \u0026#34;urn:ietf:params:scim:schemas:extension:oracle:2.0:OUD:User\u0026#34;, \u0026#34;urn:ietf:params:scim:schemas:extension:enterprise:2.0:User\u0026#34; ], \u0026#34;meta\u0026#34; : { \u0026#34;location\u0026#34; : \u0026#34;http://idm-oke-lbr/iam/directory/oud/scim/v1/Users/ad55a34a-763f-358f-93f9-da86f9ecd9e4\u0026#34;, \u0026#34;resourceType\u0026#34; : \u0026#34;User\u0026#34; }, \u0026#34;addresses\u0026#34; : [ { \u0026#34;postalCode\u0026#34; : \u0026#34;50369\u0026#34;, \u0026#34;formatted\u0026#34; : \u0026#34;Aaccf Amar$01251 Chestnut Street$Panama City, DE 50369\u0026#34;, \u0026#34;streetAddress\u0026#34; : \u0026#34;01251 Chestnut Street\u0026#34;, \u0026#34;locality\u0026#34; : \u0026#34;Panama City\u0026#34;, \u0026#34;region\u0026#34; : \u0026#34;DE\u0026#34; } ], \u0026#34;urn:ietf:params:scim:schemas:extension:oracle:2.0:OUD:User\u0026#34; : { \u0026#34;description\u0026#34; : [ { \u0026#34;value\u0026#34; : \u0026#34;This is the description for Aaccf Amar.\u0026#34; } ], \u0026#34;mobile\u0026#34; : [ { \u0026#34;value\u0026#34; : \u0026#34;+1 010 154 3228\u0026#34; } ], \u0026#34;pager\u0026#34; : [ { \u0026#34;value\u0026#34; : \u0026#34;+1 779 041 6341\u0026#34; } ], \u0026#34;objectClass\u0026#34; : [ { \u0026#34;value\u0026#34; : \u0026#34;top\u0026#34; }, { \u0026#34;value\u0026#34; : \u0026#34;organizationalperson\u0026#34; }, { \u0026#34;value\u0026#34; : \u0026#34;person\u0026#34; }, { \u0026#34;value\u0026#34; : \u0026#34;inetorgperson\u0026#34; } ], \u0026#34;initials\u0026#34; : [ { \u0026#34;value\u0026#34; : \u0026#34;ASA\u0026#34; } ], \u0026#34;homePhone\u0026#34; : [ { \u0026#34;value\u0026#34; : 
\u0026#34;+1 225 216 5900\u0026#34; } ] }, \u0026#34;name\u0026#34; : [ { \u0026#34;givenName\u0026#34; : \u0026#34;Aaccf\u0026#34;, \u0026#34;familyName\u0026#34; : \u0026#34;Amar\u0026#34;, \u0026#34;formatted\u0026#34; : \u0026#34;Aaccf Amar\u0026#34; } ], \u0026#34;emails\u0026#34; : [ { \u0026#34;value\u0026#34; : \u0026#34;user.0@maildomain.net\u0026#34; } ], \u0026#34;phoneNumbers\u0026#34; : [ { \u0026#34;value\u0026#34; : \u0026#34;+1 685 622 6202\u0026#34; } ], \u0026#34;urn:ietf:params:scim:schemas:extension:enterprise:2.0:User\u0026#34; : { \u0026#34;employeeNumber\u0026#34; : [ { \u0026#34;value\u0026#34; : \u0026#34;0\u0026#34; } ] } } , . . . } b) Command to invoke Data SCIM API against specific Oracle Unified Directory Interface:\n$ curl --noproxy \u0026quot;*\u0026quot; --location \\ --request GET 'https://oud-ds-rs-http-0:30443/iam/directory/oud/scim/v1/Users' \\ --header 'Authorization: Basic \u0026lt;Base64 of userDN:userPassword\u0026gt;' | json_pp | json_pp is used to format output in readable json format on the client side. It can be ignored if you do not have the json_pp library. Base64 of userDN:userPassword can be generated using echo -n \u0026quot;userDN:userPassword\u0026quot; | base64. For this example, it is assumed that the value \u0026lsquo;oud-ds-rs\u0026rsquo; is used as the deployment/release name for helm chart installation. 
It is assumed that \u0026lsquo;oud-ds-rs-http-0\u0026rsquo; points to an External LoadBalancer HTTPS/REST Admin API a) Command to invoke Admin REST API against External LBR:\n$ curl --noproxy \u0026quot;*\u0026quot; --insecure --location \\ --request GET 'https://\u0026lt;External LBR Host\u0026gt;/rest/v1/admin/?scope=base\u0026amp;attributes=vendorName\u0026amp;attributes=vendorVersion\u0026amp;attributes=ds-private-naming-contexts\u0026amp;attributes=subschemaSubentry' \\ --header 'Content-Type: application/json' \\ --header 'Authorization: Basic \u0026lt;Base64 of userDN:userPassword\u0026gt;' | json_pp | json_pp is used to format output in readable json format on the client side. It can be ignored if you do not have the json_pp library. Base64 of userDN:userPassword can be generated using echo -n \u0026quot;userDN:userPassword\u0026quot; | base64. Output:\n{ \u0026#34;totalResults\u0026#34; : 1, \u0026#34;searchResultEntries\u0026#34; : [ { \u0026#34;dn\u0026#34; : \u0026#34;\u0026#34;, \u0026#34;attributes\u0026#34; : { \u0026#34;vendorVersion\u0026#34; : \u0026#34;Oracle Unified Directory 12.2.1.4.0\u0026#34;, \u0026#34;ds-private-naming-contexts\u0026#34; : [ \u0026#34;cn=admin data\u0026#34;, \u0026#34;cn=ads-truststore\u0026#34;, \u0026#34;cn=backups\u0026#34;, \u0026#34;cn=config\u0026#34;, \u0026#34;cn=monitor\u0026#34;, \u0026#34;cn=schema\u0026#34;, \u0026#34;cn=tasks\u0026#34;, \u0026#34;cn=virtual acis\u0026#34;, \u0026#34;dc=replicationchanges\u0026#34; ], \u0026#34;subschemaSubentry\u0026#34; : \u0026#34;cn=schema\u0026#34;, \u0026#34;vendorName\u0026#34; : \u0026#34;Oracle Corporation\u0026#34; } } ], \u0026#34;msgType\u0026#34; : \u0026#34;urn:ietf:params:rest:schemas:oracle:oud:1.0:SearchResponse\u0026#34; } b) Command to invoke Admin REST API against specific Oracle Unified Directory Admin Interface:\n$ curl --noproxy \u0026quot;*\u0026quot; --insecure --location \\ --request GET 
'https://oud-ds-rs-admin-0/rest/v1/admin/?scope=base\u0026amp;attributes=vendorName\u0026amp;attributes=vendorVersion\u0026amp;attributes=ds-private-naming-contexts\u0026amp;attributes=subschemaSubentry' \\ --header 'Content-Type: application/json' \\ --header 'Authorization: Basic \u0026lt;Base64 of userDN:userPassword\u0026gt;' | json_pp | json_pp is used to format output in readable json format on the client side. It can be ignored if you do not have the json_pp library. Base64 of userDN:userPassword can be generated using echo -n \u0026quot;userDN:userPassword\u0026quot; | base64. c) Command to invoke Admin REST API against Kubernetes NodePort for Ingress Controller Service\n$ curl --noproxy \u0026quot;*\u0026quot; --insecure --location \\ --request GET 'https://oud-ds-rs-admin-0:30443/rest/v1/admin/?scope=base\u0026amp;attributes=vendorName\u0026amp;attributes=vendorVersion\u0026amp;attributes=ds-private-naming-contexts\u0026amp;attributes=subschemaSubentry' \\ --header 'Content-Type: application/json' \\ --header 'Authorization: Basic \u0026lt;Base64 of userDN:userPassword\u0026gt;' | json_pp | json_pp is used to format output in readable json format on the client side. It can be ignored if you do not have the json_pp library. Base64 of userDN:userPassword can be generated using echo -n \u0026quot;userDN:userPassword\u0026quot; | base64. 
LDAP against External Load Balancer a) Command to perform ldapsearch against External LBR and LDAP port\n$ \u0026lt;OUD Home\u0026gt;/bin/ldapsearch --hostname \u0026lt;External LBR\u0026gt; --port 1389 \\ -D \u0026quot;\u0026lt;Root User DN\u0026gt;\u0026quot; -w \u0026lt;Password for Root User DN\u0026gt; \\ -b \u0026quot;\u0026quot; -s base \u0026quot;(objectClass=*)\u0026quot; \u0026quot;*\u0026quot; Output:\ndn: objectClass: top objectClass: ds-root-dse lastChangeNumber: 0 firstChangeNumber: 0 changelog: cn=changelog entryDN: pwdPolicySubentry: cn=Default Password Policy,cn=Password Policies,cn=config subschemaSubentry: cn=schema supportedAuthPasswordSchemes: SHA256 supportedAuthPasswordSchemes: SHA1 supportedAuthPasswordSchemes: SHA384 supportedAuthPasswordSchemes: SHA512 supportedAuthPasswordSchemes: MD5 numSubordinates: 1 supportedFeatures: 1.3.6.1.1.14 supportedFeatures: 1.3.6.1.4.1.4203.1.5.1 supportedFeatures: 1.3.6.1.4.1.4203.1.5.2 supportedFeatures: 1.3.6.1.4.1.4203.1.5.3 lastExternalChangelogCookie: vendorName: Oracle Corporation vendorVersion: Oracle Unified Directory 12.2.1.4.0 componentVersion: 4 releaseVersion: 1 platformVersion: 0 supportedLDAPVersion: 2 supportedLDAPVersion: 3 supportedControl: 1.2.826.0.1.3344810.2.3 supportedControl: 1.2.840.113556.1.4.1413 supportedControl: 1.2.840.113556.1.4.319 supportedControl: 1.2.840.113556.1.4.473 supportedControl: 1.2.840.113556.1.4.805 supportedControl: 1.3.6.1.1.12 supportedControl: 1.3.6.1.1.13.1 supportedControl: 1.3.6.1.1.13.2 supportedControl: 1.3.6.1.4.1.26027.1.5.2 supportedControl: 1.3.6.1.4.1.26027.1.5.4 supportedControl: 1.3.6.1.4.1.26027.1.5.5 supportedControl: 1.3.6.1.4.1.26027.1.5.6 supportedControl: 1.3.6.1.4.1.26027.2.3.1 supportedControl: 1.3.6.1.4.1.26027.2.3.2 supportedControl: 1.3.6.1.4.1.26027.2.3.4 supportedControl: 1.3.6.1.4.1.42.2.27.8.5.1 supportedControl: 1.3.6.1.4.1.42.2.27.9.5.2 supportedControl: 1.3.6.1.4.1.42.2.27.9.5.8 supportedControl: 1.3.6.1.4.1.4203.1.10.1 
supportedControl: 1.3.6.1.4.1.4203.1.10.2 supportedControl: 2.16.840.1.113730.3.4.12 supportedControl: 2.16.840.1.113730.3.4.16 supportedControl: 2.16.840.1.113730.3.4.17 supportedControl: 2.16.840.1.113730.3.4.18 supportedControl: 2.16.840.1.113730.3.4.19 supportedControl: 2.16.840.1.113730.3.4.2 supportedControl: 2.16.840.1.113730.3.4.3 supportedControl: 2.16.840.1.113730.3.4.4 supportedControl: 2.16.840.1.113730.3.4.5 supportedControl: 2.16.840.1.113730.3.4.9 supportedControl: 2.16.840.1.113894.1.8.21 supportedControl: 2.16.840.1.113894.1.8.31 supportedControl: 2.16.840.1.113894.1.8.36 maintenanceVersion: 2 supportedSASLMechanisms: PLAIN supportedSASLMechanisms: EXTERNAL supportedSASLMechanisms: CRAM-MD5 supportedSASLMechanisms: DIGEST-MD5 majorVersion: 12 orclGUID: D41D8CD98F003204A9800998ECF8427E entryUUID: d41d8cd9-8f00-3204-a980-0998ecf8427e ds-private-naming-contexts: cn=schema hasSubordinates: true nsUniqueId: d41d8cd9-8f003204-a9800998-ecf8427e structuralObjectClass: ds-root-dse supportedExtension: 1.3.6.1.4.1.4203.1.11.1 supportedExtension: 1.3.6.1.4.1.4203.1.11.3 supportedExtension: 1.3.6.1.1.8 supportedExtension: 1.3.6.1.4.1.26027.1.6.3 supportedExtension: 1.3.6.1.4.1.26027.1.6.2 supportedExtension: 1.3.6.1.4.1.26027.1.6.1 supportedExtension: 1.3.6.1.4.1.1466.20037 namingContexts: cn=changelog namingContexts: dc=example,dc=com b) Command to perform ldapsearch against External LBR and LDAP port for specific Oracle Unified Directory Interface\n$ \u0026lt;OUD Home\u0026gt;/bin/ldapsearch --hostname \u0026lt;External LBR\u0026gt; --port 3890 \\ -D \u0026quot;\u0026lt;Root User DN\u0026gt;\u0026quot; -w \u0026lt;Password for Root User DN\u0026gt; \\ -b \u0026quot;\u0026quot; -s base \u0026quot;(objectClass=*)\u0026quot; \u0026quot;*\u0026quot; LDAPS against Kubernetes NodePort for Ingress Controller Service a) Command to perform ldapsearch against External LBR and LDAP port\n$ \u0026lt;OUD Home\u0026gt;/bin/ldapsearch --hostname \u0026lt;Kubernetes 
Node\u0026gt; --port 31636 \\ --useSSL --trustAll \\ -D \u0026quot;\u0026lt;Root User DN\u0026gt;\u0026quot; -w \u0026lt;Password for Root User DN\u0026gt; \\ -b \u0026quot;\u0026quot; -s base \u0026quot;(objectClass=*)\u0026quot; \u0026quot;*\u0026quot; Output:\ndn: objectClass: top objectClass: ds-root-dse lastChangeNumber: 0 firstChangeNumber: 0 changelog: cn=changelog entryDN: pwdPolicySubentry: cn=Default Password Policy,cn=Password Policies,cn=config subschemaSubentry: cn=schema supportedAuthPasswordSchemes: SHA256 supportedAuthPasswordSchemes: SHA1 supportedAuthPasswordSchemes: SHA384 supportedAuthPasswordSchemes: SHA512 supportedAuthPasswordSchemes: MD5 numSubordinates: 1 supportedFeatures: 1.3.6.1.1.14 supportedFeatures: 1.3.6.1.4.1.4203.1.5.1 supportedFeatures: 1.3.6.1.4.1.4203.1.5.2 supportedFeatures: 1.3.6.1.4.1.4203.1.5.3 lastExternalChangelogCookie: vendorName: Oracle Corporation vendorVersion: Oracle Unified Directory 12.2.1.4.0 componentVersion: 4 releaseVersion: 1 platformVersion: 0 supportedLDAPVersion: 2 supportedLDAPVersion: 3 supportedControl: 1.2.826.0.1.3344810.2.3 supportedControl: 1.2.840.113556.1.4.1413 supportedControl: 1.2.840.113556.1.4.319 supportedControl: 1.2.840.113556.1.4.473 supportedControl: 1.2.840.113556.1.4.805 supportedControl: 1.3.6.1.1.12 supportedControl: 1.3.6.1.1.13.1 supportedControl: 1.3.6.1.1.13.2 supportedControl: 1.3.6.1.4.1.26027.1.5.2 supportedControl: 1.3.6.1.4.1.26027.1.5.4 supportedControl: 1.3.6.1.4.1.26027.1.5.5 supportedControl: 1.3.6.1.4.1.26027.1.5.6 supportedControl: 1.3.6.1.4.1.26027.2.3.1 supportedControl: 1.3.6.1.4.1.26027.2.3.2 supportedControl: 1.3.6.1.4.1.26027.2.3.4 supportedControl: 1.3.6.1.4.1.42.2.27.8.5.1 supportedControl: 1.3.6.1.4.1.42.2.27.9.5.2 supportedControl: 1.3.6.1.4.1.42.2.27.9.5.8 supportedControl: 1.3.6.1.4.1.4203.1.10.1 supportedControl: 1.3.6.1.4.1.4203.1.10.2 supportedControl: 2.16.840.1.113730.3.4.12 supportedControl: 2.16.840.1.113730.3.4.16 supportedControl: 
2.16.840.1.113730.3.4.17 supportedControl: 2.16.840.1.113730.3.4.18 supportedControl: 2.16.840.1.113730.3.4.19 supportedControl: 2.16.840.1.113730.3.4.2 supportedControl: 2.16.840.1.113730.3.4.3 supportedControl: 2.16.840.1.113730.3.4.4 supportedControl: 2.16.840.1.113730.3.4.5 supportedControl: 2.16.840.1.113730.3.4.9 supportedControl: 2.16.840.1.113894.1.8.21 supportedControl: 2.16.840.1.113894.1.8.31 supportedControl: 2.16.840.1.113894.1.8.36 maintenanceVersion: 2 supportedSASLMechanisms: PLAIN supportedSASLMechanisms: EXTERNAL supportedSASLMechanisms: CRAM-MD5 supportedSASLMechanisms: DIGEST-MD5 majorVersion: 12 orclGUID: D41D8CD98F003204A9800998ECF8427E entryUUID: d41d8cd9-8f00-3204-a980-0998ecf8427e ds-private-naming-contexts: cn=schema hasSubordinates: true nsUniqueId: d41d8cd9-8f003204-a9800998-ecf8427e structuralObjectClass: ds-root-dse supportedExtension: 1.3.6.1.4.1.4203.1.11.1 supportedExtension: 1.3.6.1.4.1.4203.1.11.3 supportedExtension: 1.3.6.1.1.8 supportedExtension: 1.3.6.1.4.1.26027.1.6.3 supportedExtension: 1.3.6.1.4.1.26027.1.6.2 supportedExtension: 1.3.6.1.4.1.26027.1.6.1 supportedExtension: 1.3.6.1.4.1.1466.20037 namingContexts: cn=changelog namingContexts: dc=example,dc=com b) Command to perform ldapsearch against External LBR and LDAP port for specific Oracle Unified Directory Interface\n$ \u0026lt;OUD Home\u0026gt;/bin/ldapsearch --hostname \u0026lt;Kubernetes Node\u0026gt; --port 30360 \\ --useSSL --trustAll \\ -D \u0026quot;\u0026lt;Root User DN\u0026gt;\u0026quot; -w \u0026lt;Password for Root User DN\u0026gt; \\ -b \u0026quot;\u0026quot; -s base \u0026quot;(objectClass=*)\u0026quot; \u0026quot;*\u0026quot; Configuration Parameters The following table lists the configurable parameters of the oud-ds-rs chart and its default values.\n Parameter Description Default Value replicaCount Number of DS+RS instances/pods/services to be created with replication enabled against a base Oracle Unified Directory instance/pod. 
3 restartPolicyName restartPolicy to be configured for each POD containing Oracle Unified Directory instance OnFailure image.repository Oracle Unified Directory Image Registry/Repository and name. Based on this, image parameter would be configured for Oracle Unified Directory pods/containers oracle/oud image.tag Oracle Unified Directory Image Tag. Based on this, image parameter would be configured for Oracle Unified Directory pods/containers 12.2.1.4.0 image.pullPolicy policy to pull the image IfNotPresent imagePullSecrets.name name of Secret resource containing private registry credentials regcred nameOverride override the fullname with this name fullnameOverride Overrides the fullname with the provided string serviceAccount.create Specifies whether a service account should be created true serviceAccount.name If not set and create is true, a name is generated using the fullname template oud-ds-rs-\u0026lt; fullname \u0026gt;-token-\u0026lt; randomalphanum \u0026gt; podSecurityContext Security context policies to add to the controller pod securityContext Security context policies to add by default service.type type of controller service to create ClusterIP nodeSelector node labels for pod assignment tolerations node taints to tolerate affinity node/pod affinities ingress.enabled true ingress.type Supported value: nginx nginx ingress.nginx.http.host Hostname to be used with Ingress Rules. If not set, hostname would be configured according to fullname. Hosts would be configured as \u0026lt; fullname \u0026gt;-http.\u0026lt; domain \u0026gt;, \u0026lt; fullname \u0026gt;-http-0.\u0026lt; domain \u0026gt;, \u0026lt; fullname \u0026gt;-http-1.\u0026lt; domain \u0026gt;, etc. ingress.nginx.http.domain Domain name to be used with Ingress Rules. In ingress rules, hosts would be configured as \u0026lt; host \u0026gt;.\u0026lt; domain \u0026gt;, \u0026lt; host \u0026gt;-0.\u0026lt; domain \u0026gt;, \u0026lt; host \u0026gt;-1.\u0026lt; domain \u0026gt;, etc. 
ingress.nginx.http.backendPort http ingress.nginx.http.nginxAnnotations { kubernetes.io/ingress.class: \u0026ldquo;nginx\u0026rdquo;} ingress.nginx.admin.host Hostname to be used with Ingress Rules. If not set, hostname would be configured according to fullname. Hosts would be configured as \u0026lt; fullname \u0026gt;-admin.\u0026lt; domain \u0026gt;, \u0026lt; fullname \u0026gt;-admin-0.\u0026lt; domain \u0026gt;, \u0026lt; fullname \u0026gt;-admin-1.\u0026lt; domain \u0026gt;, etc. ingress.nginx.admin.domain Domain name to be used with Ingress Rules. In ingress rules, hosts would be configured as \u0026lt; host \u0026gt;.\u0026lt; domain \u0026gt;, \u0026lt; host \u0026gt;-0.\u0026lt; domain \u0026gt;, \u0026lt; host \u0026gt;-1.\u0026lt; domain \u0026gt;, etc. ingress.nginx.admin.nginxAnnotations { kubernetes.io/ingress.class: \u0026ldquo;nginx\u0026rdquo; nginx.ingress.kubernetes.io/backend-protocol: \u0026ldquo;https\u0026rdquo;} ingress.tlsSecret Secret name to use an already created TLS Secret. If such secret is not provided, one would be created with name \u0026lt; fullname \u0026gt;-tls-cert. If the TLS Secret is in different namespace, name can be mentioned as \u0026lt; namespace \u0026gt;/\u0026lt; tlsSecretName \u0026gt; ingress.certCN Subject\u0026rsquo;s common name (cn) for SelfSigned Cert. \u0026lt; fullname \u0026gt; ingress.certValidityDays Validity of Self-Signed Cert in days 365 secret.enabled If enabled it will use the secret created with base64 encoding. if value is false, secret would not be used and input values (through \u0026ndash;set, \u0026ndash;values, etc.) would be used while creation of pods. true secret.name secret name to use an already created Secret oud-ds-rs-\u0026lt; fullname \u0026gt;-creds secret.type Specifies the type of the secret Opaque persistence.enabled If enabled, it will use the persistent volume. if value is false, PV and PVC would not be used and pods would be using the default emptyDir mount volume. 
true persistence.pvname pvname to use an already created Persistent Volume , If blank will use the default name oud-ds-rs-\u0026lt; fullname \u0026gt;-pv persistence.pvcname pvcname to use an already created Persistent Volume Claim , If blank will use default name oud-ds-rs-\u0026lt; fullname \u0026gt;-pvc persistence.type supported values: either filesystem or networkstorage or custom filesystem persistence.filesystem.hostPath.path The path location mentioned should be created and accessible from the local host provided with necessary privileges for the user. /scratch/shared/oud_user_projects persistence.networkstorage.nfs.path Path of NFS Share location /scratch/shared/oud_user_projects persistence.networkstorage.nfs.server IP or hostname of NFS Server 0.0.0.0 persistence.custom.* Based on values/data, YAML content would be included in PersistenceVolume Object persistence.accessMode Specifies the access mode of the location provided ReadWriteMany persistence.size Specifies the size of the storage 10Gi persistence.storageClass Specifies the storageclass of the persistence volume. empty persistence.annotations specifies any annotations that will be used { } configVolume.enabled If enabled, it will use the persistent volume. If value is false, PV and PVC would not be used and pods would be using the default emptyDir mount volume. true configVolume.mountPath If enabled, it will use the persistent volume. 
If value is false, PV and PVC would not be used and there would not be any mount point available for config false configVolume.pvname pvname to use an already created Persistent Volume , If blank will use the default name oud-ds-rs-\u0026lt; fullname \u0026gt;-pv-config configVolume.pvcname pvcname to use an already created Persistent Volume Claim , If blank will use default name oud-ds-rs-\u0026lt; fullname \u0026gt;-pvc-config configVolume.type supported values: either filesystem or networkstorage or custom filesystem configVolume.filesystem.hostPath.path The path location mentioned should be created and accessible from the local host provided with necessary privileges for the user. /scratch/shared/oud_user_projects configVolume.networkstorage.nfs.path Path of NFS Share location /scratch/shared/oud_config configVolume.networkstorage.nfs.server IP or hostname of NFS Server 0.0.0.0 configVolume.custom.* Based on values/data, YAML content would be included in PersistenceVolume Object configVolume.accessMode Specifies the access mode of the location provided ReadWriteMany configVolume.size Specifies the size of the storage 10Gi configVolume.storageClass Specifies the storageclass of the persistence volume. empty configVolume.annotations specifies any annotations that will be used { } oudPorts.adminldaps Port on which Oracle Unified Directory Instance in the container should listen for Administration Communication over LDAPS Protocol 1444 oudPorts.adminhttps Port on which Oracle Unified Directory Instance in the container should listen for Administration Communication over HTTPS Protocol. 1888 oudPorts.ldap Port on which Oracle Unified Directory Instance in the container should listen for LDAP Communication. 1389 oudPorts.ldaps Port on which Oracle Unified Directory Instance in the container should listen for LDAPS Communication. 1636 oudPorts.http Port on which Oracle Unified Directory Instance in the container should listen for HTTP Communication. 
1080 oudPorts.https Port on which Oracle Unified Directory Instance in the container should listen for HTTPS Communication. 1081 oudPorts.replication Port value to be used while setting up replication server. 1898 oudConfig.baseDN BaseDN for Oracle Unified Directory Instances dc=example,dc=com oudConfig.rootUserDN Root User DN for Oracle Unified Directory Instances cn=Directory Manager oudConfig.rootUserPassword Password for Root User DN RandomAlphanum oudConfig.sampleData To specify that the database should be populated with the specified number of sample entries. 0 oudConfig.sleepBeforeConfig Based on the value for this parameter, initialization/configuration of each Oracle Unified Directory replica would be delayed. 120 oudConfig.adminUID AdminUID to be configured with each replicated Oracle Unified Directory instance admin oudConfig.adminPassword Password for AdminUID. If the value is not passed, value of rootUserPassword would be used as password for AdminUID. rootUserPassword baseOUD.envVarsConfigMap Reference to ConfigMap which can contain additional environment variables to be passed on to POD for Base Oracle Unified Directory Instance. Following are the environment variables which would not be honored from the ConfigMap. instanceType, sleepBeforeConfig, OUD_INSTANCE_NAME, hostname, baseDN, rootUserDN, rootUserPassword, adminConnectorPort, httpAdminConnectorPort, ldapPort, ldapsPort, httpPort, httpsPort, replicationPort, sampleData. - baseOUD.envVars Environment variables in Yaml Map format. This is helpful when its requried to pass environment variables through \u0026ndash;values file. List of env variables which would not be honored from envVars map is same as list of env var names mentioned for envVarsConfigMap. - replOUD.envVarsConfigMap Reference to ConfigMap which can contain additional environment variables to be passed on to PODs for Replicated Oracle Unified Directory Instances. 
Following are the environment variables which would not be honored from the ConfigMap. instanceType, sleepBeforeConfig, OUD_INSTANCE_NAME, hostname, baseDN, rootUserDN, rootUserPassword, adminConnectorPort, httpAdminConnectorPort, ldapPort, ldapsPort, httpPort, httpsPort, replicationPort, sampleData, sourceHost, sourceServerPorts, sourceAdminConnectorPort, sourceReplicationPort, dsreplication_1, dsreplication_2, dsreplication_3, dsreplication_4, post_dsreplication_dsconfig_1, post_dsreplication_dsconfig_2 - replOUD.envVars Environment variables in Yaml Map format. This is helpful when its required to pass environment variables through \u0026ndash;values file. List of env variables which would not be honored from envVars map is same as list of env var names mentioned for envVarsConfigMap. - replOUD.groupId Group ID to be used/configured with each Oracle Unified Directory instance in replicated topology. 1 elk.elasticsearch.enabled If enabled it will create the elastic search statefulset deployment false elk.elasticsearch.image.repository Elastic Search Image name/Registry/Repository . Based on this elastic search instances will be created docker.elastic.co/elasticsearch/elasticsearch elk.elasticsearch.image.tag Elastic Search Image tag .Based on this, image parameter would be configured for Elastic Search pods/instances 6.4.3 elk.elasticsearch.image.pullPolicy policy to pull the image IfnotPresent elk.elasticsearch.esreplicas Number of Elastic search Instances will be created 3 elk.elasticsearch.minimumMasterNodes The value for discovery.zen.minimum_master_nodes. Should be set to (esreplicas / 2) + 1. 2 elk.elasticsearch.esJAVAOpts Java options for Elasticsearch. 
This is where you should configure the jvm heap size -Xms512m -Xmx512m elk.elasticsearch.sysctlVmMaxMapCount Sets the sysctl vm.max_map_count needed for Elasticsearch 262144 elk.elasticsearch.resources.requests.cpu cpu resources requested for the elastic search 100m elk.elasticsearch.resources.limits.cpu total cpu limits that are configures for the elastic search 1000m elk.elasticsearch.esService.type Type of Service to be created for elastic search ClusterIP elk.elasticsearch.esService.lbrtype Type of load balancer Service to be created for elastic search ClusterIP elk.kibana.enabled If enabled it will create a kibana deployment false elk.kibana.image.repository Kibana Image Registry/Repository and name. Based on this Kibana instance will be created docker.elastic.co/kibana/kibana elk.kibana.image.tag Kibana Image tag. Based on this, Image parameter would be configured. 6.4.3 elk.kibana.image.pullPolicy policy to pull the image IfnotPresent elk.kibana.kibanaReplicas Number of Kibana instances will be created 1 elk.kibana.service.tye Type of service to be created NodePort elk.kibana.service.targetPort Port on which the kibana will be accessed 5601 elk.kibana.service.nodePort nodePort is the port on which kibana service will be accessed from outside 31119 elk.logstash.enabled If enabled it will create a logstash deployment false elk.logstash.image.repository logstash Image Registry/Repository and name. Based on this logstash instance will be created logstash elk.logstash.image.tag logstash Image tag. Based on this, Image parameter would be configured. 
6.6.0 elk.logstash.image.pullPolicy policy to pull the image IfnotPresent elk.logstash.containerPort Port on which the logstash container will be running 5044 elk.logstash.service.tye Type of service to be created NodePort elk.logstash.service.targetPort Port on which the logstash will be accessed 9600 elk.logstash.service.nodePort nodePort is the port on which logstash service will be accessed from outside 32222 elk.logstash.logstashConfigMap Provide the configmap name which is already created with the logstash conf. if empty default logstash configmap will be created and used elk.elkPorts.rest Port for REST 9200 elk.elkPorts.internode port used for communication between the nodes 9300 elk.busybox.image busy box image name. Used for initcontianers busybox elk.elkVolume.enabled If enabled, it will use the persistent volume. if value is false, PV and pods would be using the default emptyDir mount volume. true elk.elkVolume.pvname pvname to use an already created Persistent Volume , If blank will use the default name oud-ds-rs-\u0026lt; fullname \u0026gt;-espv elk.elkVolume.type supported values: either filesystem or networkstorage or custom filesystem elk.elkVolume.filesystem.hostPath.path The path location mentioned should be created and accessible from the local host provided with necessary privileges for the user. /scratch/shared/oud_elk/data elk.elkVolume.networkstorage.nfs.path Path of NFS Share location /scratch/shared/oud_elk/data elk.elkVolume.networkstorage.nfs.server IP or hostname of NFS Server 0.0.0.0 elk.elkVolume.custom.* Based on values/data, YAML content would be included in PersistenceVolume Object elk.elkVolume.accessMode Specifies the access mode of the location provided ReadWriteMany elk.elkVolume.size Specifies the size of the storage 20Gi elk.elkVolume.storageClass Specifies the storageclass of the persistence volume. 
elk elk.elkVolume.annotations specifies any annotations that will be used { } " +}, +{ + "uri": "/fmw-kubernetes/21.4.2/oudsm/create-oudsm-instances/create-oudsm-instances-helm/oudsm/", + "title": "Helm Chart: oudsm: For deployment of Oracle Unified Directory Services Manager instances on Kubernetes", + "tags": [], + "description": "This document provides details of the oudsm Helm chart.", + "content": " Introduction Create Kubernetes Namespace Deploy oudsm Helm Chart Verify the Installation Ingress Controller Setup Ingress with NGINX Access to Interfaces through Ingress Configuration Parameters Introduction This Helm chart provides for the deployment of replicated Oracle Unified Directory Services Manager instances on Kubernetes.\nBased on the configuration, this chart deploys the following objects in the specified namespace of a Kubernetes cluster.\n Service Account Secret Persistent Volume and Persistent Volume Claim Pod(s)/Container(s) for Oracle Unified Directory Services Manager Instances Services for interfaces exposed through Oracle Unified Directory Services Manager Instances Ingress configuration Create Kubernetes Namespace Create a Kubernetes namespace to provide a scope for other objects such as pods and services that you create in the environment. To create your namespace issue the following command:\n$ kubectl create ns oudns namespace/oudns created Deploy oudsm Helm Chart Create/Deploy Oracle Unified Directory Services Manager instances along with Kubernetes objects in a specified namespace using the oudsm Helm Chart.\nThe deployment can be initiated by running the following Helm command with reference to the oudsm Helm Chart, along with configuration parameters according to your environment. Before deploying the Helm chart, the namespace should be created. 
Objects to be created by the Helm chart will be created inside the specified namespace.\ncd \u0026lt;work directory\u0026gt;/fmw-kubernetes/OracleUnifiedDirectorySM/kubernetes/helm $ helm install --namespace \u0026lt;namespace\u0026gt; \\ \u0026lt;Configuration Parameters\u0026gt; \\ \u0026lt;deployment/release name\u0026gt; \\ \u0026lt;Helm Chart Path/Name\u0026gt; Configuration Parameters (override values in chart) can be passed on with --set arguments on the command line and/or with -f / --values arguments when referring to files.\nExamples Example where configuration parameters are passed with --set argument: $ helm install --namespace oudns \\ --set oudsm.adminUser=weblogic,oudsm.adminPass=Oracle123,persistence.filesystem.hostPath.path=/scratch/shared/oudsm_user_projects,image.repository=oracle/oudsm,image.tag=12.2.1.4.0-PSU2020July-20200730 \\ oudsm oudsm For more details about the helm command and parameters, please execute helm --help and helm install --help. In this example, it is assumed that the command is executed from the directory containing the \u0026lsquo;oudsm\u0026rsquo; helm chart directory (OracleUnifiedDirectorySM/kubernetes/helm/). Example where configuration parameters are passed with --values argument: $ helm install --namespace oudns \\ --values oudsm-values-override.yaml \\ oudsm oudsm For more details about the helm command and parameters, please execute helm --help and helm install --help. In this example, it is assumed that the command is executed from the directory containing the \u0026lsquo;oudsm\u0026rsquo; helm chart directory (OracleUnifiedDirectorySM/kubernetes/helm/). The --values argument passes a file path/name which overrides default values in the chart. 
oudsm-values-override.yaml\noudsm: adminUser: weblogic adminPass: Oracle123 persistence: type: filesystem filesystem: hostPath: path: /scratch/shared/oudsm_user_projects Example to update/upgrade Helm Chart based deployment: $ helm upgrade --namespace oudns \\ --set oudsm.adminUser=weblogic,oudsm.adminPass=Oracle123,persistence.filesystem.hostPath.path=/scratch/shared/oudsm_user_projects,replicaCount=2 \\ oudsm oudsm For more details about the helm command and parameters, please execute helm --help and helm install --help. In this example, it is assumed that the command is executed from the directory containing the \u0026lsquo;oudsm\u0026rsquo; helm chart directory (OracleUnifiedDirectorySM/kubernetes/helm/). Example to apply new Oracle Unified Directory Services Manager patch through Helm Chart based deployment: In this example, we will apply PSU2020July-20200730 patch on earlier running Oracle Unified Directory Services Manager version. If we describe pod we will observe that the container is up with new version.\nWe have two ways to achieve our goal:\n$ helm upgrade --namespace oudns \\ --set image.repository=oracle/oudsm,image.tag=12.2.1.4.0-PSU2020July-20200730 \\ oudsm oudsm OR\n$ helm upgrade --namespace oudns \\ --values oudsm-values-override.yaml \\ oudsm oudsm For more details about the helm command and parameters, please execute helm --help and helm install --help. In this example, it is assumed that the command is executed from the directory containing the \u0026lsquo;oudsm\u0026rsquo; helm chart directory (OracleUnifiedDirectorySM/kubernetes/helm/). oudsm-values-override.yaml\nimage: repository: oracle/oudsm tag: 12.2.1.4.0-PSU2020July-20200730 Example for using NFS as PV Storage: $ helm install --namespace oudns \\ --values oudsm-values-override-nfs.yaml \\ oudsm oudsm For more details about the helm command and parameters, please execute helm --help and helm install --help. 
In this example, it is assumed that the command is executed from the directory containing the \u0026lsquo;oudsm\u0026rsquo; helm chart directory (OracleUnifiedDirectorySM/kubernetes/helm/). The --values argument passes a file path/name which overrides values in the chart. oudsm-values-override-nfs.yaml\noudsm: adminUser: weblogic adminPass: Oracle123 persistence: type: networkstorage networkstorage: nfs: path: /scratch/shared/oud_user_projects server: \u0026lt;NFS IP address\u0026gt; Example for using PV type of your choice: $ helm install --namespace oudns \\ --values oudsm-values-override-pv-custom.yaml \\ oudsm oudsm For more details about the helm command and parameters, please execute helm --help and helm install --help. In this example, it is assumed that the command is executed from the directory containing the \u0026lsquo;oudsm\u0026rsquo; helm chart directory (OracleUnifiedDirectorySM/kubernetes/helm/). The --values argument passes a file path/name which overrides values in the chart. oudsm-values-override-pv-custom.yaml\noudsm: adminUser: weblogic adminPass: Oracle123 persistence: type: custom custom: nfs: # Path of NFS Share location path: /scratch/shared/oudsm_user_projects # IP of NFS Server server: \u0026lt;NFS IP address\u0026gt; Under custom:, the configuration of your choice can be specified. 
This configuration will be used \u0026lsquo;as-is\u0026rsquo; for the PersistentVolume object.\n Check Deployment Output for the helm install/upgrade command Output similar to the following is observed following successful execution of helm install/upgrade command.\nNAME: oudsm LAST DEPLOYED: Wed Oct 14 06:22:10 2020 NAMESPACE: oudns STATUS: deployed REVISION: 1 TEST SUITE: None Check for the status of objects created through oudsm helm chart Command:\n$ kubectl --namespace oudns get nodes,pod,service,secret,pv,pvc,ingress -o wide Output is similar to the following:\nNAME READY STATUS RESTARTS AGE IP NODE NOMINATED NODE READINESS GATES pod/oudsm-1 1/1 Running 0 22h 10.244.0.19 100.102.51.238 \u0026lt;none\u0026gt; \u0026lt;none\u0026gt; pod/oudsm-2 1/1 Running 0 22h 10.244.0.20 100.102.51.238 \u0026lt;none\u0026gt; \u0026lt;none\u0026gt; NAME TYPE CLUSTER-IP EXTERNAL-IP PORT(S) AGE SELECTOR service/oudsm-1 ClusterIP 10.96.108.200 \u0026lt;none\u0026gt; 7001/TCP,7002/TCP 22h app.kubernetes.io/instance=oudsm,app.kubernetes.io/name=oudsm,oudsm/instance=oudsm-1 service/oudsm-2 ClusterIP 10.96.96.12 \u0026lt;none\u0026gt; 7001/TCP,7002/TCP 22h app.kubernetes.io/instance=oudsm,app.kubernetes.io/name=oudsm,oudsm/instance=oudsm-2 service/oudsm-lbr ClusterIP 10.96.41.201 \u0026lt;none\u0026gt; 7001/TCP,7002/TCP 22h app.kubernetes.io/instance=oudsm,app.kubernetes.io/name=oudsm NAME TYPE DATA AGE secret/default-token-w4jft kubernetes.io/service-account-token 3 32d secret/oudsm-creds opaque 2 22h secret/oudsm-token-ksr4g kubernetes.io/service-account-token 3 22h secret/sh.helm.release.v1.oudsm.v1 helm.sh/release.v1 1 22h secret/sh.helm.release.v1.oudsm.v2 helm.sh/release.v1 1 21h secret/sh.helm.release.v1.oudsm.v3 helm.sh/release.v1 1 19h NAME CAPACITY ACCESS MODES RECLAIM POLICY STATUS CLAIM STORAGECLASS REASON AGE VOLUMEMODE persistentvolume/oudsm-pv 30Gi RWX Retain Bound myoudns/oudsm-pvc manual 22h Filesystem NAME STATUS VOLUME CAPACITY ACCESS MODES STORAGECLASS AGE 
VOLUMEMODE persistentvolumeclaim/oudsm-pvc Bound oudsm-pv 30Gi RWX manual 22h Filesystem NAME HOSTS ADDRESS PORTS AGE ingress.extensions/oudsm-ingress-nginx oudsm-1,oudsm-2,oudsm + 1 more... 100.102.51.230 80 19h Kubernetes Objects Kubernetes objects created by the Helm chart are detailed in the table below:\n Type Name Example Name Purpose Service Account \u0026lt;deployment/release name\u0026gt; oudsm Kubernetes Service Account for the Helm Chart deployment Secret \u0026lt;deployment/release name\u0026gt;-creds oudsm-creds Secret object for Oracle Unified Directory Services Manager related critical values like passwords Persistent Volume \u0026lt;deployment/release name\u0026gt;-pv oudsm-pv Persistent Volume for user_projects mount. Persistent Volume Claim \u0026lt;deployment/release name\u0026gt;-pvc oudsm-pvc Persistent Volume Claim for user_projects mount. Pod \u0026lt;deployment/release name\u0026gt;-N oudsm-1, oudsm-2, \u0026hellip; Pod(s)/Container(s) for Oracle Unified Directory Services Manager Instances Service \u0026lt;deployment/release name\u0026gt;-N oudsm-1, oudsm-2, \u0026hellip; Service(s) for HTTP and HTTPS interfaces from Oracle Unified Directory Services Manager instance \u0026lt;deployment/release name\u0026gt;-N Ingress \u0026lt;deployment/release name\u0026gt;-ingress-nginx oudsm-ingress-nginx Ingress Rules for HTTP and HTTPS interfaces. In the table above, the Example Name for each Object is based on the value \u0026lsquo;oudsm\u0026rsquo; as the deployment/release name for the Helm chart installation. Verify the Installation Ingress Controller Setup There are two types of Ingress controllers supported by this Helm chart. In the sub-sections below, configuration steps for each Controller are described.\nBy default Ingress configuration only supports HTTP and HTTPS Ports/Communication. 
To allow LDAP and LDAPS communication over TCP, configuration is required at Ingress Controller/Implementation level.\nIngress with NGINX Nginx-ingress controller implementation can be deployed/installed in Kubernetes environment.\nCreate a Kubernetes Namespace Create a Kubernetes namespace to provide a scope for NGINX objects such as pods and services that you create in the environment. To create your namespace issue the following command:\n$ kubectl create ns mynginx namespace/mynginx created Add Repo reference to helm for retrieving/installing Chart for nginx-ingress implementation. $ helm repo add ingress-nginx https://kubernetes.github.io/ingress-nginx Confirm the charts available by issuing the following command:\n$ helm search repo | grep nginx ingress-nginx/ingress-nginx 4.0.1 1.0.0 Ingress controller for Kubernetes using NGINX a... stable/ingress-nginx 4.0.1 1.0.0 Ingress controller for Kubernetes using NGINX a... Command helm install to install nginx-ingress related objects like pod, service, deployment, etc. $ helm install --namespace default \\ --values nginx-ingress-values-override.yaml \\ lbr-nginx ingress-nginx/ingress-nginx --version=3.34.0 For more details about the helm command and parameters, please execute helm --help and helm install --help. The --values argument passes a file path/name which overrides values in the chart. nginx-ingress-values-override.yaml\ncontroller: admissionWebhooks: enabled: false extraArgs: # The secret referred to by this flag contains the default certificate to be used when accessing the catch-all server. # If this flag is not provided NGINX will use a self-signed certificate. 
# If the TLS Secret is in different namespace, name can be mentioned as \u0026lt;namespace\u0026gt;/\u0026lt;tlsSecretName\u0026gt; default-ssl-certificate=oudns/oudsm-tls-cert service: # controller service external IP addresses externalIPs: - \u0026lt; External IP Address \u0026gt; # To configure Ingress Controller Service as LoadBalancer type of Service # Based on the Kubernetes configuration, External LoadBalancer would be linked to the Ingress Controller Service type: LoadBalancer # Configuration for NodePort to be used for Ports exposed through Ingress # If NodePorts are not defined/configured, Node Port would be assigned automatically by Kubernetes # These NodePorts are helpful while accessing services directly through Ingress and without having External Load Balancer. nodePorts: # For HTTP Interface exposed through LoadBalancer/Ingress http: 30080 # For HTTPS Interface exposed through LoadBalancer/Ingress https: 30443 Access to Interfaces through Ingress With the helm chart, Ingress objects are also created according to configuration. Following are the rules configured in Ingress object(s) for access to Oracle Unified Directory Services Manager Interfaces through Ingress.\n Port NodePort Host Example Hostname Path Backend Service:Port Example Service Name:Port http/https 30080/30443 \u0026lt;deployment/release name\u0026gt;-N oudsm-N * \u0026lt;deployment/release name\u0026gt;-N:http oudsm-1:http http/https 30080/30443 * * /oudsm/console \u0026lt;deployment/release name\u0026gt;-lbr:http oudsm-lbr:http In the table above, the Example Name for each Object is based on the value \u0026lsquo;oudsm\u0026rsquo; as the deployment/release name for the Helm chart installation. NodePort mentioned in the table are according to Ingress configuration described in previous section. When an External LoadBalancer is not available/configured, Interfaces can be accessed through NodePort on the Kubernetes Node. 
Changes in /etc/hosts to validate hostname based Ingress rules In case, its not possible for you to have LoadBalancer configuration updated to have host names added for Oracle Unified Directory Services Manager Interfaces, following kind of entries can be added in /etc/hosts files on host from where Oracle Unified Directory Services Manager interfaces would be accessed.\n\u0026lt;IP Address of External LBR or Kubernetes Node\u0026gt;\toudsm oudsm-1 oudsm-2 oudsm-N In the table above, the Example Name for each Object is based on the value \u0026lsquo;oudsm\u0026rsquo; as the deployment/release name for the Helm chart installation. When an External LoadBalancer is not available/configured, Interfaces can be accessed through NodePort on the Kubernetes Node. Configuration Parameters The following table lists the configurable parameters of the Oracle Unified Directory Services Manager chart and their default values.\n Parameter Description Default Value replicaCount Number of Oracle Unified Directory Services Manager instances/pods/services to be created 1 restartPolicyName restartPolicy to be configured for each POD containing Oracle Unified Directory Services Manager instance OnFailure image.repository Oracle Unified Directory Services Manager Image Registry/Repository and name. Based on this, image parameter would be configured for Oracle Unified Directory Services Manager pods/containers oracle/oudsm image.tag Oracle Unified Directory Services Manager Image Tag. 
Based on this, image parameter would be configured for Oracle Unified Directory Services Manager pods/containers 12.2.1.4.0 image.pullPolicy policy to pull the image IfnotPresent imagePullSecrets.name name of Secret resource containing private registry credentials regcred nameOverride override the fullname with this name fullnameOverride Overrides the fullname with the provided string serviceAccount.create Specifies whether a service account should be created true serviceAccount.name If not set and create is true, a name is generated using the fullname template oudsm-\u0026lt; fullname \u0026gt;-token-\u0026lt; randomalphanum \u0026gt; podSecurityContext Security context policies to add to the controller pod securityContext Security context policies to add by default service.type type of controller service to create ClusterIP nodeSelector node labels for pod assignment tolerations node taints to tolerate affinity node/pod affinities ingress.enabled true ingress.type Supported value: nginx nginx ingress.host Hostname to be used with Ingress Rules. If not set, hostname would be configured according to fullname. Hosts would be configured as \u0026lt; fullname \u0026gt;-http.\u0026lt; domain \u0026gt;, \u0026lt; fullname \u0026gt;-http-0.\u0026lt; domain \u0026gt;, \u0026lt; fullname \u0026gt;-http-1.\u0026lt; domain \u0026gt;, etc. ingress.domain Domain name to be used with Ingress Rules. In ingress rules, hosts would be configured as \u0026lt; host \u0026gt;.\u0026lt; domain \u0026gt;, \u0026lt; host \u0026gt;-0.\u0026lt; domain \u0026gt;, \u0026lt; host \u0026gt;-1.\u0026lt; domain \u0026gt;, etc. ingress.backendPort http ingress.nginxAnnotations { kubernetes.io/ingress.class: \u0026ldquo;nginx\u0026quot;nginx.ingress.kubernetes.io/affinity-mode: \u0026ldquo;persistent\u0026rdquo; nginx.ingress.kubernetes.io/affinity: \u0026ldquo;cookie\u0026rdquo; } ingress.ingress.tlsSecret Secret name to use an already created TLS Secret. 
If such secret is not provided, one would be created with name \u0026lt; fullname \u0026gt;-tls-cert. If the TLS Secret is in different namespace, name can be mentioned as \u0026lt; namespace \u0026gt;/\u0026lt; tlsSecretName \u0026gt; ingress.certCN Subject\u0026rsquo;s common name (cn) for SelfSigned Cert. \u0026lt; fullname \u0026gt; ingress.certValidityDays Validity of Self-Signed Cert in days 365 secret.enabled If enabled it will use the secret created with base64 encoding. if value is false, secret would not be used and input values (through \u0026ndash;set, \u0026ndash;values, etc.) would be used while creation of pods. true secret.name secret name to use an already created Secret oudsm-\u0026lt; fullname \u0026gt;-creds secret.type Specifies the type of the secret Opaque persistence.enabled If enabled, it will use the persistent volume. if value is false, PV and PVC would not be used and pods would be using the default emptyDir mount volume. true persistence.pvname pvname to use an already created Persistent Volume , If blank will use the default name oudsm-\u0026lt; fullname \u0026gt;-pv persistence.pvcname pvcname to use an already created Persistent Volume Claim , If blank will use default name oudsm-\u0026lt; fullname \u0026gt;-pvc persistence.type supported values: either filesystem or networkstorage or custom filesystem persistence.filesystem.hostPath.path The path location mentioned should be created and accessible from the local host provided with necessary privileges for the user. 
/scratch/shared/oudsm_user_projects persistence.networkstorage.nfs.path Path of NFS Share location /scratch/shared/oudsm_user_projects persistence.networkstorage.nfs.server IP or hostname of NFS Server 0.0.0.0 persistence.custom.* Based on values/data, YAML content would be included in PersistenceVolume Object persistence.accessMode Specifies the access mode of the location provided ReadWriteMany persistence.size Specifies the size of the storage 10Gi persistence.storageClass Specifies the storageclass of the persistence volume. empty persistence.annotations specifies any annotations that will be used { } oudsm.adminUser Weblogic Administration User weblogic oudsm.adminPass Password for Weblogic Administration User oudsm.startupTime Expected startup time. After specified seconds readinessProbe would start 900 oudsm.livenessProbeInitialDelay Paramter to decide livenessProbe initialDelaySeconds 1200 elk.elasticsearch.enabled If enabled it will create the elastic search statefulset deployment false elk.elasticsearch.image.repository Elastic Search Image name/Registry/Repository . Based on this elastic search instances will be created docker.elastic.co/elasticsearch/elasticsearch elk.elasticsearch.image.tag Elastic Search Image tag .Based on this, image parameter would be configured for Elastic Search pods/instances 6.4.3 elk.elasticsearch.image.pullPolicy policy to pull the image IfnotPresent elk.elasticsearch.esreplicas Number of Elastic search Instances will be created 3 elk.elasticsearch.minimumMasterNodes The value for discovery.zen.minimum_master_nodes. Should be set to (esreplicas / 2) + 1. 2 elk.elasticsearch.esJAVAOpts Java options for Elasticsearch. 
This is where you should configure the jvm heap size -Xms512m -Xmx512m elk.elasticsearch.sysctlVmMaxMapCount Sets the sysctl vm.max_map_count needed for Elasticsearch 262144 elk.elasticsearch.resources.requests.cpu cpu resources requested for the elastic search 100m elk.elasticsearch.resources.limits.cpu total cpu limits that are configures for the elastic search 1000m elk.elasticsearch.esService.type Type of Service to be created for elastic search ClusterIP elk.elasticsearch.esService.lbrtype Type of load balancer Service to be created for elastic search ClusterIP elk.kibana.enabled If enabled it will create a kibana deployment false elk.kibana.image.repository Kibana Image Registry/Repository and name. Based on this Kibana instance will be created docker.elastic.co/kibana/kibana elk.kibana.image.tag Kibana Image tag. Based on this, Image parameter would be configured. 6.4.3 elk.kibana.image.pullPolicy policy to pull the image IfnotPresent elk.kibana.kibanaReplicas Number of Kibana instances will be created 1 elk.kibana.service.tye Type of service to be created NodePort elk.kibana.service.targetPort Port on which the kibana will be accessed 5601 elk.kibana.service.nodePort nodePort is the port on which kibana service will be accessed from outside 31119 elk.logstash.enabled If enabled it will create a logstash deployment false elk.logstash.image.repository logstash Image Registry/Repository and name. Based on this logstash instance will be created logstash elk.logstash.image.tag logstash Image tag. Based on this, Image parameter would be configured. 
6.6.0 elk.logstash.image.pullPolicy policy to pull the image IfnotPresent elk.logstash.containerPort Port on which the logstash container will be running 5044 elk.logstash.service.tye Type of service to be created NodePort elk.logstash.service.targetPort Port on which the logstash will be accessed 9600 elk.logstash.service.nodePort nodePort is the port on which logstash service will be accessed from outside 32222 elk.logstash.logstashConfigMap Provide the configmap name which is already created with the logstash conf. if empty default logstash configmap will be created and used elk.elkPorts.rest Port for REST 9200 elk.elkPorts.internode port used for communication between the nodes 9300 elk.busybox.image busy box image name. Used for initcontianers busybox elk.elkVolume.enabled If enabled, it will use the persistent volume. if value is false, PV and pods would be using the default emptyDir mount volume. true elk.elkVolume.pvname pvname to use an already created Persistent Volume , If blank will use the default name oudsm-\u0026lt; fullname \u0026gt;-espv elk.elkVolume.type supported values: either filesystem or networkstorage or custom filesystem elk.elkVolume.filesystem.hostPath.path The path location mentioned should be created and accessible from the local host provided with necessary privileges for the user. /scratch/shared/oud_elk/data elk.elkVolume.networkstorage.nfs.path Path of NFS Share location /scratch/shared/oud_elk/data elk.elkVolume.networkstorage.nfs.server IP or hostname of NFS Server 0.0.0.0 elk.elkVolume.custom.* Based on values/data, YAML content would be included in PersistenceVolume Object elk.elkVolume.accessMode Specifies the access mode of the location provided ReadWriteMany elk.elkVolume.size Specifies the size of the storage 20Gi elk.elkVolume.storageClass Specifies the storageclass of the persistence volume. 
elk elk.elkVolume.annotations specifies any annotations that will be used { } " +}, +{ + "uri": "/fmw-kubernetes/21.4.2/oud/manage-oud-containers/logging-and-visualization/", + "title": "a) Logging and Visualization for Helm Chart oud-ds-rs Deployment", + "tags": [], + "description": "Describes the steps for logging and visualization with Elasticsearch and Kibana.", + "content": " Introduction Installation Enable Elasticsearch, Logstash, and Kibana Create Data Mount Points Configure Logstash Install or Upgrade Oracle Unified Directory Container with ELK Configuration Configure ElasticSearch Verify Using the Kibana Application Introduction This section describes how to install and configure logging and visualization for the oud-ds-rs Helm Chart deployment.\nThe ELK stack consists of Elasticsearch, Logstash, and Kibana. Using ELK we can gain insights in real-time from the log data from your applications.\n Elasticsearch is a distributed, RESTful search and analytics engine capable of solving a growing number of use cases. As the heart of the Elastic Stack, it centrally stores your data so you can discover the expected and uncover the unexpected. Logstash is an open source, server-side data processing pipeline that ingests data from a multitude of sources simultaneously, transforms it, and then sends it to your favorite “stash.” Kibana lets you visualize your Elasticsearch data and navigate the Elastic Stack. It gives you the freedom to select the way you give shape to your data. And you don’t always have to know what you\u0026rsquo;re looking for. Installation ELK can be enabled for environments created using the Helm charts provided with this project. The example below will demonstrate installation and configuration of ELK for the oud-ds-rs chart.\nEnable Elasticsearch, Logstash, and Kibana Edit logging-override-values.yaml and set the enabled flag for each component to \u0026lsquo;true\u0026rsquo;.\nelk: elasticsearch: enabled: true ... kibana: enabled: true ... 
logstash: enabled: true ... elkVolume: # If enabled, it will use the persistent volume. # if value is false, PV and PVC would not be used and there would not be any mount point available for config enabled: true type: networkstorage networkstorage: nfs: server: myserver path: /scratch/oud_elk_data Note: if elkVolume.enabled is set to \u0026lsquo;true\u0026rsquo; you should supply a directory for the ELK log files. The userid for the directory can be anything but it must have uid:guid as 1000:1000, which is the same as the ‘oracle’ user running in the container. This ensures the ‘oracle’ user has access to the shared volume/directory.\nInstall or Upgrade Oracle Unified Directory Container with ELK Configuration If you have not installed the oud-ds-rs chart then you should install with the following command, picking up the ELK configuration from the previous steps:\n$ helm install --namespace \u0026lt;namespace\u0026gt; --values \u0026lt;valuesfile.yaml\u0026gt; \u0026lt;releasename\u0026gt; oud-ds-rs For example:\n$ helm install --namespace oudns --values logging-override-values.yaml oud-ds-rs oud-ds-rs If the oud-ds-rs chart is already installed then update the configuration with the ELK configuration from the previous steps:\n$ helm upgrade --namespace \u0026lt;namespace\u0026gt; --values \u0026lt;valuesfile.yaml\u0026gt; \u0026lt;releasename\u0026gt; oud-ds-rs For example:\n$ helm upgrade --namespace oudns --values logging-override-values.yaml oud-ds-rs oud-ds-rs Configure ElasticSearch List the PODs in your namespace:\n$ kubectl get pods -o wide -n \u0026lt;namespace\u0026gt; For example:\n$ kubectl get pods -o wide -n oudns Output will be similar to the following:\n$ kubectl get pods -o wide -n oudns NAME READY STATUS RESTARTS AGE IP NODE NOMINATED NODE READINESS GATES oud-ds-rs-0 1/1 Running 0 39m 10.244.1.107 10.89.73.203 \u0026lt;none\u0026gt; \u0026lt;none\u0026gt; oud-ds-rs-1 1/1 Running 0 39m 10.244.1.108 10.89.73.203 \u0026lt;none\u0026gt; 
\u0026lt;none\u0026gt; oud-ds-rs-2 1/1 Running 0 39m 10.244.1.106 10.89.73.203 \u0026lt;none\u0026gt; \u0026lt;none\u0026gt; oud-ds-rs-es-cluster-0 1/1 Running 0 39m 10.244.1.109 10.89.73.203 \u0026lt;none\u0026gt; \u0026lt;none\u0026gt; oud-ds-rs-kibana-665f9d5fb-pmz4v 1/1 Running 0 39m 10.244.1.110 10.89.73.203 \u0026lt;none\u0026gt; \u0026lt;none\u0026gt; oud-ds-rs-logstash-756fd7c5f5-kvwrw 1/1 Running 0 39m 10.244.2.103 10.89.73.204 \u0026lt;none\u0026gt; \u0026lt;none\u0026gt; From this, identify the ElasticSearch POD, oud-ds-rs-es-cluster-0.\nRun the port-forward command to allow ElasticSearch to be listening on port 9200:\n$ kubectl port-forward oud-ds-rs-es-cluster-0 9200:9200 --namespace=\u0026lt;namespace\u0026gt; \u0026amp; For example:\n$ kubectl port-forward oud-ds-rs-es-cluster-0 9200:9200 --namespace=oudns \u0026amp; [1] 98458 bash-4.2$ Forwarding from 127.0.0.1:9200 -\u0026gt; 9200 Forwarding from [::1]:9200 -\u0026gt; 9200 Verify that ElasticSearch is running by interrogating port 9200:\n$ curl http://localhost:9200 Handling connection for 9200 { \u0026quot;name\u0026quot; : \u0026quot;mike-oud-ds-rs-es-cluster-0\u0026quot;, \u0026quot;cluster_name\u0026quot; : \u0026quot;OUD-elk\u0026quot;, \u0026quot;cluster_uuid\u0026quot; : \u0026quot;H2EBtAlJQUGpV6IkS46Yzw\u0026quot;, \u0026quot;version\u0026quot; : { \u0026quot;number\u0026quot; : \u0026quot;6.4.3\u0026quot;, \u0026quot;build_flavor\u0026quot; : \u0026quot;default\u0026quot;, \u0026quot;build_type\u0026quot; : \u0026quot;tar\u0026quot;, \u0026quot;build_hash\u0026quot; : \u0026quot;fe40335\u0026quot;, \u0026quot;build_date\u0026quot; : \u0026quot;2018-10-30T23:17:19.084789Z\u0026quot;, \u0026quot;build_snapshot\u0026quot; : false, \u0026quot;lucene_version\u0026quot; : \u0026quot;7.4.0\u0026quot;, \u0026quot;minimum_wire_compatibility_version\u0026quot; : \u0026quot;5.6.0\u0026quot;, \u0026quot;minimum_index_compatibility_version\u0026quot; : \u0026quot;5.0.0\u0026quot; }, 
\u0026quot;tagline\u0026quot; : \u0026quot;You Know, for Search\u0026quot; } Verify Using the Kibana Application List the Kibana application service using the following command:\n$ kubectl get svc -o wide -n \u0026lt;namespace\u0026gt; | grep kibana For example:\n$ kubectl get svc -o wide -n oudns | grep kibana Output will be similar to the following:\noud-ds-rs-kibana NodePort 10.103.169.218 \u0026lt;none\u0026gt; 5601:31199/TCP 67m app=kibana In this example, the port to access Kibana application via a Web browser will be 31199.\nEnter the following URL in a browser to access the Kibana application:\nhttp://\u0026lt;hostname\u0026gt;:\u0026lt;NodePort\u0026gt;/app/kibana\nFor example:\nhttp://myserver:31199/app/kibana\nFrom the Kibana Portal navigate to:\nManagement -\u0026gt; Index Patterns\nCreate an Index Pattern using the pattern \u0026lsquo;*\u0026rsquo;\nNavigate to Discover : from here you should be able to see logs from the Oracle Unified Directory environment.\n" +}, +{ + "uri": "/fmw-kubernetes/21.4.2/oudsm/manage-oudsm-containers/logging-and-visualization/", + "title": "a) Logging and Visualization for Helm Chart oudsm Deployment", + "tags": [], + "description": "Describes the steps for logging and visualization with Elasticsearch and Kibana.", + "content": " Introduction Installation Enable Elasticsearch, Logstash, and Kibana Create Data Mount Points Configure Logstash Install or Upgrade Oracle Unified Directory Services Manager Container with ELK Configuration Configure ElasticSearch Verify Using the Kibana Application Introduction This section describes how to install and configure logging and visualization for the oudsm Helm Chart deployment.\nThe ELK stack consists of Elasticsearch, Logstash, and Kibana. Using ELK we can gain insights in real-time from the log data from your applications.\n Elasticsearch is a distributed, RESTful search and analytics engine capable of solving a growing number of use cases. 
As the heart of the Elastic Stack, it centrally stores your data so you can discover the expected and uncover the unexpected. Logstash is an open source, server-side data processing pipeline that ingests data from a multitude of sources simultaneously, transforms it, and then sends it to your favorite “stash.” Kibana lets you visualize your Elasticsearch data and navigate the Elastic Stack. It gives you the freedom to select the way you give shape to your data. And you don’t always have to know what you\u0026rsquo;re looking for. Installation ELK can be enabled for environments created using the Helm charts provided with this project. The example below will demonstrate installation and configuration of ELK for the oudsm chart.\nEdit logging-override-values.yaml and set the enabled flag for each component to \u0026lsquo;true\u0026rsquo;.\nelk: elasticsearch: enabled: true ... kibana: enabled: true ... logstash: enabled: true ... elkVolume: # If enabled, it will use the persistent volume. # if value is false, PV and PVC would not be used and there would not be any mount point available for config enabled: true type: networkstorage networkstorage: nfs: server: myserver path: /scratch/oud_elk_data Note: If elkVolume.enabled is set to \u0026lsquo;true\u0026rsquo; you should supply a directory for the ELK log files. The userid for the directory can be anything but it must have uid:guid as 1000:1000, which is the same as the ‘oracle’ user running in the container. 
This ensures the ‘oracle’ user has access to the shared volume/directory.\nInstall or Upgrade Oracle Unified Directory Services Manager Container with ELK Configuration If you have not installed the oudsm chart then you should install with the following command, picking up the ELK configuration from the previous steps:\n$ helm install --namespace \u0026lt;namespace\u0026gt; --values \u0026lt;valuesfile.yaml\u0026gt; \u0026lt;releasename\u0026gt; oudsm For example:\n$ helm install --namespace oudsm --values logging-override-values.yaml my-oud-ds-rs oudsm If the oudsm chart is already installed then update the configuration with the ELK configuration from the previous steps:\n$ helm upgrade --namespace \u0026lt;namespace\u0026gt; --values \u0026lt;valuesfile.yaml\u0026gt; \u0026lt;releasename\u0026gt; oudsm For example:\n$ helm upgrade --namespace oudsm --values logging-override-values.yaml my-oud-ds-rs oudsm Configure ElasticSearch List the PODs in your namespace:\n$ kubectl get pods -o wide -n \u0026lt;namespace\u0026gt; For example:\n$ kubectl get pods -o wide -n oudsm Output will be similar to the following:\n$ kubectl get pods -o wide -n oudsm NAME READY STATUS RESTARTS AGE IP NODE NOMINATED NODE READINESS GATES oudsm-1 1/1 Running 0 19m 10.244.1.66 10.89.73.203 \u0026lt;none\u0026gt; \u0026lt;none\u0026gt; oudsm-es-cluster-0 1/1 Running 0 19m 10.244.1.69 10.89.73.203 \u0026lt;none\u0026gt; \u0026lt;none\u0026gt; oudsm-es-cluster-1 1/1 Running 0 18m 10.244.2.125 10.89.73.204 \u0026lt;none\u0026gt; \u0026lt;none\u0026gt; oudsm-es-cluster-2 1/1 Running 0 17m 10.244.1.70 10.89.73.203 \u0026lt;none\u0026gt; \u0026lt;none\u0026gt; oudsm-kibana-6bbd487d66-dr662 1/1 Running 0 19m 10.244.1.68 10.89.73.203 \u0026lt;none\u0026gt; \u0026lt;none\u0026gt; oudsm-logstash-56f4665997-vbx4q 1/1 Running 0 19m 10.244.1.67 10.89.73.203 \u0026lt;none\u0026gt; \u0026lt;none\u0026gt; From this, identify the ElasticSearch POD, oudsm-es-cluster-0.\nRun the port-forward command to allow 
ElasticSearch to be listening on port 9200:\n$ kubectl port-forward oudsm-es-cluster-0 9200:9200 --namespace=\u0026lt;namespace\u0026gt; \u0026amp; For example:\n$ kubectl port-forward oudsm-es-cluster-0 9200:9200 --namespace=oudsm \u0026amp; [1] 98458 bash-4.2$ Forwarding from 127.0.0.1:9200 -\u0026gt; 9200 Forwarding from [::1]:9200 -\u0026gt; 9200 Verify that ElasticSearch is running by interrogating port 9200:\n$ curl http://localhost:9200 Handling connection for 9200 { \u0026quot;name\u0026quot; : \u0026quot;oudsm-es-cluster-0\u0026quot;, \u0026quot;cluster_name\u0026quot; : \u0026quot;OUD-elk\u0026quot;, \u0026quot;cluster_uuid\u0026quot; : \u0026quot;w5LKK98RRp-LMoCGA2AnsA\u0026quot;, \u0026quot;version\u0026quot; : { \u0026quot;number\u0026quot; : \u0026quot;6.4.3\u0026quot;, \u0026quot;build_flavor\u0026quot; : \u0026quot;default\u0026quot;, \u0026quot;build_type\u0026quot; : \u0026quot;tar\u0026quot;, \u0026quot;build_hash\u0026quot; : \u0026quot;fe40335\u0026quot;, \u0026quot;build_date\u0026quot; : \u0026quot;2018-10-30T23:17:19.084789Z\u0026quot;, \u0026quot;build_snapshot\u0026quot; : false, \u0026quot;lucene_version\u0026quot; : \u0026quot;7.4.0\u0026quot;, \u0026quot;minimum_wire_compatibility_version\u0026quot; : \u0026quot;5.6.0\u0026quot;, \u0026quot;minimum_index_compatibility_version\u0026quot; : \u0026quot;5.0.0\u0026quot; }, \u0026quot;tagline\u0026quot; : \u0026quot;You Know, for Search\u0026quot; } Verify Using the Kibana Application List the Kibana application service using the following command:\n$ kubectl get svc -o wide -n \u0026lt;namespace\u0026gt; | grep kibana For example:\n$ kubectl get svc -o wide -n oudsm | grep kibana Output will be similar to the following:\noudsm-kibana NodePort 10.103.92.84 \u0026lt;none\u0026gt; 5601:31199/TCP 21m app=kibana In this example, the port to access Kibana application via a Web browser will be 31199.\nEnter the following URL in a browser to access the Kibana 
application:\nhttp://\u0026lt;hostname\u0026gt;:\u0026lt;NodePort\u0026gt;/app/kibana\nFor example:\nhttp://myserver:31199/app/kibana\nFrom the Kibana Portal navigate to:\nManagement -\u0026gt; Index Patterns\nCreate an Index Pattern using the pattern \u0026lsquo;*\u0026rsquo;\nNavigate to Discover : from here you should be able to see logs from the Oracle Unified Directory Services Manager environment.\n" +}, +{ + "uri": "/fmw-kubernetes/21.4.2/oud/manage-oud-containers/monitoring-oud-instance/", + "title": "b) Monitoring an Oracle Unified Directory Instance", + "tags": [], + "description": "Describes the steps for Monitoring the Oracle Unified Directory environment.", + "content": " Introduction Install Prometheus and Grafana Create a Kubernetes Namespace Add Prometheus and Grafana Helm Repositories Install the Prometheus Operator View Prometheus and Grafana Objects Created Add the NodePort Verify Using Grafana GUI Introduction After the Oracle Unified Directory instance is set up you can monitor it using Prometheus and Grafana.\nInstall Prometheus and Grafana Create a Kubernetes Namespace Create a Kubernetes namespace to provide a scope for Prometheus and Grafana objects such as pods and services that you create in the environment. To create your namespace issue the following command:\n$ kubectl create ns mypgns namespace/mypgns created Add Prometheus and Grafana Helm Repositories Add the Prometheus and Grafana Helm repositories by issuing the following commands:\n$ helm repo add prometheus https://prometheus-community.github.io/helm-charts \u0026quot;prometheus\u0026quot; has been added to your repositories $ helm repo add stable https://kubernetes-charts.storage.googleapis.com/ \u0026quot;stable\u0026quot; has been added to your repositories $ helm repo update Hang tight while we grab the latest from your chart repositories... 
...Successfully got an update from the \u0026quot;prometheus\u0026quot; chart repository ...Successfully got an update from the \u0026quot;stable\u0026quot; chart repository Update Complete. Happy Helming! $ Install the Prometheus Operator Install the Prometheus Operator using the helm command:\n$ helm install \u0026lt;release_name\u0026gt; prometheus/kube-prometheus-stack grafana.adminPassword=\u0026lt;password\u0026gt; -n \u0026lt;namespace\u0026gt; For example:\n$ helm install mypg prometheus/kube-prometheus-stack grafana.adminPassword=\u0026lt;password\u0026gt; -n mypgns Output should be similar to the following:\nNAME: mypg LAST DEPLOYED: Mon Oct 12 02:05:41 2020 NAMESPACE: mypgns STATUS: deployed REVISION: 1 NOTES: kube-prometheus-stack has been installed. Check its status by running: kubectl --namespace mypgns get pods -l \u0026quot;release=mypg\u0026quot; Visit https://github.com/prometheus-operator/kube-prometheus for instructions on how to create \u0026amp; configure Alertmanager and Prometheus instances using the Operator. 
View Prometheus and Grafana Objects Created View the objects created for Prometheus and Grafana by issuing the following command:\n$ kubectl get all,service,pod -o wide -n \u0026lt;namespace\u0026gt; For example:\n$ kubectl get all,service,pod -o wide -n mypgns NAME READY STATUS RESTARTS AGE IP NODE NOMINATED NODE READINESS GATES pod/alertmanager-mypg-kube-prometheus-stack-alertmanager-0 2/2 Running 0 25m 10.244.1.25 10.89.73.203 \u0026lt;none\u0026gt; \u0026lt;none\u0026gt; pod/mypg-grafana-b7d4fbfb-jzccm 2/2 Running 0 25m 10.244.2.140 10.89.73.204 \u0026lt;none\u0026gt; \u0026lt;none\u0026gt; pod/mypg-kube-prometheus-stack-operator-7fb485bbcd-lbh9d 2/2 Running 0 25m 10.244.2.139 10.89.73.204 \u0026lt;none\u0026gt; \u0026lt;none\u0026gt; pod/mypg-kube-state-metrics-86dfdf9c75-nvbss 1/1 Running 0 25m 10.244.1.146 10.89.73.203 \u0026lt;none\u0026gt; \u0026lt;none\u0026gt; pod/mypg-prometheus-node-exporter-29dzd 1/1 Running 0 25m 10.244.2.141 10.89.73.204 \u0026lt;none\u0026gt; \u0026lt;none\u0026gt; pod/prometheus-mypg-kube-prometheus-stack-prometheus-0 3/3 Running 0 25m 10.244.2.140 10.89.73.203 \u0026lt;none\u0026gt; \u0026lt;none\u0026gt; NAME TYPE CLUSTER-IP EXTERNAL-IP PORT(S) AGE SELECTOR service/alertmanager-operated ClusterIP None \u0026lt;none\u0026gt; 9093/TCP,9094/TCP,9094/UDP 25m app=alertmanager service/mypg-grafana ClusterIP 10.111.28.76 \u0026lt;none\u0026gt; 80/TCP 25m app.kubernetes.io/instance=mypg,app.kubernetes.io/name=grafana service/mypg-kube-prometheus-stack-alertmanager ClusterIP 10.103.83.97 \u0026lt;none\u0026gt; 9093/TCP 25m alertmanager=mypg-kube-prometheus-stack-alertmanager,app=alertmanager service/mypg-kube-prometheus-stack-operator ClusterIP 10.110.216.204 \u0026lt;none\u0026gt; 8080/TCP,443/TCP 25m app=kube-prometheus-stack-operator,release=mypg service/mypg-kube-prometheus-stack-prometheus ClusterIP 10.104.11.9 \u0026lt;none\u0026gt; 9090/TCP 25m app=prometheus,prometheus=mypg-kube-prometheus-stack-prometheus 
service/mypg-kube-state-metrics ClusterIP 10.109.172.125 \u0026lt;none\u0026gt; 8080/TCP 25m app.kubernetes.io/instance=mypg,app.kubernetes.io/name=kube-state-metrics service/mypg-prometheus-node-exporter ClusterIP 10.110.249.92 \u0026lt;none\u0026gt; 9100/TCP 25m app=prometheus-node-exporter,release=mypg service/prometheus-operated ClusterIP None \u0026lt;none\u0026gt; 9090/TCP 25m app=prometheus NAME DESIRED CURRENT READY UP-TO-DATE AVAILABLE NODE SELECTOR AGE CONTAINERS IMAGES SELECTOR daemonset.apps/mypg-prometheus-node-exporter 3 3 0 3 0 \u0026lt;none\u0026gt; 25m node-exporter quay.io/prometheus/node-exporter:v1.0.1 app=prometheus-node-exporter,release=mypg NAME READY UP-TO-DATE AVAILABLE AGE CONTAINERS IMAGES SELECTOR deployment.apps/mypg-grafana 1/1 1 1 25m grafana-sc-dashboard,grafana kiwigrid/k8s-sidecar:0.1.209,grafana/grafana:7.2.0 app.kubernetes.io/instance=mypg,app.kubernetes.io/name=grafana deployment.apps/mypg-kube-prometheus-stack-operator 1/1 1 1 25m kube-prometheus-stack,tls-proxy quay.io/prometheus-operator/prometheus-operator:v0.42.1,squareup/ghostunnel:v1.5.2 app=kube-prometheus-stack-operator,release=mypg deployment.apps/mypg-kube-state-metrics 1/1 1 1 25m kube-state-metrics quay.io/coreos/kube-state-metrics:v1.9.7 app.kubernetes.io/name=kube-state-metrics NAME DESIRED CURRENT READY AGE CONTAINERS IMAGES SELECTOR replicaset.apps/mypg-grafana-b7d4fbfb 1 1 1 25m grafana-sc-dashboard,grafana kiwigrid/k8s-sidecar:0.1.209,grafana/grafana:7.2.0 app.kubernetes.io/instance=mypg,app.kubernetes.io/name=grafana,pod-template-hash=b7d4fbfb replicaset.apps/mypg-kube-prometheus-stack-operator-7fb485bbcd 1 1 1 25m kube-prometheus-stack,tls-proxy quay.io/prometheus-operator/prometheus-operator:v0.42.1,squareup/ghostunnel:v1.5.2 app=kube-prometheus-stack-operator,pod-template-hash=7fb485bbcd,release=mypg replicaset.apps/mypg-kube-state-metrics-86dfdf9c75 1 1 1 25m kube-state-metrics quay.io/coreos/kube-state-metrics:v1.9.7 
app.kubernetes.io/name=kube-state-metrics,pod-template-hash=86dfdf9c75 NAME READY AGE CONTAINERS IMAGES statefulset.apps/alertmanager-mypg-kube-prometheus-stack-alertmanager 1/1 25m alertmanager,config-reloader quay.io/prometheus/alertmanager:v0.21.0,jimmidyson/configmap-reload:v0.4.0 statefulset.apps/prometheus-mypg-kube-prometheus-stack-prometheus 0/1 25m prometheus,prometheus-config-reloader,rules-configmap-reloader quay.io/prometheus/prometheus:v2.21.0,quay.io/prometheus-operator/prometheus-config-reloader:v0.42.1,docker.io/jimmidyson/configmap-reload:v0.4.0 Add the NodePort Edit the grafana service to add the NodePort in the service.nodeport=\u0026lt;nodeport\u0026gt; and type=NodePort and save:\n$ kubectl edit service/prometheus-grafana -n \u0026lt;namespace\u0026gt; ports: - name: service nodePort: 30091 port: 80 protocol: TCP targetPort: 3000 selector: app.kubernetes.io/instance: prometheus-operator app.kubernetes.io/name: grafana sessionAffinity: None type: NodePort Verify Using Grafana GUI Access the Grafana GUI using http://\u0026lt;HostIP\u0026gt;:\u0026lt;nodeport\u0026gt; with the default username=admin and password=grafana.adminPassword:\nCheck the Prometheus datasource from the DataSource pane:\nAdd the customized k8cluster view dashboard json to view the cluster monitoring dashboard, by importing the following json file.\nDownload the JSON file from monitoring a Kubernetes cluster using Prometheus from https://grafana.com/grafana/dashboards/10856. 
Import the downloaded json using the import option.\nVerify your installation by viewing some of the customized dashboard views.\n" +}, +{ + "uri": "/fmw-kubernetes/21.4.2/oudsm/manage-oudsm-containers/monitoring-oudsm-instance/", + "title": "b) Monitoring an Oracle Unified Directory Services Manager Instance", + "tags": [], + "description": "Describes the steps for Monitoring the Oracle Unified Directory Services Manager environment.", + "content": " Introduction Install Prometheus and Grafana Create a Kubernetes Namespace Add Prometheus and Grafana Helm Repositories Install the Prometheus Operator View Prometheus and Grafana Objects Created Add the NodePort Verify Using Grafana GUI Introduction After the Oracle Unified Directory Services Manager instance is set up you can monitor it using Prometheus and Grafana.\nInstall Prometheus and Grafana Create a Kubernetes Namespace Create a Kubernetes namespace to provide a scope for Prometheus and Grafana objects such as pods and services that you create in the environment. To create your namespace issue the following command:\n$ kubectl create ns mypgns namespace/mypgns created Add Prometheus and Grafana Helm Repositories Add the Prometheus and Grafana Helm repositories by issuing the following commands:\n$ helm repo add prometheus https://prometheus-community.github.io/helm-charts \u0026quot;prometheus\u0026quot; has been added to your repositories $ helm repo add stable https://kubernetes-charts.storage.googleapis.com/ \u0026quot;stable\u0026quot; has been added to your repositories $ helm repo update Hang tight while we grab the latest from your chart repositories... ...Successfully got an update from the \u0026quot;prometheus\u0026quot; chart repository ...Successfully got an update from the \u0026quot;stable\u0026quot; chart repository Update Complete. Happy Helming! 
$ Install the Prometheus Operator Install the Prometheus Operator using the helm command:\n$ helm install \u0026lt;release_name\u0026gt; prometheus/kube-prometheus-stack grafana.adminPassword=\u0026lt;password\u0026gt; -n \u0026lt;namespace\u0026gt; For example:\n$ helm install mypg prometheus/kube-prometheus-stack grafana.adminPassword=\u0026lt;password\u0026gt; -n mypgns Output should be similar to the following:\nNAME: mypg LAST DEPLOYED: Mon Oct 12 02:05:41 2020 NAMESPACE: mypgns STATUS: deployed REVISION: 1 NOTES: kube-prometheus-stack has been installed. Check its status by running: kubectl --namespace mypgns get pods -l \u0026quot;release=mypg\u0026quot; Visit https://github.com/prometheus-operator/kube-prometheus for instructions on how to create \u0026amp; configure Alertmanager and Prometheus instances using the Operator. View Prometheus and Grafana Objects Created View the objects created for Prometheus and Grafana by issuing the following command:\n$ kubectl get all,service,pod -o wide -n \u0026lt;namespace\u0026gt; For example:\n$ kubectl get all,service,pod -o wide -n mypgns NAME READY STATUS RESTARTS AGE IP NODE NOMINATED NODE READINESS GATES pod/alertmanager-mypg-kube-prometheus-stack-alertmanager-0 2/2 Running 0 25m 10.244.1.25 10.89.73.203 \u0026lt;none\u0026gt; \u0026lt;none\u0026gt; pod/mypg-grafana-b7d4fbfb-jzccm 2/2 Running 0 25m 10.244.2.140 10.89.73.204 \u0026lt;none\u0026gt; \u0026lt;none\u0026gt; pod/mypg-kube-prometheus-stack-operator-7fb485bbcd-lbh9d 2/2 Running 0 25m 10.244.2.139 10.89.73.204 \u0026lt;none\u0026gt; \u0026lt;none\u0026gt; pod/mypg-kube-state-metrics-86dfdf9c75-nvbss 1/1 Running 0 25m 10.244.1.146 10.89.73.203 \u0026lt;none\u0026gt; \u0026lt;none\u0026gt; pod/mypg-prometheus-node-exporter-29dzd 1/1 Running 0 25m 10.244.2.141 10.89.73.204 \u0026lt;none\u0026gt; \u0026lt;none\u0026gt; pod/prometheus-mypg-kube-prometheus-stack-prometheus-0 3/3 Running 0 25m 10.244.2.142 10.89.73.203 \u0026lt;none\u0026gt; 
\u0026lt;none\u0026gt; NAME TYPE CLUSTER-IP EXTERNAL-IP PORT(S) AGE SELECTOR service/alertmanager-operated ClusterIP None \u0026lt;none\u0026gt; 9093/TCP,9094/TCP,9094/UDP 25m app=alertmanager service/mypg-grafana ClusterIP 10.111.28.76 \u0026lt;none\u0026gt; 80/TCP 25m app.kubernetes.io/instance=mypg,app.kubernetes.io/name=grafana service/mypg-kube-prometheus-stack-alertmanager ClusterIP 10.103.83.97 \u0026lt;none\u0026gt; 9093/TCP 25m alertmanager=mypg-kube-prometheus-stack-alertmanager,app=alertmanager service/mypg-kube-prometheus-stack-operator ClusterIP 10.110.216.204 \u0026lt;none\u0026gt; 8080/TCP,443/TCP 25m app=kube-prometheus-stack-operator,release=mypg service/mypg-kube-prometheus-stack-prometheus ClusterIP 10.104.11.9 \u0026lt;none\u0026gt; 9090/TCP 25m app=prometheus,prometheus=mypg-kube-prometheus-stack-prometheus service/mypg-kube-state-metrics ClusterIP 10.109.172.125 \u0026lt;none\u0026gt; 8080/TCP 25m app.kubernetes.io/instance=mypg,app.kubernetes.io/name=kube-state-metrics service/mypg-prometheus-node-exporter ClusterIP 10.110.249.92 \u0026lt;none\u0026gt; 9100/TCP 25m app=prometheus-node-exporter,release=mypg service/prometheus-operated ClusterIP None \u0026lt;none\u0026gt; 9090/TCP 25m app=prometheus NAME DESIRED CURRENT READY UP-TO-DATE AVAILABLE NODE SELECTOR AGE CONTAINERS IMAGES SELECTOR daemonset.apps/mypg-prometheus-node-exporter 3 3 0 3 0 \u0026lt;none\u0026gt; 25m node-exporter quay.io/prometheus/node-exporter:v1.0.1 app=prometheus-node-exporter,release=mypg NAME READY UP-TO-DATE AVAILABLE AGE CONTAINERS IMAGES SELECTOR deployment.apps/mypg-grafana 1/1 1 1 25m grafana-sc-dashboard,grafana kiwigrid/k8s-sidecar:0.1.209,grafana/grafana:7.2.0 app.kubernetes.io/instance=mypg,app.kubernetes.io/name=grafana deployment.apps/mypg-kube-prometheus-stack-operator 1/1 1 1 25m kube-prometheus-stack,tls-proxy quay.io/prometheus-operator/prometheus-operator:v0.42.1,squareup/ghostunnel:v1.5.2 app=kube-prometheus-stack-operator,release=mypg 
deployment.apps/mypg-kube-state-metrics 1/1 1 1 25m kube-state-metrics quay.io/coreos/kube-state-metrics:v1.9.7 app.kubernetes.io/name=kube-state-metrics NAME DESIRED CURRENT READY AGE CONTAINERS IMAGES SELECTOR replicaset.apps/mypg-grafana-b7d4fbfb 1 1 1 25m grafana-sc-dashboard,grafana kiwigrid/k8s-sidecar:0.1.209,grafana/grafana:7.2.0 app.kubernetes.io/instance=mypg,app.kubernetes.io/name=grafana,pod-template-hash=b7d4fbfb replicaset.apps/mypg-kube-prometheus-stack-operator-7fb485bbcd 1 1 1 25m kube-prometheus-stack,tls-proxy quay.io/prometheus-operator/prometheus-operator:v0.42.1,squareup/ghostunnel:v1.5.2 app=kube-prometheus-stack-operator,pod-template-hash=7fb485bbcd,release=mypg replicaset.apps/mypg-kube-state-metrics-86dfdf9c75 1 1 1 25m kube-state-metrics quay.io/coreos/kube-state-metrics:v1.9.7 app.kubernetes.io/name=kube-state-metrics,pod-template-hash=86dfdf9c75 NAME READY AGE CONTAINERS IMAGES statefulset.apps/alertmanager-mypg-kube-prometheus-stack-alertmanager 1/1 25m alertmanager,config-reloader quay.io/prometheus/alertmanager:v0.21.0,jimmidyson/configmap-reload:v0.4.0 statefulset.apps/prometheus-mypg-kube-prometheus-stack-prometheus 0/1 25m prometheus,prometheus-config-reloader,rules-configmap-reloader quay.io/prometheus/prometheus:v2.21.0,quay.io/prometheus-operator/prometheus-config-reloader:v0.42.1,docker.io/jimmidyson/configmap-reload:v0.4.0 Add the NodePort Edit the grafana service to add the NodePort in the service.nodeport=\u0026lt;nodeport\u0026gt; and type=NodePort and save:\n$ kubectl edit service/prometheus-grafana -n \u0026lt;namespace\u0026gt; ports: - name: service nodePort: 30091 port: 80 protocol: TCP targetPort: 3000 selector: app.kubernetes.io/instance: prometheus-operator app.kubernetes.io/name: grafana sessionAffinity: None type: NodePort Verify Using Grafana GUI Access the Grafana GUI using http://\u0026lt;HostIP\u0026gt;:\u0026lt;nodeport\u0026gt; with the default username=admin and password=grafana.adminPassword:\nCheck the 
Prometheus datasource from the DataSource pane:\nAdd the customized k8cluster view dashboard json to view the cluster monitoring dashboard, by importing the following json file.\nDownload the JSON file from monitoring a Kubernetes cluster using Prometheus from https://grafana.com/grafana/dashboards/10856. Import the downloaded json using the import option.\nVerify your installation by viewing some of the customized dashboard views.\n" +}, +{ + "uri": "/fmw-kubernetes/21.4.2/oam/manage-oam-domains/domain-lifecycle/", + "title": "a. Domain Life Cycle", + "tags": [], + "description": "Learn about the domain life cycle of an OAM domain.", + "content": "As OAM domains use the WebLogic Kubernetes Operator, domain lifecycle operations are managed using the WebLogic Kubernetes Operator itself.\nThis document shows the basic operations for starting, stopping and scaling servers in the OAM domain.\nFor more detailed information refer to Domain Life Cycle in the WebLogic Kubernetes Operator documentation.\nDo not use the WebLogic Server Administration Console or Oracle Enterprise Manager Console to start or stop servers.\n View existing OAM servers The default OAM deployment starts the Administration Server (AdminServer), two OAM Managed Servers (oam_server1 and oam_server2) and two OAM Policy Manager servers (oam_policy_mgr1 and oam_policy_mgr2).\nThe deployment also creates, but doesn\u0026rsquo;t start, three extra OAM Managed Servers (oam-server3 to oam-server5) and three more OAM Policy Manager servers (oam_policy_mgr3 to oam_policy_mgr5).\nAll these servers are visible in the WebLogic Server Console https://${MASTERNODE-HOSTNAME}:${MASTERNODE-PORT}/console by navigating to Domain Structure \u0026gt; oamcluster \u0026gt; Environment \u0026gt; Servers.\nTo view the running servers using kubectl, run the following command:\n$ kubectl get pods -n \u0026lt;domain_namespace\u0026gt; For example:\n$ kubectl get pods -n oamns The output should look similar to the 
following:\nNAME READY STATUS RESTARTS AGE accessdomain-adminserver 1/1 Running 0 3h29m accessdomain-create-oam-infra-domain-job-7c9r9 0/1 Completed 0 3h36m accessdomain-oam-policy-mgr1 1/1 Running 0 3h21m accessdomain-oam-policy-mgr2 1/1 Running 0 3h21m accessdomain-oam-server1 1/1 Running 0 3h21m accessdomain-oam-server2 1/1 Running 0 3h21m helper 1/1 Running 0 3h51m nginx-ingress-ingress-nginx-controller-76fb7678f-k8rhq 1/1 Running 0 55m Starting/Scaling up OAM Managed Servers The number of OAM Managed Servers running is dependent on the replicas parameter configured for the cluster. To start more OAM Managed Servers perform the following steps:\n Run the following kubectl command to edit the domain:\n$ kubectl edit domain \u0026lt;domain_uid\u0026gt; -n \u0026lt;domain_namespace\u0026gt; For example:\n$ kubectl edit domain accessdomain -n oamns Note: This opens an edit session for the domain where parameters can be changed using standard vi commands.\n In the edit session search for clusterName: oam_cluster and look for the replicas parameter. By default the replicas parameter is set to \u0026ldquo;2\u0026rdquo; hence two OAM Managed Servers are started (oam_server1 and oam_server2):\n clusters: - clusterName: oam_cluster replicas: 2 serverPod: affinity: podAntiAffinity: preferredDuringSchedulingIgnoredDuringExecution: - podAffinityTerm: labelSelector: matchExpressions: - key: weblogic.clusterName operator: In values: - $(CLUSTER_NAME) To start more OAM Managed Servers, increase the replicas value as desired. 
In the example below, two more managed servers will be started by setting replicas to \u0026ldquo;4\u0026rdquo;:\n clusters: - clusterName: oam_cluster replicas: 4 serverPod: affinity: podAntiAffinity: preferredDuringSchedulingIgnoredDuringExecution: - podAffinityTerm: labelSelector: matchExpressions: - key: weblogic.clusterName operator: In values: - $(CLUSTER_NAME) Save the file and exit (:wq!)\nThe output will look similar to the following:\ndomain.weblogic.oracle/accessdomain edited Run the following kubectl command to view the pods:\n$ kubectl get pods -n \u0026lt;domain_namespace\u0026gt; For example:\n$ kubectl get pods -n oamns The output will look similar to the following:\nNAME READY STATUS RESTARTS AGE accessdomain-adminserver 1/1 Running 0 3h33m accessdomain-create-oam-infra-domain-job-7c9r9 0/1 Completed 0 3h40m accessdomain-oam-policy-mgr1 1/1 Running 0 3h25m accessdomain-oam-policy-mgr2 1/1 Running 0 3h25m accessdomain-oam-server1 1/1 Running 0 3h25m accessdomain-oam-server2 1/1 Running 0 3h25m accessdomain-oam-server3 0/1 Running 0 9s accessdomain-oam-server4 0/1 Running 0 9s helper 1/1 Running 0 3h55m nginx-ingress-ingress-nginx-controller-76fb7678f-k8rhq 1/1 Running 0 59m Two new pods (accessdomain-oam-server3 and accessdomain-oam-server4) are started, but currently have a READY status of 0/1. This means oam_server3 and oam_server4 are not currently running but are in the process of starting. 
The servers will take several minutes to start so keep executing the command until READY shows 1/1:\nNAME READY STATUS RESTARTS AGE accessdomain-adminserver 1/1 Running 0 3h37m accessdomain-create-oam-infra-domain-job-7c9r9 0/1 Completed 0 3h43m accessdomain-oam-policy-mgr1 1/1 Running 0 3h29m accessdomain-oam-policy-mgr2 1/1 Running 0 3h29m accessdomain-oam-server1 1/1 Running 0 3h29m accessdomain-oam-server2 1/1 Running 0 3h29m accessdomain-oam-server3 1/1 Running 0 3m45s accessdomain-oam-server4 1/1 Running 0 3m45s helper 1/1 Running 0 3h59m nginx-ingress-ingress-nginx-controller-76fb7678f-k8rhq 1/1 Running 0 63m Note: To check what is happening during server startup when READY is 0/1, run the following command to view the log of the pod that is starting:\n$ kubectl logs \u0026lt;pod\u0026gt; -n \u0026lt;domain_namespace\u0026gt; For example:\n$ kubectl logs accessdomain-oam-server3 -n oamns To start more OAM Policy Manager servers, repeat the previous commands but change the replicas parameter for the policy_cluster. In the example below replicas has been increased to \u0026ldquo;4\u0026rdquo;:\n - clusterName: policy_cluster replicas: 4 serverPod: affinity: podAntiAffinity: preferredDuringSchedulingIgnoredDuringExecution: - podAffinityTerm: labelSelector: matchExpressions: - key: weblogic.clusterName operator: In values: - $(CLUSTER_NAME) After saving the changes two new pods will be started (accessdomain-oam-policy-mgr3 and accessdomain-oam-policy-mgr4). After a few minutes they will have a READY status of 1/1. 
In the example below accessdomain-oam-policy-mgr3 and accessdomain-oam-policy-mgr4 are started:\nNAME READY STATUS RESTARTS AGE accessdomain-adminserver 1/1 Running 0 3h43m accessdomain-create-oam-infra-domain-job-7c9r9 0/1 Completed 0 3h49m accessdomain-oam-policy-mgr1 1/1 Running 0 3h35m accessdomain-oam-policy-mgr2 1/1 Running 0 3h35m accessdomain-oam-policy-mgr3 1/1 Running 0 4m18s accessdomain-oam-policy-mgr4 1/1 Running 0 4m18s accessdomain-oam-server1 1/1 Running 0 3h35m accessdomain-oam-server2 1/1 Running 0 3h35m accessdomain-oam-server3 1/1 Running 0 9m27s accessdomain-oam-server4 1/1 Running 0 9m27s helper 1/1 Running 0 4h4m nginx-ingress-ingress-nginx-controller-76fb7678f-k8rhq 1/1 Running 0 69m Stopping/Scaling down OAM Managed Servers As mentioned in the previous section, the number of OAM Managed Servers running is dependent on the replicas parameter configured for the cluster. To stop one or more OAM Managed Servers, perform the following:\n Run the following kubectl command to edit the domain:\n$ kubectl edit domain \u0026lt;domain_uid\u0026gt; -n \u0026lt;domain_namespace\u0026gt; For example:\n$ kubectl edit domain accessdomain -n oamns In the edit session search for clusterName: oam_cluster and look for the replicas parameter. In the example below replicas is set to \u0026ldquo;4\u0026rdquo;, hence four OAM Managed Servers are started (access-domain-oam_server1 - access-domain-oam_server4):\n clusters: - clusterName: oam_cluster replicas: 4 serverPod: affinity: podAntiAffinity: preferredDuringSchedulingIgnoredDuringExecution: - podAffinityTerm: labelSelector: matchExpressions: - key: weblogic.clusterName operator: In values: - $(CLUSTER_NAME) To stop OAM Managed Servers, decrease the replicas value as desired. 
In the example below, we will stop two managed servers by setting replicas to \u0026ldquo;2\u0026rdquo;:\n clusters: - clusterName: oam_cluster replicas: 2 serverPod: affinity: podAntiAffinity: preferredDuringSchedulingIgnoredDuringExecution: - podAffinityTerm: labelSelector: matchExpressions: - key: weblogic.clusterName operator: In values: - $(CLUSTER_NAME) Save the file and exit (:wq!)\n Run the following kubectl command to view the pods:\n$ kubectl get pods -n \u0026lt;domain_namespace\u0026gt; For example:\n$ kubectl get pods -n oamns The output will look similar to the following:\nNAME READY STATUS RESTARTS AGE accessdomain-adminserver 1/1 Running 0 3h45m accessdomain-create-oam-infra-domain-job-7c9r9 0/1 Completed 0 3h51m accessdomain-oam-policy-mgr1 1/1 Running 0 3h37m accessdomain-oam-policy-mgr2 1/1 Running 0 3h37m accessdomain-oam-policy-mgr3 1/1 Running 0 6m18s accessdomain-oam-policy-mgr4 1/1 Running 0 6m18s accessdomain-oam-server1 1/1 Running 0 3h37m accessdomain-oam-server2 1/1 Running 0 3h37m accessdomain-oam-server3 1/1 Running 0 11m accessdomain-oam-server4 1/1 Terminating 0 11m helper 1/1 Running 0 4h6m nginx-ingress-ingress-nginx-controller-76fb7678f-k8rhq 1/1 Running 0 71m One pod now has a STATUS of Terminating (accessdomain-oam-server4). The server will take a minute or two to stop. Once terminated the other pod (accessdomain-oam-server3) will move to Terminating and then stop. 
Keep executing the command until the pods have disappeared:\nNAME READY STATUS RESTARTS AGE accessdomain-adminserver 1/1 Running 0 3h48m accessdomain-create-oam-infra-domain-job-7c9r9 0/1 Completed 0 3h54m accessdomain-oam-policy-mgr1 1/1 Running 0 3h40m accessdomain-oam-policy-mgr2 1/1 Running 0 3h40m accessdomain-oam-policy-mgr3 1/1 Running 0 9m18s accessdomain-oam-policy-mgr4 1/1 Running 0 9m18s accessdomain-oam-server1 1/1 Running 0 3h40m accessdomain-oam-server2 1/1 Running 0 3h40m helper 1/1 Running 0 4h9m nginx-ingress-ingress-nginx-controller-76fb7678f-k8rhq 1/1 Running 0 74m To stop OAM Policy Manager servers, repeat the previous commands but change the replicas parameter for the policy_cluster. In the example below replicas has been decreased from \u0026ldquo;4\u0026rdquo; to \u0026ldquo;2\u0026rdquo;:\n - clusterName: policy_cluster replicas: 2 serverPod: affinity: podAntiAffinity: preferredDuringSchedulingIgnoredDuringExecution: - podAffinityTerm: labelSelector: matchExpressions: - key: weblogic.clusterName operator: In values: - $(CLUSTER_NAME) After saving the changes one pod will move to a STATUS of Terminating (accessdomain-oam-policy-mgr4).\nNAME READY STATUS RESTARTS AGE accessdomain-adminserver 1/1 Running 0 3h49m accessdomain-create-oam-infra-domain-job-7c9r9 0/1 Completed 0 3h55m accessdomain-oam-policy-mgr1 1/1 Running 0 3h41m accessdomain-oam-policy-mgr2 1/1 Running 0 3h41m accessdomain-oam-policy-mgr3 1/1 Running 0 10m accessdomain-oam-policy-mgr4 1/1 Terminating 0 10m accessdomain-oam-server1 1/1 Running 0 3h41m accessdomain-oam-server2 1/1 Running 0 3h41m helper 1/1 Running 0 4h11m nginx-ingress-ingress-nginx-controller-76fb7678f-k8rhq 1/1 Running 0 75m The pods will take a minute or two to stop, so keep executing the command until the pods have disappeared:\nNAME READY STATUS RESTARTS AGE accessdomain-adminserver 1/1 Running 0 3h50m accessdomain-create-oam-infra-domain-job-7c9r9 0/1 Completed 0 3h57m accessdomain-oam-policy-mgr1 1/1 
Running 0 3h42m accessdomain-oam-policy-mgr2 1/1 Running 0 3h42m accessdomain-oam-server1 1/1 Running 0 3h42m accessdomain-oam-server2 1/1 Running 0 3h42m helper 1/1 Running 0 4h12m nginx-ingress-ingress-nginx-controller-76fb7678f-k8rhq 1/1 Running 0 76m Stopping and Starting the Administration Server and Managed Servers To stop all the OAM Managed Servers and the Administration Server in one operation:\n Run the following kubectl command to edit the domain:\n$ kubectl edit domain \u0026lt;domain_uid\u0026gt; -n \u0026lt;domain_namespace\u0026gt; For example:\n$ kubectl edit domain accessdomain -n oamns In the edit session search for serverStartPolicy: IF_NEEDED:\n volumes: - name: weblogic-domain-storage-volume persistentVolumeClaim: claimName: accessdomain-domain-pvc serverStartPolicy: IF_NEEDED webLogicCredentialsSecret: name: accessdomain-credentials Change serverStartPolicy: IF_NEEDED to NEVER as follows:\n volumes: - name: weblogic-domain-storage-volume persistentVolumeClaim: claimName: accessdomain-domain-pvc serverStartPolicy: NEVER webLogicCredentialsSecret: name: accessdomain-credentials Save the file and exit (:wq!).\n Run the following kubectl command to view the pods:\n$ kubectl get pods -n \u0026lt;domain_namespace\u0026gt; For example:\n$ kubectl get pods -n oamns The output will look similar to the following:\nNAME READY STATUS RESTARTS AGE accessdomain-adminserver 1/1 Terminating 0 3h52m accessdomain-create-oam-infra-domain-job-7c9r9 0/1 Completed 0 3h59m accessdomain-oam-policy-mgr1 1/1 Terminating 0 3h44m accessdomain-oam-policy-mgr2 1/1 Terminating 0 3h44m accessdomain-oam-server1 1/1 Terminating 0 3h44m accessdomain-oam-server2 1/1 Terminating 0 3h44m helper 1/1 Running 0 4h14m nginx-ingress-ingress-nginx-controller-76fb7678f-k8rhq 1/1 Running 0 78m The Administration Server pods and Managed Server pods will move to a STATUS of Terminating. 
After a few minutes, run the command again and the pods should have disappeared:\nNAME READY STATUS RESTARTS AGE accessdomain-create-oam-infra-domain-job-7c9r9 0/1 Completed 0 4h helper 1/1 Running 0 4h15m nginx-ingress-ingress-nginx-controller-76fb7678f-k8rhq 1/1 Running 0 80m To start the Administration Server and Managed Servers up again, repeat the previous steps but change serverStartPolicy: NEVER to IF_NEEDED as follows:\n volumes: - name: weblogic-domain-storage-volume persistentVolumeClaim: claimName: accessdomain-domain-pvc serverStartPolicy: IF_NEEDED webLogicCredentialsSecret: name: accessdomain-credentials Run the following kubectl command to view the pods:\n$ kubectl get pods -n \u0026lt;domain_namespace\u0026gt; For example:\n$ kubectl get pods -n oamns The output will look similar to the following:\nNAME READY STATUS RESTARTS AGE accessdomain-create-oam-infra-domain-job-7c9r9 0/1 Completed 0 4h1m accessdomain-introspector-jwqxw 1/1 Running 0 10s helper 1/1 Running 0 4h17m nginx-ingress-ingress-nginx-controller-76fb7678f-k8rhq 1/1 Running 0 81m The Administration Server pod will start followed by the OAM Managed Servers pods. This process will take several minutes, so keep executing the command until all the pods are running with READY status 1/1 :\nNAME READY STATUS RESTARTS AGE accessdomain-adminserver 1/1 Running 0 10m accessdomain-create-oam-infra-domain-job-7c9r9 0/1 Completed 0 4h12m accessdomain-oam-policy-mgr1 1/1 Running 0 7m35s accessdomain-oam-policy-mgr2 1/1 Running 0 7m35s accessdomain-oam-server1 1/1 Running 0 7m35s accessdomain-oam-server2 1/1 Running 0 7m35s helper 1/1 Running 0 4h28m nginx-ingress-ingress-nginx-controller-76fb7678f-k8rhq 1/1 Running 0 92m " +}, +{ + "uri": "/fmw-kubernetes/21.4.2/oam/patch-and-upgrade/patch_an_image/", + "title": "a. 
Patch an image", + "tags": [], + "description": "Instructions on how to update your OAM Kubernetes cluster with a new OAM Docker image.", + "content": "To update your OAM Kubernetes cluster with a new OAM Docker image, first install the new Docker image on all nodes in your Kubernetes cluster.\nOnce the new image is installed, choose one of the following options to update your OAM kubernetes cluster to use the new image:\n Run the kubectl edit domain command Run the kubectl patch domain command In all of the above cases, the WebLogic Kubernetes Operator will restart the Administration Server pod first and then perform a rolling restart on the OAM Managed Servers.\nRun the kubectl edit domain command To update the domain with the kubectl edit domain command, run the following:\n$ kubectl edit domain \u0026lt;domainname\u0026gt; -n \u0026lt;namespace\u0026gt; For example:\n$ kubectl edit domain accessdomain -n oamns Update the image tag to point at the new image, for example:\ndomainHomeInImage: false image: oracle/oam:12.2.1.4.0-new imagePullPolicy: IfNotPresent Save the file and exit (:wq!)\n Run the kubectl patch command To update the domain with the kubectl patch domain command, run the following:\n$ kubectl patch domain \u0026lt;domain\u0026gt; -n \u0026lt;namespace\u0026gt; --type merge -p \u0026#39;{\u0026#34;spec\u0026#34;:{\u0026#34;image\u0026#34;:\u0026#34;newimage:tag\u0026#34;}}\u0026#39; For example:\n$ kubectl patch domain accessdomain -n oamns --type merge -p \u0026#39;{\u0026#34;spec\u0026#34;:{\u0026#34;image\u0026#34;:\u0026#34;oracle/oam:12.2.1.4-new\u0026#34;}}\u0026#39; The output will look similar to the following:\ndomain.weblogic.oracle/accessdomain patched " +}, +{ + "uri": "/fmw-kubernetes/21.4.2/oig/patch-and-upgrade/patch_an_image/", + "title": "a. 
Patch an image", + "tags": [], + "description": "Instructions on how to update your OIG Kubernetes cluster with a new OIG docker image.", + "content": "To update your OIG Kubernetes cluster with a new OIG Docker image, first install the new Docker image on all nodes in your Kubernetes cluster.\nOnce the new image is installed choose one of the following options to update your OIG Kubernetes cluster to use the new image:\n Run the kubectl edit domain command Run the kubectl patch domain command In all of the above cases, the WebLogic Kubernetes Operator will restart the Administration Server pod first and then perform a rolling restart on the OIG Managed Servers.\nRun the kubectl edit domain command To update the domain with the kubectl edit domain command, run the following:\n$ kubectl edit domain \u0026lt;domainname\u0026gt; -n \u0026lt;namespace\u0026gt; For example:\n$ kubectl edit domain governancedomain -n oigns Update the image tag to point at the new image, for example:\ndomainHomeInImage: false image: oracle/oig:12.2.1.4.0-new imagePullPolicy: IfNotPresent Save the file and exit (:wq!)\n Run the kubectl patch command To update the domain with the kubectl patch domain command, run the following:\n$ kubectl patch domain \u0026lt;domain\u0026gt; -n \u0026lt;namespace\u0026gt; --type merge -p \u0026#39;{\u0026#34;spec\u0026#34;:{\u0026#34;image\u0026#34;:\u0026#34;newimage:tag\u0026#34;}}\u0026#39; For example:\n$ kubectl patch domain governancedomain -n oigns --type merge -p \u0026#39;{\u0026#34;spec\u0026#34;:{\u0026#34;image\u0026#34;:\u0026#34;oracle/oig:12.2.1.4-new\u0026#34;}}\u0026#39; The output will look similar to the following:\ndomain.weblogic.oracle/governancedomain patched " +}, +{ + "uri": "/fmw-kubernetes/21.4.2/oig/post-install-config/set_oimfronendurl_using_mbeans/", + "title": "a. 
Post Install Tasks", + "tags": [], + "description": "Perform post install tasks.", + "content": "Follow these post install configuration steps.\n Create a Server Overrides File Set OIMFrontendURL using MBeans Create a Server Overrides File Navigate to the following directory:\ncd $WORKDIR/kubernetes/create-oim-domain/domain-home-on-pv/output/weblogic-domains/governancedomain Create a setUserOverrides.sh with the following contents:\nDERBY_FLAG=false JAVA_OPTIONS=\u0026quot;${JAVA_OPTIONS} -Djava.net.preferIPv4Stack=true\u0026quot; MEM_ARGS=\u0026quot;-Xms8192m -Xmx8192m\u0026quot; Copy the setUserOverrides.sh file to the Administration Server pod:\n$ chmod 755 setUserOverrides.sh $ kubectl cp setUserOverrides.sh oigns/governancedomain-adminserver:/u01/oracle/user_projects/domains/governancedomain/bin/setUserOverrides.sh Where oigns is the OIG namespace and governancedomain is the domain_UID.\n Stop the OIG domain using the following command:\n$ kubectl -n \u0026lt;domain_namespace\u0026gt; patch domains \u0026lt;domain_uid\u0026gt; --type=\u0026#39;json\u0026#39; -p=\u0026#39;[{\u0026#34;op\u0026#34;: \u0026#34;replace\u0026#34;, \u0026#34;path\u0026#34;: \u0026#34;/spec/serverStartPolicy\u0026#34;, \u0026#34;value\u0026#34;: \u0026#34;NEVER\u0026#34; }]\u0026#39; For example:\n$ kubectl -n oigns patch domains governancedomain --type=\u0026#39;json\u0026#39; -p=\u0026#39;[{\u0026#34;op\u0026#34;: \u0026#34;replace\u0026#34;, \u0026#34;path\u0026#34;: \u0026#34;/spec/serverStartPolicy\u0026#34;, \u0026#34;value\u0026#34;: \u0026#34;NEVER\u0026#34; }]\u0026#39; The output will look similar to the following:\ndomain.weblogic.oracle/governancedomain patched Check that all the pods are stopped:\n$ kubectl get pods -n \u0026lt;domain_namespace\u0026gt; For example:\n$ kubectl get pods -n oigns The output will look similar to the following:\nNAME READY STATUS RESTARTS AGE governancedomain-adminserver 1/1 Terminating 0 18h governancedomain-create-fmw-infra-domain-job-8cww8 
0/1 Completed 0 24h governancedomain-oim-server1 1/1 Terminating 0 18h governancedomain-soa-server1 1/1 Terminating 0 18h helper 1/1 Running 0 41h The Administration Server pods and Managed Server pods will move to a STATUS of Terminating. After a few minutes, run the command again and the pods should have disappeared:\nNAME READY STATUS RESTARTS AGE governancedomain-create-fmw-infra-domain-job-8cww8 0/1 Completed 0 24h helper 1/1 Running 0 41h Start the domain using the following command:\n$ kubectl -n \u0026lt;domain_namespace\u0026gt; patch domains \u0026lt;domain_uid\u0026gt; --type=\u0026#39;json\u0026#39; -p=\u0026#39;[{\u0026#34;op\u0026#34;: \u0026#34;replace\u0026#34;, \u0026#34;path\u0026#34;: \u0026#34;/spec/serverStartPolicy\u0026#34;, \u0026#34;value\u0026#34;: \u0026#34;IF_NEEDED\u0026#34; }]\u0026#39; For example:\n$ kubectl -n oigns patch domains governancedomain --type=\u0026#39;json\u0026#39; -p=\u0026#39;[{\u0026#34;op\u0026#34;: \u0026#34;replace\u0026#34;, \u0026#34;path\u0026#34;: \u0026#34;/spec/serverStartPolicy\u0026#34;, \u0026#34;value\u0026#34;: \u0026#34;IF_NEEDED\u0026#34; }]\u0026#39; Run the following kubectl command to view the pods:\n$ kubectl get pods -n \u0026lt;domain_namespace\u0026gt; For example:\n$ kubectl get pods -n oigns The output will look similar to the following:\nNAME READY STATUS RESTARTS AGE governancedomain-create-fmw-infra-domain-job-vj69h 0/1 Completed 0 24h governancedomain-introspect-domain-job-7qx29 1/1 Running 0 8s helper 1/1 Running 0 41h The Administration Server pod will start followed by the OIG Managed Servers pods. 
This process will take several minutes, so keep executing the command until all the pods are running with READY status 1/1:\nNAME READY STATUS RESTARTS AGE governancedomain-adminserver 1/1 Running 0 6m4s governancedomain-create-fmw-infra-domain-job-vj69h 0/1 Completed 0 24h governancedomain-oim-server1 1/1 Running 0 3m5s governancedomain-soa-server1 1/1 Running 0 3m5s helper 1/1 Running 0 41h Set OIMFrontendURL using MBeans Login to Oracle Enterprise Manager using the following URL:\nhttps://${MASTERNODE-HOSTNAME}:${MASTERNODE-PORT}/em\n Click the Target Navigation icon in the top left of the screen and navigate to the following:\n Expand Identity and Access \u0026gt; Access \u0026gt; OIM \u0026gt; oim Right click the instance oim and select System MBean Browser Under Application Defined MBeans, navigate to oracle.iam, Server:oim_server1 \u0026gt; Application:oim \u0026gt; XMLConfig \u0026gt; Config \u0026gt; XMLConfig.DiscoveryConfig \u0026gt; Discovery. Enter a new value for the OimFrontEndURL attribute, in the format:\nhttp://\u0026lt;OIM-Cluster-Service-Name\u0026gt;:\u0026lt;Cluster-Service-Port\u0026gt;\nFor example:\nhttp://governancedomain-cluster-oim-cluster:14000\nThen click Apply.\nNote: To find the \u0026lt;OIM-Cluster-Service-Name\u0026gt; run the following command:\n$ kubectl -n oigns get svc Your output will look similar to this:\nNAME TYPE CLUSTER-IP EXTERNAL-IP PORT(S) AGE governancedomain-adminserver ClusterIP None \u0026lt;none\u0026gt; 7001/TCP 9m41s governancedomain-cluster-oim-cluster ClusterIP 10.107.205.207 \u0026lt;none\u0026gt; 14002/TCP,14000/TCP 2d20h governancedomain-cluster-soa-cluster ClusterIP 10.102.221.184 \u0026lt;none\u0026gt; 8001/TCP 2d20h governancedomain-oim-server1 ClusterIP None \u0026lt;none\u0026gt; 14002/TCP,14000/TCP 6m58s governancedomain-oim-server2 ClusterIP 10.100.28.88 \u0026lt;none\u0026gt; 14002/TCP,14000/TCP 6m58s governancedomain-oim-server3 ClusterIP 10.99.226.29 \u0026lt;none\u0026gt; 14002/TCP,14000/TCP 
6m58s governancedomain-oim-server4 ClusterIP 10.96.253.210 \u0026lt;none\u0026gt; 14002/TCP,14000/TCP 6m58s governancedomain-oim-server5 ClusterIP 10.98.66.13 \u0026lt;none\u0026gt; 14002/TCP,14000/TCP 6m58s governancedomain-soa-server1 ClusterIP None \u0026lt;none\u0026gt; 8001/TCP 6m58s governancedomain-soa-server2 ClusterIP 10.111.168.68 \u0026lt;none\u0026gt; 8001/TCP 6m58s governancedomain-soa-server3 ClusterIP 10.96.183.16 \u0026lt;none\u0026gt; 8001/TCP 6m58s governancedomain-soa-server4 ClusterIP 10.98.35.5 \u0026lt;none\u0026gt; 8001/TCP 6m58s governancedomain-soa-server5 ClusterIP 10.98.200.195 \u0026lt;none\u0026gt; 8001/TCP 6m58s " +}, +{ + "uri": "/fmw-kubernetes/21.4.2/oig/configure-ingress/ingress-nginx-setup-for-oig-domain-setup-on-k8s/", + "title": "a. Using an Ingress with NGINX (non-SSL)", + "tags": [], + "description": "Steps to set up an Ingress for NGINX to direct traffic to the OIG domain (non-SSL).", + "content": "Setting up an ingress for NGINX for the OIG domain on Kubernetes (non-SSL) The instructions below explain how to set up NGINX as an ingress for the OIG domain with non-SSL termination.\nNote: All the steps below should be performed on the master node.\n Install NGINX\na. Configure the repository\nb. Create a namespace\nc. Install NGINX using helm\nd. Setup routing rules for the domain\n Create an ingress for the domain\n Verify that you can access the domain URL\n Install NGINX Use helm to install NGINX.\nConfigure the repository Add the Helm chart repository for NGINX using the following command:\n$ helm repo add stable https://kubernetes.github.io/ingress-nginx The output will look similar to the following:\n\u0026quot;stable\u0026quot; has been added to your repositories Update the repository using the following command:\n$ helm repo update The output will look similar to the following:\nHang tight while we grab the latest from your chart repositories... 
...Successfully got an update from the \u0026quot;stable\u0026quot; chart repository Update Complete. Happy Helming! Create a namespace Create a Kubernetes namespace for NGINX by running the following command:\n$ kubectl create namespace nginx The output will look similar to the following:\nnamespace/nginx created Install NGINX using helm If you can connect directly to the master node IP address from a browser, then install NGINX with the --set controller.service.type=NodePort parameter.\nIf you are using a Managed Service for your Kubernetes cluster, for example Oracle Kubernetes Engine (OKE) on Oracle Cloud Infrastructure (OCI), and connect from a browser to the Load Balancer IP address, then use the --set controller.service.type=LoadBalancer parameter. This instructs the Managed Service to setup a Load Balancer to direct traffic to the NGINX ingress.\n To install NGINX use the following helm command depending on if you are using NodePort or LoadBalancer:\na) Using NodePort\n$ helm install nginx-ingress -n nginx --set controller.service.type=NodePort --set controller.admissionWebhooks.enabled=false stable/ingress-nginx Note: If using Kubernetes 1.18 then add --version=3.34.0 to the end of command.\nThe output will look similar to the following:\nNAME: nginx-ingress LAST DEPLOYED: Fri Nov 12 07:55:04 2021 NAMESPACE: nginx STATUS: deployed REVISION: 1 TEST SUITE: None NOTES: The ingress-nginx controller has been installed. 
Get the application URL by running these commands: export HTTP_NODE_PORT=$(kubectl --namespace nginx get services -o jsonpath=\u0026quot;{.spec.ports[0].nodePort}\u0026quot; nginx-ingress-ingress-nginx-controller) export HTTPS_NODE_PORT=$(kubectl --namespace nginx get services -o jsonpath=\u0026quot;{.spec.ports[1].nodePort}\u0026quot; nginx-ingress-ingress-nginx-controller) export NODE_IP=$(kubectl --namespace nginx get nodes -o jsonpath=\u0026quot;{.items[0].status.addresses[1].address}\u0026quot;) echo \u0026quot;Visit http://$NODE_IP:$HTTP_NODE_PORT to access your application via HTTP.\u0026quot; echo \u0026quot;Visit https://$NODE_IP:$HTTPS_NODE_PORT to access your application via HTTPS.\u0026quot; An example Ingress that makes use of the controller: apiVersion: networking.k8s.io/v1 kind: Ingress metadata: annotations: kubernetes.io/ingress.class: nginx name: example namespace: foo spec: ingressClassName: example-class rules: - host: www.example.com http: paths: - path: / pathType: Prefix backend: service: name: exampleService port: 80 # This section is only required if TLS is to be enabled for the Ingress tls: - hosts: - www.example.com secretName: example-tls If TLS is enabled for the Ingress, a Secret containing the certificate and key must also be provided: apiVersion: v1 kind: Secret metadata: name: example-tls namespace: foo data: tls.crt: \u0026lt;base64 encoded cert\u0026gt; tls.key: \u0026lt;base64 encoded key\u0026gt; type: kubernetes.io/tls b) Using LoadBalancer\n$ helm install nginx-ingress -n nginx --set controller.service.type=LoadBalancer --set controller.admissionWebhooks.enabled=false stable/ingress-nginx Note: If using Kubernetes 1.18 then add --version=3.34.0 to the end of command.\nThe output will look similar to the following:\nNAME: nginx-ingress LAST DEPLOYED: Fri Nov 12 07:59:17 2021 NAMESPACE: nginx STATUS: deployed REVISION: 1 TEST SUITE: None NOTES: The nginx-ingress controller has been installed. 
It may take a few minutes for the LoadBalancer IP to be available. You can watch the status by running 'kubectl --namespace nginx get services -o wide -w nginx-ingress-controller' An example Ingress that makes use of the controller: apiVersion: networking.k8s.io/v1 kind: Ingress metadata: annotations: kubernetes.io/ingress.class: nginx name: example namespace: foo spec: ingressClassName: example-class rules: - host: www.example.com http: paths: - path: / pathType: Prefix backend: service: name: exampleService port: 80 # This section is only required if TLS is to be enabled for the Ingress tls: - hosts: - www.example.com secretName: example-tls If TLS is enabled for the Ingress, a Secret containing the certificate and key must also be provided: apiVersion: v1 kind: Secret metadata: name: example-tls namespace: foo data: tls.crt: \u0026lt;base64 encoded cert\u0026gt; tls.key: \u0026lt;base64 encoded key\u0026gt; type: kubernetes.io/tls Setup routing rules for the domain Setup routing rules by running the following commands:\n$ cd $WORKDIR/kubernetes/charts/ingress-per-domain Edit values.yaml and change the domainUID parameter to match your domainUID, for example domainUID: governancedomain. Also change sslType to NONSSL. The file should look as follows:\n# Load balancer type. 
Supported values are: TRAEFIK, NGINX type: NGINX # Type of Configuration Supported Values are : NONSSL, SSL sslType: NONSSL # TLS secret name if the mode is SSL secretName: domain1-tls-cert #WLS domain as backend to the load balancer wlsDomain: domainUID: governancedomain adminServerName: AdminServer adminServerPort: 7001 soaClusterName: soa_cluster soaManagedServerPort: 8001 oimClusterName: oim_cluster oimManagedServerPort: 14000 Create an ingress for the domain Create an Ingress for the domain (governancedomain-nginx), in the domain namespace by using the sample Helm chart:\n$ cd $WORKDIR $ helm install governancedomain-nginx kubernetes/charts/ingress-per-domain --namespace \u0026lt;namespace\u0026gt; --values kubernetes/charts/ingress-per-domain/values.yaml Note: The \u0026lt;workdir\u0026gt;/samples/kubernetes/charts/ingress-per-domain/templates/nginx-ingress-k8s1.19.yaml and nginx-ingress.yaml have nginx.ingress.kubernetes.io/enable-access-log set to false. If you want to enable access logs then set this value to true before executing the command. 
Enabling access-logs can cause issues with disk space if not regularly maintained.\nFor example:\n$ cd $WORKDIR $ helm install governancedomain-nginx kubernetes/charts/ingress-per-domain --namespace oigns --values kubernetes/charts/ingress-per-domain/values.yaml The output will look similar to the following:\n$ helm install governancedomain-nginx kubernetes/charts/ingress-per-domain --namespace oigns --values kubernetes/charts/ingress-per-domain/values.yaml NAME: governancedomain-nginx LAST DEPLOYED: Fri Nov 12 08:14:53 2021 NAMESPACE: oigns STATUS: deployed REVISION: 1 TEST SUITE: None Run the following command to show the ingress is created successfully:\n$ kubectl get ing -n \u0026lt;domain_namespace\u0026gt; For example:\n$ kubectl get ing -n oigns The output will look similar to the following:\nNAME CLASS HOSTS ADDRESS PORTS AGE governancedomain-nginx \u0026lt;none\u0026gt; * x.x.x.x 80 47s Find the NodePort of NGINX using the following command (only if you installed NGINX using NodePort):\n$ kubectl get services -n nginx -o jsonpath=\u0026quot;{.spec.ports[0].nodePort}\u0026quot; nginx-ingress-ingress-nginx-controller The output will look similar to the following:\n31530 Run the following command to check the ingress:\n$ kubectl describe ing governancedomain-nginx -n \u0026lt;namespace\u0026gt; For example:\n$ kubectl describe ing governancedomain-nginx -n oigns The output will look similar to the following:\nName: governancedomain-nginx Namespace: oigns Address: Default backend: default-http-backend:80 (\u0026lt;error: endpoints \u0026quot;default-http-backend\u0026quot; not found\u0026gt;) Rules: Host Path Backends ---- ---- -------- * /console governancedomain-adminserver:7001 (10.244.2.59:7001) /em governancedomain-adminserver:7001 (10.244.2.59:7001) /soa governancedomain-cluster-soa-cluster:8001 (10.244.2.60:8001) /integration governancedomain-cluster-soa-cluster:8001 (10.244.2.60:8001) /soa-infra governancedomain-cluster-soa-cluster:8001 (10.244.2.60:8001) /identity 
governancedomain-cluster-oim-cluster:14000 (10.244.1.25:14000) /admin governancedomain-cluster-oim-cluster:14000 (10.244.1.25:14000) /oim governancedomain-cluster-oim-cluster:14000 (10.244.1.25:14000) /sysadmin governancedomain-cluster-oim-cluster:14000 (10.244.1.25:14000) /workflowservice governancedomain-cluster-oim-cluster:14000 (10.244.1.25:14000) /xlWebApp governancedomain-cluster-oim-cluster:14000 (10.244.1.25:14000) /Nexaweb governancedomain-cluster-oim-cluster:14000 (10.244.1.25:14000) /callbackResponseService governancedomain-cluster-oim-cluster:14000 (10.244.1.25:14000) /spml-xsd governancedomain-cluster-oim-cluster:14000 (10.244.1.25:14000) /HTTPClnt governancedomain-cluster-oim-cluster:14000 (10.244.1.25:14000) /reqsvc governancedomain-cluster-oim-cluster:14000 (10.244.1.25:14000) /iam governancedomain-cluster-oim-cluster:14000 (10.244.1.25:14000) /provisioning-callback governancedomain-cluster-oim-cluster:14000 (10.244.1.25:14000) /CertificationCallbackService governancedomain-cluster-oim-cluster:14000 (10.244.1.25:14000) /ucs governancedomain-cluster-oim-cluster:14000 (10.244.1.25:14000) /FacadeWebApp governancedomain-cluster-oim-cluster:14000 (10.244.1.25:14000) /OIGUI governancedomain-cluster-oim-cluster:14000 (10.244.1.25:14000) /weblogic governancedomain-cluster-oim-cluster:14000 (10.244.1.25:14000) Annotations: kubernetes.io/ingress.class: nginx meta.helm.sh/release-name: governancedomain-nginx meta.helm.sh/release-namespace: oigns nginx.ingress.kubernetes.io/affinity: cookie nginx.ingress.kubernetes.io/enable-access-log: false Events: Type Reason Age From Message ---- ------ ---- ---- ------- Normal Sync 35s nginx-ingress-controller Scheduled for sync To confirm that the new ingress is successfully routing to the domain\u0026rsquo;s server pods, run the following command to send a request to the URL for the WebLogic ReadyApp framework:\n$ curl -v http://${MASTERNODE-HOSTNAME}:${MASTERNODE-PORT}/weblogic/ready For example:\na) For NodePort\n$ 
curl -v http://masternode.example.com:31530/weblogic/ready b) For LoadBalancer\n$ curl -v http://masternode.example.com:80/weblogic/ready The output will look similar to the following:\n$ curl -v http://masternode.example.com:31530/weblogic/ready * About to connect() to masternode.example.com port 31530 (#0) * Trying X.X.X.X... * Connected to masternode.example.com (X.X.X.X) port 31530 (#0) \u0026gt; GET /weblogic/ready HTTP/1.1 \u0026gt; User-Agent: curl/7.29.0 \u0026gt; Host: masternode.example.com:31530 \u0026gt; Accept: */* \u0026gt; \u0026lt; HTTP/1.1 200 OK \u0026lt; Server: nginx/1.19.2 \u0026lt; Date: Fri Nov 12 08:10:17 2021 \u0026lt; Content-Length: 0 \u0026lt; Connection: keep-alive \u0026lt; * Connection #0 to host masternode.example.com left intact Verify that you can access the domain URL After setting up the NGINX ingress, verify that the domain applications are accessible through the NGINX ingress port (for example 31530) as per Validate Domain URLs \n" +}, +{ + "uri": "/fmw-kubernetes/21.4.2/oig/post-install-config/install_and_configure_connectors/", + "title": "b. Install and configure connectors", + "tags": [], + "description": "Install and Configure Connectors.", + "content": "Download the connector Download the Connector you are interested in from Oracle Identity Manager Connector Downloads.\n Copy the connector zip file to a staging directory on the master node e.g. 
\u0026lt;workdir\u0026gt;/stage and unzip it:\n$ cp $HOME/Downloads/\u0026lt;connector\u0026gt;.zip \u0026lt;workdir\u0026gt;/\u0026lt;stage\u0026gt;/ $ cd \u0026lt;workdir\u0026gt;/\u0026lt;stage\u0026gt; $ unzip \u0026lt;connector\u0026gt;.zip For example:\n$ cp $HOME/Downloads/Exchange-12.2.1.3.0.zip /scratch/OIGK8S/stage/ $ cd /scratch/OIGK8S/stage/ $ unzip exchange-12.2.1.3.0.zip Create a directory in the persistent volume On the master node run the following command to create a ConnectorDefaultDirectory:\n$ kubectl exec -ti governancedomain-oim-server1 -n \u0026lt;domain_namespace\u0026gt; -- mkdir -p /u01/oracle/user_projects/domains/ConnectorDefaultDirectory For example:\n$ kubectl exec -ti governancedomain-oim-server1 -n oigns -- mkdir -p /u01/oracle/user_projects/domains/ConnectorDefaultDirectory Note: This will create a directory in the persistent volume, e.g. /scratch/OIGK8S/governancedomainpv/ConnectorDefaultDirectory.\n Copy OIG connectors There are two options to copy OIG Connectors to your Kubernetes cluster:\n a) Copy the connector directly to the Persistent Volume b) Use the kubectl cp command to copy the connector to the Persistent Volume It is recommended to use option a), however there may be cases, for example when using a Managed Service such as Oracle Kubernetes Engine on Oracle Cloud Infrastructure, where it may not be feasible to directly mount the domain directory. In such cases option b) should be used.\na) Copy the connector directly to the persistent volume Copy the connector zip file to the persistent volume. 
For example:\n$ cp -R \u0026lt;path_to\u0026gt;/\u0026lt;connector\u0026gt; \u0026lt;workdir\u0026gt;/governancedomainpv/ConnectorDefaultDirectory/ For example:\n$ cp -R /scratch/OIGK8S/stage/Exchange-12.2.1.3.0 /scratch/OIGK8S/governancedomainpv/ConnectorDefaultDirectory/ b) Use the kubectl cp command to copy the connector to the persistent volume Run the following command to copy over the connector:\n$ kubectl -n \u0026lt;domain_namespace\u0026gt; cp \u0026lt;path_to\u0026gt;/\u0026lt;connector\u0026gt; \u0026lt;cluster_name\u0026gt;:/u01/oracle/idm/server/ConnectorDefaultDirectory/ For example:\n$ kubectl -n oigns cp /scratch/OIGK8S/stage/Exchange-12.2.1.3.0 governancedomain-oim-server1:/u01/oracle/idm/server/ConnectorDefaultDirectory/ Install the connector The connectors are installed as they are on a standard on-premises setup, via Application On Boarding or via Connector Installer.\nRefer to your Connector specific documentation for instructions.\n" +}, +{ + "uri": "/fmw-kubernetes/21.4.2/oam/patch-and-upgrade/upgrade_an_operator_release/", + "title": "b. Upgrade an operator release", + "tags": [], + "description": "Instructions on how to update the WebLogic Kubernetes Operator version.", + "content": "These instructions apply to upgrading the operator within the 3.x release family as additional versions are released.\nThe new WebLogic Kubernetes Operator Docker image must be installed on the master node and each of the worker nodes in your Kubernetes cluster. 
Alternatively you can place the image in a Docker registry that your cluster can access.\n Pull the WebLogic Kubernetes Operator 3.X.X image by running the following command on the master node:\n$ docker pull ghcr.io/oracle/weblogic-kubernetes-operator:3.X.X where 3.X.X is the version of the operator you require.\n Run the docker tag command as follows:\n$ docker tag ghcr.io/oracle/weblogic-kubernetes-operator:3.X.X weblogic-kubernetes-operator:3.X.X where 3.X.X is the version of the operator downloaded.\nAfter installing the new WebLogic Kubernetes Operator Docker image, repeat the above on the worker nodes.\n On the master node, download the new WebLogic Kubernetes Operator source code from the operator github project:\n$ mkdir \u0026lt;workdir\u0026gt;/weblogic-kubernetes-operator-3.X.X $ cd \u0026lt;workdir\u0026gt;/weblogic-kubernetes-operator-3.X.X $ git clone https://github.com/oracle/weblogic-kubernetes-operator.git --branch v3.X.X For example:\n$ mkdir /scratch/OAMK8S/weblogic-kubernetes-operator-3.X.X $ cd /scratch/OAMK8S/weblogic-kubernetes-operator-3.X.X $ git clone https://github.com/oracle/weblogic-kubernetes-operator.git --branch v3.X.X This will create the directory \u0026lt;workdir\u0026gt;/weblogic-kubernetes-operator-3.X.X/weblogic-kubernetes-operator\n Run the following helm command to upgrade the operator:\n$ cd \u0026lt;workdir\u0026gt;/weblogic-kubernetes-operator-3.X.X/weblogic-kubernetes-operator $ helm upgrade --reuse-values --set image=weblogic-kubernetes-operator:3.X.X --namespace \u0026lt;sample-kubernetes-operator-ns\u0026gt; --wait weblogic-kubernetes-operator kubernetes/charts/weblogic-operator For example:\n$ cd /scratch/OAMK8S/weblogic-kubernetes-operator-3.X.X/weblogic-kubernetes-operator $ helm upgrade --reuse-values --set image=weblogic-kubernetes-operator:3.X.X --namespace opns --wait weblogic-kubernetes-operator kubernetes/charts/weblogic-operator The output will look similar to the following:\nRelease 
\u0026quot;weblogic-kubernetes-operator\u0026quot; has been upgraded. Happy Helming! NAME: weblogic-kubernetes-operator LAST DEPLOYED: Wed Nov 3 04:36:10 2021 NAMESPACE: opns STATUS: deployed REVISION: 3 TEST SUITE: None Verify that the operator\u0026rsquo;s pod and services are running by executing the following command:\n$ kubectl get all -n \u0026lt;sample-kubernetes-operator-ns\u0026gt; For example:\n$ kubectl get all -n opns The output will look similar to the following:\nNAME READY STATUS RESTARTS AGE pod/weblogic-operator-69546866bd-h58sk 2/2 Running 0 112s NAME TYPE CLUSTER-IP EXTERNAL-IP PORT(S) AGE service/internal-weblogic-operator-svc ClusterIP 10.106.72.42 \u0026lt;none\u0026gt; 8082/TCP 2d NAME READY UP-TO-DATE AVAILABLE AGE deployment.apps/weblogic-operator 1/1 1 1 2d NAME DESIRED CURRENT READY AGE replicaset.apps/weblogic-operator-676d5cc6f4 0 0 0 2d replicaset.apps/weblogic-operator-69546866bd 1 1 1 112s " +}, +{ + "uri": "/fmw-kubernetes/21.4.2/oig/patch-and-upgrade/upgrade_an_operator_release/", + "title": "b. Upgrade an operator release", + "tags": [], + "description": "Instructions on how to update the WebLogic Kubernetes Operator version.", + "content": "These instructions apply to upgrading operators within the 3.x release family as additional versions are released.\nThe new WebLogic Kubernetes Operator Docker image must be installed on the master node AND each of the worker nodes in your Kubernetes cluster. 
Alternatively you can place the image in a Docker registry that your cluster can access.\n Pull the WebLogic Kubernetes Operator 3.X.X image by running the following command on the master node:\n$ docker pull oracle/weblogic-kubernetes-operator:3.X.X where 3.X.X is the version of the operator you require.\n Run the docker tag command as follows:\n$ docker tag oracle/weblogic-kubernetes-operator:3.X.X weblogic-kubernetes-operator:3.X.X where 3.X.X is the version of the operator downloaded.\nAfter installing the new WebLogic Kubernetes Operator Docker image, repeat the above on the worker nodes.\n On the master node, download the new WebLogic Kubernetes Operator source code from the operator github project:\n$ mkdir \u0026lt;workdir\u0026gt;/weblogic-kubernetes-operator-3.X.X $ cd \u0026lt;workdir\u0026gt;/weblogic-kubernetes-operator-3.X.X $ git clone https://github.com/oracle/weblogic-kubernetes-operator.git --branch v3.X.X For example:\n$ mkdir /scratch/OIGK8S/weblogic-kubernetes-operator-3.X.X $ cd /scratch/OIGK8S/weblogic-kubernetes-operator-3.X.X $ git clone https://github.com/oracle/weblogic-kubernetes-operator.git --branch v3.X.X This will create the directory \u0026lt;workdir\u0026gt;/weblogic-kubernetes-operator-3.X.X/weblogic-kubernetes-operator\n Run the following helm command to upgrade the operator:\n$ cd \u0026lt;workdir\u0026gt;/weblogic-kubernetes-operator-3.X.X/weblogic-kubernetes-operator $ helm upgrade --reuse-values --set image=oracle/weblogic-kubernetes-operator:3.X.X --namespace \u0026lt;sample-kubernetes-operator-ns\u0026gt; --wait weblogic-kubernetes-operator kubernetes/charts/weblogic-operator For example:\n$ cd /scratch/OIGK8S/weblogic-kubernetes-operator-3.X.X/weblogic-kubernetes-operator $ helm upgrade --reuse-values --set image=oracle/weblogic-kubernetes-operator:3.X.X --namespace operator --wait weblogic-kubernetes-operator kubernetes/charts/weblogic-operator The output will look similar to the following:\nRelease 
\u0026quot;weblogic-kubernetes-operator\u0026quot; has been upgraded. Happy Helming! NAME: weblogic-kubernetes-operator LAST DEPLOYED: Mon Nov 15 09:24:40 2021 NAMESPACE: operator STATUS: deployed REVISION: 3 TEST SUITE: None " +}, +{ + "uri": "/fmw-kubernetes/21.4.2/oig/configure-ingress/ingress-nginx-setup-for-oig-domain-setup-on-k8s-ssl/", + "title": "b. Using an Ingress with NGINX (SSL)", + "tags": [], + "description": "Steps to set up an Ingress for NGINX to direct traffic to the OIG domain (SSL).", + "content": "Setting up an ingress for NGINX for the OIG domain on Kubernetes The instructions below explain how to set up NGINX as an ingress for the OIG domain with SSL termination.\nNote: All the steps below should be performed on the master node.\n Create a SSL certificate\na. Generate SSL certificate\nb. Create a Kubernetes secret for SSL\n Install NGINX\na. Configure the repository\nb. Create a namespace\nc. Install NGINX using helm\n Create an ingress for the domain\n Verify that you can access the domain URL\n Create a SSL certificate Generate SSL certificate Generate a private key and certificate signing request (CSR) using a tool of your choice. 
Send the CSR to your certificate authority (CA) to generate the certificate.\nIf you want to use a certificate for testing purposes you can generate a self signed certificate using openssl:\n$ mkdir \u0026lt;workdir\u0026gt;/ssl $ cd \u0026lt;workdir\u0026gt;/ssl $ openssl req -x509 -nodes -days 365 -newkey rsa:2048 -keyout tls.key -out tls.crt -subj \u0026#34;/CN=\u0026lt;nginx-hostname\u0026gt;\u0026#34; For example:\n$ mkdir /scratch/OIGK8S/ssl $ cd /scratch/OIGK8S/ssl $ openssl req -x509 -nodes -days 365 -newkey rsa:2048 -keyout tls.key -out tls.crt -subj \u0026#34;/CN=masternode.example.com\u0026#34; Note: The CN should match the host.domain of the master node in order to prevent hostname problems during certificate verification.\nThe output will look similar to the following:\nGenerating a 2048 bit RSA private key ..........................................+++ .......................................................................................................+++ writing new private key to 'tls.key' ----- Create a Kubernetes secret for SSL Create a secret for SSL containing the SSL certificate by running the following command:\n$ kubectl -n oigns create secret tls \u0026lt;domain_uid\u0026gt;-tls-cert --key \u0026lt;workdir\u0026gt;/tls.key --cert \u0026lt;workdir\u0026gt;/tls.crt For example:\n$ kubectl -n oigns create secret tls governancedomain-tls-cert --key /scratch/OIGK8S/ssl/tls.key --cert /scratch/OIGK8S/ssl/tls.crt The output will look similar to the following:\nsecret/governancedomain-tls-cert created Confirm that the secret is created by running the following command:\n$ kubectl get secret \u0026lt;domain_uid\u0026gt;-tls-cert -o yaml -n oigns For example:\n$ kubectl get secret governancedomain-tls-cert -o yaml -n oigns The output will look similar to the following:\napiVersion: v1 data: tls.crt: 
LS0tLS1CRUdJTiBDRVJUSUZJQ0FURS0tLS0tCk1JSURGVENDQWYyZ0F3SUJBZ0lKQUl3ZjVRMWVxZnljTUEwR0NTcUdTSWIzRFFFQkN3VUFNQ0V4SHpBZEJnTlYKQkFNTUZtUmxiakF4WlhadkxuVnpMbTl5WVdOc1pTNWpiMjB3SGhjTk1qQXdPREV3TVRReE9UUXpXaGNOTWpFdwpPREV3TVRReE9UUXpXakFoTVI4d0hRWURWUVFEREJaa1pXNHdNV1YyYnk1MWN5NXZjbUZqYkdVdVkyOXRNSUlCCklqQU5CZ2txaGtpRzl3MEJBUUVGQUFPQ0FROEFNSUlCQ2dLQ0FRRUEyY0lpVUhwcTRVZzBhaGR6aXkycHY2cHQKSVIza2s5REd2eVRNY0syaWZQQ2dtUU5CdHV6VXNFN0l4c294eldITmU5RFpXRXJTSjVON3Ym1lTzJkMVd2NQp1aFhzbkFTbnkwY1NLUE9xVDNQSlpDVk1MK0llZVFKdnhaVjZaWWU4V2FFL1NQSGJzczRjYy9wcG1mc3pxCnErUi83cXEyMm9ueHNHaE9vQ1h1TlQvMFF2WXVzMnNucGtueWRKRHUxelhGbDREYkFIZGMvamNVK0NPWWROeS8KT3Iza2JIV0FaTkR4OWxaZUREOTRmNXZLcUF2V0FkSVJZa2UrSmpNTHg0VHo2ZlM0VXoxbzdBSTVuSApPQ1ZMblV5U0JkaGVuWTNGNEdFU0wwbnorVlhFWjRWVjRucWNjRmo5cnJ0Q29pT1BBNlgvNGdxMEZJbi9Qd0lECkFRQUJvMUF3VGpBZEJnTlZIUTRFRmdRVWw1VnVpVDBDT0xGTzcxMFBlcHRxSC9DRWZyY3dId1lEVlIwakJCZ3cKRm9BVWw1VnVpVDBDT0xGTzcxMFBlcHRxSC9DRWZyY3dEQVlEVlIwVEJBVXdBd0VCL3pBTkJna3Foa2lHOXcwQgpBUXNGQUFPQ0FRRUFXdEN4b2ZmNGgrWXZEcVVpTFFtUnpqQkVBMHJCOUMwL1FWOG9JQzJ3d1hzYi9KaVNuMHdOCjNMdHppejc0aStEbk1yQytoNFQ3enRaSkc3NVluSGRKcmxQajgzVWdDLzhYTlFCSUNDbTFUa3RlVU1jWG0reG4KTEZEMHpReFhpVzV0N1FHcWtvK2FjeTlhUnUvN3JRMXlNSE9HdVVkTTZETzErNXF4cTdFNXFMamhyNEdKejV5OAoraW8zK25UcUVKMHFQOVRocG96RXhBMW80OEY0ZHJybWdqd3ROUldEQVpBYmYyV1JNMXFKWXhxTTJqdU1FQWNsCnFMek1TdEZUQ2o1UGFTQ0NUV1VEK3ZlSWtsRWRpaFdpRm02dzk3Y1diZ0lGMlhlNGk4L2szMmF1N2xUTDEvd28KU3Q2dHpsa20yV25uUFlVMzBnRURnVTQ4OU02Z1dybklpZz09Ci0tLS0tRU5EIENFUlRJRklDQVRFLS0tLS0K tls.key: 
LS0tLS1CRUdJTiBQUklWQVRFIEtFWS0tLS0tCk1JSUV1d0lCQURBTkJna3Foa2lHOXcwQkFRRUZBQVNDQktVd2dnU2hBZ0VBQW9JQkFRRFp3aUpRZW1yaFNEUnEKRjNPTExhbS9xbTBoSGVTVDBNYS9KTXh3cmFKODhLQ1pBMEcyN05Td1Rzakd5akhOWWMxNzBObFlTdEluazN1cApkdVo0N1ozVmEvbTZGZXljQktmTFJ4SW84NnIwSmhQYzhsa0pVd3Y0aDU1QW0vRmxYcGxoN3hab1Q5SThkdXl6Cmh4eittbVorek9xcjVIL3VxcmJhaWZHd2FFNmdKZTQxUC9SQzlpNnpheWVtU2ZKMGtPN1hOY1dYZ05zQWQxeisKTnhUNEk1aDAzTDg2dmVSc2RZQmswUEgyVmw0TVAzaC9tOHFWdW5mK1NvQzlZQjBoRmlSNzRtTXd2SGhQUHA5TApoVFBXanNBam1jYzRKVXVkVEpJRjJGNmRqY1hnWVJJdlNmUDVWY1JuaFZYaWVweHdXUDJ1dTBLaUk0OERwZi9pCkNyUVVpZjgvQWdNQkFBRUNnZjl6cnE2TUVueTFNYWFtdGM2c0laWU1QSDI5R2lSVVlwVXk5bG1sZ3BqUHh3V0sKUkRDay9Td0FmZG9yd1Q2ejNVRk1oYWJ4UU01a04vVjZFYkJlamQxT15bjdvWTVEQWJRRTR3RG9SZWlrVApONndWU0FrVC92Z1RXc1RqRlY1bXFKMCt6U2ppOWtySkZQNVNRN1F2cUswQ3BHRlNhVjY2dW8ycktiNmJWSkJYCkxPZmZPMytlS0tVazBaTnE1Q1NVQk9mbnFoNVFJSGdpaDNiMTRlNjB6bndrNWhaMHBHZE9BQm9aTkoKZ21lanUyTEdzVWxXTjBLOVdsUy9lcUllQzVzQm9jaWlocmxMVUpGWnpPRUV6LzErT2cyemhmT29yTE9rMTIrTgpjQnV0cTJWQ2I4ZFJDaFg1ZzJ0WnBrdzgzcXN5RSt3M09zYlQxa0VDZ1lFQTdxUnRLWGFONUx1SENvWlM1VWhNCm9Hak1WcnYxTEg0eGNhaDJIZnMksrMHJqQkJONGpkZkFDMmF3R3ZzU1EyR0lYRzVGYmYyK0pwL1kxbktKOEgKZU80MzNLWVgwTDE4NlNNLzFVay9HSEdTek1CWS9KdGR6WkRrbTA4UnBwaTl4bExTeDBWUWtFNVJVcnJJcTRJVwplZzBOM2RVTHZhTVl1UTBrR2dncUFETUNnWUVBNlpqWCtjU2VMZ1BVajJENWRpUGJ1TmVFd2RMeFNPZDFZMUFjCkUzQ01YTWozK2JxQ3BGUVIrTldYWWVuVmM1QiszajlSdHVnQ0YyTkNSdVdkZWowalBpL243UExIRHdCZVY0bVIKM3VQVHJmamRJbFovSFgzQ2NjVE94TmlaajU4VitFdkRHNHNHOGxtRTRieStYRExIYTJyMWxmUk9sUVRMSyswVgpyTU93eU1VQ2dZRUF1dm14WGM4NWxZRW9hU0tkU0cvQk9kMWlYSUtmc2VDZHRNT2M1elJ0UXRsSDQwS0RscE54CmxYcXBjbVc3MWpyYzk1RzVKNmE1ZG5xTE9OSFZoWW8wUEpmSXhPU052RXI2MTE5NjRBMm5sZXRHYlk0M0twUkEKaHBPRHlmdkZoSllmK29kaUJpZFUyL3ZBMCtUczNSUHJzRzBSOUVDOEZqVDNaZVhaNTF1R0xPa0NnWUFpTmU0NwplQjRxWXdrNFRsMTZmZG5xQWpaQkpLR05xY2c1V1R3alpMSkp6R3owdCtuMkl4SFd2WUZFSjdqSkNmcHFsaDlqCmlDcjJQZVV3K09QTlNUTG1JcUgydzc5L1pQQnNKWXVsZHZ4RFdGVWFlRXg1aHpkNDdmZlNRRjZNK0NHQmthYnIKVzdzU3R5V000ZFdITHpDaGZMS20yWGJBd0VqNUQrbkN1WTRrZVFLQmdFSkRHb0puM1NCRXc
ra2xXTE85N09aOApnc3lYQm9mUW1lRktIS2NHNzFZUFhJbTRlV1kyUi9KOCt5anc5b1FJQ3o5NlRidkdSZEN5QlJhbWhoTmFGUzVyCk9MZUc0ejVENE4zdThUc0dNem9QcU13KzBGSXJiQ3FzTnpGWTg3ekZweEdVaXZvRWZLNE82YkdERTZjNHFqNGEKNmlmK0RSRSt1TWRMWTQyYTA3ekoKLS0tLS1FTkQgUFJJVkFURSBLRVktLS0tLQo= kind: Secret metadata: creationTimestamp: \u0026quot;2021-11-12T17:13:37Z\u0026quot; managedFields: - apiVersion: v1 fieldsType: FieldsV1 fieldsV1: f:data: .: {} f:tls.crt: {} f:tls.key: {} f:type: {} manager: kubectl operation: Update time: \u0026quot;2021-11-12T17:13:37Z\u0026quot; name: governancedomain-tls-cert namespace: oigns resourceVersion: \u0026quot;1291036\u0026quot; selfLink: /api/v1/namespaces/oigns/secrets/governancedomain-tls-cert uid: a127e5fd-705b-43e1-ab56-590834efda5e type: kubernetes.io/tls Install NGINX Use helm to install NGINX.\nConfigure the repository Add the Helm chart repository for installing NGINX using the following command:\n$ helm repo add stable https://kubernetes.github.io/ingress-nginx The output will look similar to the following:\n\u0026quot;stable\u0026quot; has been added to your repositories Update the repository using the following command:\n$ helm repo update The output will look similar to the following:\nHang tight while we grab the latest from your chart repositories... ...Successfully got an update from the \u0026quot;stable\u0026quot; chart repository Update Complete. Happy Helming! 
Create a namespace Create a Kubernetes namespace for NGINX:\n$ kubectl create namespace nginxssl The output will look similar to the following:\nnamespace/nginxssl created Install NGINX using helm If you can connect directly to the master node IP address from a browser, then install NGINX with the --set controller.service.type=NodePort parameter.\nIf you are using a Managed Service for your Kubernetes cluster, for example Oracle Kubernetes Engine (OKE) on Oracle Cloud Infrastructure (OCI), and connect from a browser to the Load Balancer IP address, then use the --set controller.service.type=LoadBalancer parameter. This instructs the Managed Service to set up a Load Balancer to direct traffic to the NGINX ingress.\n To install NGINX use the following helm command depending on whether you are using NodePort or LoadBalancer:\na) Using NodePort\n$ helm install nginx-ingress -n nginxssl --set controller.extraArgs.default-ssl-certificate=oigns/governancedomain-tls-cert --set controller.service.type=NodePort --set controller.admissionWebhooks.enabled=false stable/ingress-nginx Note: If using Kubernetes 1.18 then add --version=3.34.0 to the end of the command.\nThe output will look similar to the following:\n$ helm install nginx-ingress -n nginxssl --set controller.extraArgs.default-ssl-certificate=oigns/governancedomain-tls-cert --set controller.service.type=NodePort --set controller.admissionWebhooks.enabled=false stable/ingress-nginx NAME: nginx-ingress LAST DEPLOYED: Mon Nov 15 02:23:30 2021 NAMESPACE: nginxssl STATUS: deployed REVISION: 1 TEST SUITE: None NOTES: The nginx-ingress controller has been installed. 
Get the application URL by running these commands: export HTTP_NODE_PORT=$(kubectl --namespace nginxssl get services -o jsonpath=\u0026quot;{.spec.ports[0].nodePort}\u0026quot; nginx-ingress-controller) export HTTPS_NODE_PORT=$(kubectl --namespace nginxssl get services -o jsonpath=\u0026quot;{.spec.ports[1].nodePort}\u0026quot; nginx-ingress-controller) export NODE_IP=$(kubectl --namespace nginxssl get nodes -o jsonpath=\u0026quot;{.items[0].status.addresses[1].address}\u0026quot;) echo \u0026quot;Visit http://$NODE_IP:$HTTP_NODE_PORT to access your application via HTTP.\u0026quot; echo \u0026quot;Visit https://$NODE_IP:$HTTPS_NODE_PORT to access your application via HTTPS.\u0026quot; An example Ingress that makes use of the controller: apiVersion: networking.k8s.io/v1 kind: Ingress metadata: annotations: kubernetes.io/ingress.class: nginx name: example namespace: foo spec: ingressClassName: example-class rules: - host: www.example.com http: paths: - path: / pathType: Prefix backend: serviceName: exampleService servicePort: 80 path: / # This section is only required if TLS is to be enabled for the Ingress tls: - hosts: - www.example.com secretName: example-tls If TLS is enabled for the Ingress, a Secret containing the certificate and key must also be provided: apiVersion: v1 kind: Secret metadata: name: example-tls namespace: foo data: tls.crt: \u0026lt;base64 encoded cert\u0026gt; tls.key: \u0026lt;base64 encoded key\u0026gt; type: kubernetes.io/tls b) Using LoadBalancer\n$ helm install nginx-ingress -n nginxssl --set controller.extraArgs.default-ssl-certificate=oigns/governancedomain-tls-cert --set controller.service.type=LoadBalancer --set controller.admissionWebhooks.enabled=false stable/ingress-nginx Note: If using Kubernetes 1.18 then add --version=3.34.0 to the end of command.\nThe output will look similar to the following:\nNAME: nginx-ingress LAST DEPLOYED: Mon Nov 15 02:26:09 2021 NAMESPACE: nginxssl STATUS: deployed REVISION: 1 TEST SUITE: None NOTES: 
The ingress-nginx controller has been installed. It may take a few minutes for the LoadBalancer IP to be available. You can watch the status by running 'kubectl --namespace nginxssl get services -o wide -w nginx-ingress-ingress-nginx-controller' An example Ingress that makes use of the controller: apiVersion: networking.k8s.io/v1 kind: Ingress metadata: annotations: kubernetes.io/ingress.class: nginx name: example namespace: foo spec: rules: - host: www.example.com http: paths: - path: / pathType: Prefix backend: service: name: exampleService port: 80 # This section is only required if TLS is to be enabled for the Ingress tls: - hosts: - www.example.com secretName: example-tls If TLS is enabled for the Ingress, a Secret containing the certificate and key must also be provided: apiVersion: v1 kind: Secret metadata: name: example-tls namespace: foo data: tls.crt: \u0026lt;base64 encoded cert\u0026gt; tls.key: \u0026lt;base64 encoded key\u0026gt; type: kubernetes.io/tls Setup routing rules for the domain Setup routing rules by running the following commands:\n$ cd $WORKDIR/kubernetes/charts/ingress-per-domain Edit values.yaml and change the domainUID parameter to match your domainUID, for example domainUID: governancedomain. Also change sslType to SSL. The file should look as follows:\n# Load balancer type. 
Supported values are: TRAEFIK, NGINX type: NGINX # Type of Configuration Supported Values are : NONSSL,SSL # tls: NONSSL tls: SSL # TLS secret name if the mode is SSL secretName: governancedomain-tls-cert # WLS domain as backend to the load balancer wlsDomain: domainUID: governancedomain adminServerName: AdminServer adminServerPort: 7001 soaClusterName: soa_cluster soaManagedServerPort: 8001 oimClusterName: oim_cluster oimManagedServerPort: 14000 Create an ingress for the domain Create an Ingress for the domain (governancedomain-nginx), in the domain namespace by using the sample Helm chart:\n$ cd $WORKDIR $ helm install governancedomain-nginx kubernetes/charts/ingress-per-domain --namespace oigns --values kubernetes/charts/ingress-per-domain/values.yaml Note: The $WORKDIR/kubernetes/charts/ingress-per-domain/templates/nginx-ingress-k8s1.19.yaml and nginx-ingress.yaml have nginx.ingress.kubernetes.io/enable-access-log set to false. If you want to enable access logs then set this value to true before executing the command. 
Enabling access-logs can cause issues with disk space if not regularly maintained.\nFor example:\n$ cd $WORKDIR $ helm install governancedomain-nginx kubernetes/charts/ingress-per-domain --namespace oigns --values kubernetes/charts/ingress-per-domain/values.yaml The output will look similar to the following:\nNAME: governancedomain-nginx LAST DEPLOYED: Mon Nov 15 02:35:05 2021 NAMESPACE: oigns STATUS: deployed REVISION: 1 TEST SUITE: None Run the following command to show the ingress is created successfully:\n$ kubectl get ing -n \u0026lt;namespace\u0026gt; For example:\n$ kubectl get ing -n oigns The output will look similar to the following:\nNAME CLASS HOSTS ADDRESS PORTS AGE governancedomain-nginx \u0026lt;none\u0026gt; * x.x.x.x 80 49s Find the node port of NGINX using the following command:\n$ kubectl get services -n nginxssl -o jsonpath=\u0026#34;{.spec.ports[1].nodePort}\u0026#34; nginx-ingress-ingress-nginx-controller The output will look similar to the following:\n32033 Run the following command to check the ingress:\n$ kubectl describe ing governancedomain-nginx -n \u0026lt;namespace\u0026gt; For example:\n$ kubectl describe ing governancedomain-nginx -n oigns The output will look similar to the following:\nNamespace: oigns Address: 10.96.160.58 Default backend: default-http-backend:80 (\u0026lt;error: endpoints \u0026quot;default-http-backend\u0026quot; not found\u0026gt;) Rules: Host Path Backends ---- ---- -------- * /console governancedomain-adminserver:7001 (10.244.2.96:7001) /em governancedomain-adminserver:7001 (10.244.2.96:7001) /soa governancedomain-cluster-soa-cluster:8001 (10.244.2.97:8001) /integration governancedomain-cluster-soa-cluster:8001 (10.244.2.97:8001) /soa-infra governancedomain-cluster-soa-cluster:8001 (10.244.2.97:8001) /identity governancedomain-cluster-oim-cluster:14000 (10.244.2.98:14000) /admin governancedomain-cluster-oim-cluster:14000 (10.244.2.98:14000) /oim governancedomain-cluster-oim-cluster:14000 (10.244.2.98:14000) 
/sysadmin governancedomain-cluster-oim-cluster:14000 (10.244.2.98:14000) /workflowservice governancedomain-cluster-oim-cluster:14000 (10.244.2.98:14000) /xlWebApp governancedomain-cluster-oim-cluster:14000 (10.244.2.98:14000) /Nexaweb governancedomain-cluster-oim-cluster:14000 (10.244.2.98:14000) /callbackResponseService governancedomain-cluster-oim-cluster:14000 (10.244.2.98:14000) /spml-xsd governancedomain-cluster-oim-cluster:14000 (10.244.2.98:14000) /HTTPClnt governancedomain-cluster-oim-cluster:14000 (10.244.2.98:14000) /reqsvc governancedomain-cluster-oim-cluster:14000 (10.244.2.98:14000) /iam governancedomain-cluster-oim-cluster:14000 (10.244.2.98:14000) /provisioning-callback governancedomain-cluster-oim-cluster:14000 (10.244.2.98:14000) /CertificationCallbackService governancedomain-cluster-oim-cluster:14000 (10.244.2.98:14000) /ucs governancedomain-cluster-oim-cluster:14000 (10.244.2.98:14000) /FacadeWebApp governancedomain-cluster-oim-cluster:14000 (10.244.2.98:14000) /OIGUI governancedomain-cluster-oim-cluster:14000 (10.244.2.98:14000) /weblogic governancedomain-cluster-oim-cluster:14000 (10.244.2.98:14000) Annotations: kubernetes.io/ingress.class: nginx meta.helm.sh/release-name: governancedomain-nginx meta.helm.sh/release-namespace: oigns nginx.ingress.kubernetes.io/affinity: cookie nginx.ingress.kubernetes.io/configuration-snippet: more_set_input_headers \u0026quot;X-Forwarded-Proto: https\u0026quot;; more_set_input_headers \u0026quot;WL-Proxy-SSL: true\u0026quot;; nginx.ingress.kubernetes.io/enable-access-log: false nginx.ingress.kubernetes.io/ingress.allow-http: false nginx.ingress.kubernetes.io/proxy-buffer-size: 2000k Events: Type Reason Age From Message ---- ------ ---- ---- ------- Normal Sync 17s (x2 over 28s) nginx-ingress-controller Scheduled for sync To confirm that the new Ingress is successfully routing to the domain\u0026rsquo;s server pods, run the following command to send a request to the URL for the WebLogic ReadyApp framework:\n$ 
curl -v -k https://${MASTERNODE-HOSTNAME}:${MASTERNODE-PORT}/weblogic/ready For example:\n$ curl -v -k https://masternode.example.com:32033/weblogic/ready The output will look similar to the following:\n$ curl -v -k https://masternode.example.com:32033/weblogic/ready * About to connect() to X.X.X.X port 32033 (#0) * Trying X.X.X.X... * Connected to masternode.example.com (X.X.X.X) port 32033 (#0) * Initializing NSS with certpath: sql:/etc/pki/nssdb * skipping SSL peer certificate verification * SSL connection using TLS_ECDHE_RSA_WITH_AES_128_GCM_SHA256 * Server certificate: * subject: CN=masternode.example.com * start date: Nov 10 13:05:21 2021 GMT * expire date: Nov 10 13:05:21 2022 GMT * common name: masternode.example.com * issuer: CN=masternode.example.com \u0026gt; GET /weblogic/ready HTTP/1.1 \u0026gt; User-Agent: curl/7.29.0 \u0026gt; Host: X.X.X.X:32033 \u0026gt; Accept: */* \u0026gt; \u0026lt; HTTP/1.1 200 OK \u0026lt; Server: nginx/1.19.1 \u0026lt; Date: Mon, 15 Nov 2021 10:49:21 GMT \u0026lt; Content-Length: 0 \u0026lt; Connection: keep-alive \u0026lt; Strict-Transport-Security: max-age=15724800; includeSubDomains \u0026lt; * Connection #0 to host X.X.X.X left intact Verify that you can access the domain URL After setting up the NGINX ingress, verify that the domain applications are accessible through the NGINX ingress port (for example 32033) as per Validate Domain URLs \n" +}, +{ + "uri": "/fmw-kubernetes/21.4.2/oam/manage-oam-domains/wlst-admin-operations/", + "title": "b. 
WLST Administration Operations", + "tags": [], + "description": "Describes the steps for WLST administration using helper pod running in the same Kubernetes Cluster as OAM Domain.", + "content": "To use WLST to administer the OAM domain, use the helper pod in the same Kubernetes cluster as the OAM Domain.\n Run the following command to start a bash shell in the helper pod:\n$ kubectl exec -it helper -n \u0026lt;domain_namespace\u0026gt; -- /bin/bash For example:\n$ kubectl exec -it helper -n oamns -- /bin/bash This will take you into a bash shell in the running helper pod:\n[oracle@helper ~]$ Connect to WLST using the following command:\n$ cd $ORACLE_HOME/oracle_common/common/bin $ ./wlst.sh The output will look similar to the following:\nInitializing WebLogic Scripting Tool (WLST) ... Jython scans all the jar files it can find at first startup. Depending on the system, this process may take a few minutes to complete, and WLST may not return a prompt right away. Welcome to WebLogic Server Administration Scripting Shell Type help() for help on available commands wls:/offline\u0026gt; To access t3 for the Administration Server connect as follows:\nwls:/offline\u0026gt; connect(\u0026#39;weblogic\u0026#39;,\u0026#39;\u0026lt;password\u0026gt;\u0026#39;,\u0026#39;t3://accessdomain-adminserver:7001\u0026#39;) The output will look similar to the following:\nConnecting to t3://accessdomain-adminserver:7001 with userid weblogic ... Successfully connected to Admin Server \u0026quot;AdminServer\u0026quot; that belongs to domain \u0026quot;accessdomain\u0026quot;. Warning: An insecure protocol was used to connect to the server. To ensure on-the-wire security, the SSL port or Admin port should be used instead. 
wls:/accessdomain/serverConfig/\u0026gt; Or to access t3 for the OAM Cluster service, connect as follows:\nconnect(\u0026#39;weblogic\u0026#39;,\u0026#39;\u0026lt;password\u0026gt;\u0026#39;,\u0026#39;t3://accessdomain-cluster-oam-cluster:14100\u0026#39;) The output will look similar to the following:\nConnecting to t3://accessdomain-cluster-oam-cluster:14100 with userid weblogic ... Successfully connected to managed Server \u0026quot;oam_server1\u0026quot; that belongs to domain \u0026quot;accessdomain\u0026quot;. Warning: An insecure protocol was used to connect to the server. To ensure on-the-wire security, the SSL port or Admin port should be used instead. wls:/accessdomain/serverConfig/\u0026gt; Sample operations For a full list of WLST operations refer to WebLogic Server WLST Online and Offline Command Reference.\nDisplay servers wls:/accessdomain/serverConfig/\u0026gt; cd(\u0026#39;/Servers\u0026#39;) wls:/accessdomain/serverConfig/Servers\u0026gt; ls() dr-- AdminServer dr-- oam_policy_mgr1 dr-- oam_policy_mgr2 dr-- oam_policy_mgr3 dr-- oam_policy_mgr4 dr-- oam_policy_mgr5 dr-- oam_server1 dr-- oam_server2 dr-- oam_server3 dr-- oam_server4 dr-- oam_server5 wls:/accessdomain/serverConfig/Servers\u0026gt; Configure logging for managed servers Connect to the Administration Server and run the following:\nwls:/accessdomain/serverConfig/\u0026gt; domainRuntime() Location changed to domainRuntime tree. This is a read-only tree with DomainMBean as the root MBean. 
For more help, use help(\u0026#39;domainRuntime\u0026#39;) wls:/accessdomain/domainRuntime/\u0026gt; wls:/accessdomain/domainRuntime/\u0026gt; listLoggers(pattern=\u0026#34;oracle.oam.*\u0026#34;,target=\u0026#34;oam_server1\u0026#34;) ------------------------------------------+----------------- Logger | Level ------------------------------------------+----------------- oracle.oam | \u0026lt;Inherited\u0026gt; oracle.oam.admin.foundation.configuration | \u0026lt;Inherited\u0026gt; oracle.oam.admin.service.config | \u0026lt;Inherited\u0026gt; oracle.oam.agent | \u0026lt;Inherited\u0026gt; oracle.oam.agent-default | \u0026lt;Inherited\u0026gt; oracle.oam.audit | \u0026lt;Inherited\u0026gt; oracle.oam.binding | \u0026lt;Inherited\u0026gt; oracle.oam.certvalidation | \u0026lt;Inherited\u0026gt; oracle.oam.certvalidation.mbeans | \u0026lt;Inherited\u0026gt; oracle.oam.common.healthcheck | \u0026lt;Inherited\u0026gt; oracle.oam.common.runtimeent | \u0026lt;Inherited\u0026gt; oracle.oam.commonutil | \u0026lt;Inherited\u0026gt; oracle.oam.config | \u0026lt;Inherited\u0026gt; oracle.oam.controller | \u0026lt;Inherited\u0026gt; oracle.oam.default | \u0026lt;Inherited\u0026gt; oracle.oam.diagnostic | \u0026lt;Inherited\u0026gt; oracle.oam.engine.authn | \u0026lt;Inherited\u0026gt; oracle.oam.engine.authz | \u0026lt;Inherited\u0026gt; oracle.oam.engine.policy | \u0026lt;Inherited\u0026gt; oracle.oam.engine.ptmetadata | \u0026lt;Inherited\u0026gt; oracle.oam.engine.session | \u0026lt;Inherited\u0026gt; oracle.oam.engine.sso | \u0026lt;Inherited\u0026gt; oracle.oam.esso | \u0026lt;Inherited\u0026gt; oracle.oam.extensibility.lifecycle | \u0026lt;Inherited\u0026gt; oracle.oam.foundation.access | \u0026lt;Inherited\u0026gt; oracle.oam.idm | \u0026lt;Inherited\u0026gt; oracle.oam.install | \u0026lt;Inherited\u0026gt; oracle.oam.install.bootstrap | \u0026lt;Inherited\u0026gt; oracle.oam.install.mbeans | \u0026lt;Inherited\u0026gt; oracle.oam.ipf.rest.api | \u0026lt;Inherited\u0026gt; 
oracle.oam.oauth | \u0026lt;Inherited\u0026gt; oracle.oam.plugin | \u0026lt;Inherited\u0026gt; oracle.oam.proxy.oam | \u0026lt;Inherited\u0026gt; oracle.oam.proxy.oam.workmanager | \u0026lt;Inherited\u0026gt; oracle.oam.proxy.opensso | \u0026lt;Inherited\u0026gt; oracle.oam.pswd.service.provider | \u0026lt;Inherited\u0026gt; oracle.oam.replication | \u0026lt;Inherited\u0026gt; oracle.oam.user.identity.provider | \u0026lt;Inherited\u0026gt; wls:/accessdomain/domainRuntime/\u0026gt; Set the log level to TRACE:32:\nwls:/accessdomain/domainRuntime/\u0026gt; setLogLevel(target=\u0026#39;oam_server1\u0026#39;,logger=\u0026#39;oracle.oam\u0026#39;,level=\u0026#39;TRACE:32\u0026#39;,persist=\u0026#34;1\u0026#34;,addLogger=1) wls:/accessdomain/domainRuntime/\u0026gt; wls:/accessdomain/domainRuntime/\u0026gt; listLoggers(pattern=\u0026#34;oracle.oam.*\u0026#34;,target=\u0026#34;oam_server1\u0026#34;) ------------------------------------------+----------------- Logger | Level ------------------------------------------+----------------- oracle.oam | TRACE:32 oracle.oam.admin.foundation.configuration | \u0026lt;Inherited\u0026gt; oracle.oam.admin.service.config | \u0026lt;Inherited\u0026gt; oracle.oam.agent | \u0026lt;Inherited\u0026gt; oracle.oam.agent-default | \u0026lt;Inherited\u0026gt; oracle.oam.audit | \u0026lt;Inherited\u0026gt; oracle.oam.binding | \u0026lt;Inherited\u0026gt; oracle.oam.certvalidation | \u0026lt;Inherited\u0026gt; oracle.oam.certvalidation.mbeans | \u0026lt;Inherited\u0026gt; oracle.oam.common.healthcheck | \u0026lt;Inherited\u0026gt; oracle.oam.common.runtimeent | \u0026lt;Inherited\u0026gt; oracle.oam.commonutil | \u0026lt;Inherited\u0026gt; oracle.oam.config | \u0026lt;Inherited\u0026gt; oracle.oam.controller | \u0026lt;Inherited\u0026gt; oracle.oam.default | \u0026lt;Inherited\u0026gt; oracle.oam.diagnostic | \u0026lt;Inherited\u0026gt; oracle.oam.engine.authn | \u0026lt;Inherited\u0026gt; oracle.oam.engine.authz | \u0026lt;Inherited\u0026gt; 
oracle.oam.engine.policy | \u0026lt;Inherited\u0026gt; oracle.oam.engine.ptmetadata | \u0026lt;Inherited\u0026gt; oracle.oam.engine.session | \u0026lt;Inherited\u0026gt; oracle.oam.engine.sso | \u0026lt;Inherited\u0026gt; oracle.oam.esso | \u0026lt;Inherited\u0026gt; oracle.oam.extensibility.lifecycle | \u0026lt;Inherited\u0026gt; oracle.oam.foundation.access | \u0026lt;Inherited\u0026gt; oracle.oam.idm | \u0026lt;Inherited\u0026gt; oracle.oam.install | \u0026lt;Inherited\u0026gt; oracle.oam.install.bootstrap | \u0026lt;Inherited\u0026gt; oracle.oam.install.mbeans | \u0026lt;Inherited\u0026gt; oracle.oam.ipf.rest.api | \u0026lt;Inherited\u0026gt; oracle.oam.oauth | \u0026lt;Inherited\u0026gt; oracle.oam.plugin | \u0026lt;Inherited\u0026gt; oracle.oam.proxy.oam | \u0026lt;Inherited\u0026gt; oracle.oam.proxy.oam.workmanager | \u0026lt;Inherited\u0026gt; oracle.oam.proxy.opensso | \u0026lt;Inherited\u0026gt; oracle.oam.pswd.service.provider | \u0026lt;Inherited\u0026gt; oracle.oam.replication | \u0026lt;Inherited\u0026gt; oracle.oam.user.identity.provider | \u0026lt;Inherited\u0026gt; wls:/accessdomain/domainRuntime/\u0026gt; Verify that TRACE:32 log level is set by connecting to the Administration Server and viewing the logs:\n$ kubectl exec -it accessdomain-adminserver -n oamns -- /bin/bash [oracle@accessdomain-adminserver oracle]$ [oracle@accessdomain-adminserver oracle]$ cd /u01/oracle/user_projects/domains/accessdomain/servers/oam_server1/logs [oracle@accessdomain-adminserver logs]$ tail oam_server1-diagnostic.log 2021-11-02T10:26:14.793+00:00] [oam_server1] [TRACE:32] [] [oracle.oam.config] [tid: Configuration Store Observer] [userId: \u0026lt;anonymous\u0026gt;] [ecid: 8b3ac37b-c7cf-46dd-aeee-5ed67886be21-0000000b,0:1795] [APP: oam_server] [partition-name: DOMAIN] [tenant-name: GLOBAL] [SRC_CLASS: oracle.security.am.admin.config.util.observable.ObservableConfigStore$StoreWatcher] [SRC_METHOD: run] Start of run before start of detection at 1,635,848,774,793. 
Detector: oracle.security.am.admin.config.util.observable.DbStoreChangeDetector:Database configuration store:DSN:jdbc/oamds. Monitor: { StoreMonitor: { disabled: \u0026#39;false\u0026#39; } } [2021-11-02T10:26:14.793+00:00] [oam_server1] [TRACE] [] [oracle.oam.config] [tid: Configuration Store Observer] [userId: \u0026lt;anonymous\u0026gt;] [ecid: 8b3ac37b-c7cf-46dd-aeee-5ed67886be21-0000000b,0:1795] [APP: oam_server] [partition-name: DOMAIN] [tenant-name: GLOBAL] [SRC_CLASS: oracle.security.am.admin.config.util.store.StoreUtil] [SRC_METHOD: getContainerProperty] Configuration property CONFIG_HISTORY not specified [2021-11-02T10:26:14.793+00:00] [oam_server1] [TRACE] [] [oracle.oam.config] [tid: Configuration Store Observer] [userId: \u0026lt;anonymous\u0026gt;] [ecid: 8b3ac37b-c7cf-46dd-aeee-5ed67886be21-0000000b,0:1795] [APP: oam_server] [partition-name: DOMAIN] [tenant-name: GLOBAL] [SRC_CLASS: oracle.security.am.admin.config.util.store.StoreUtil] [SRC_METHOD: getContainerProperty] Configuration property CONFIG not specified [2021-11-02T10:26:14.795+00:00] [oam_server1] [TRACE:32] [] [oracle.oam.config] [tid: Configuration Store Observer] [userId: \u0026lt;anonymous\u0026gt;] [ecid: 8b3ac37b-c7cf-46dd-aeee-5ed67886be21-0000000b,0:1795] [APP: oam_server] [partition-name: DOMAIN] [tenant-name: GLOBAL] [SRC_CLASS: oracle.security.am.admin.config.util.store.DbStore] [SRC_METHOD: getSelectSQL] SELECT SQL:SELECT version from IDM_OBJECT_STORE where id = ? and version = (select max(version) from IDM_OBJECT_STORE where id = ?) 
[2021-11-02T10:26:14.797+00:00] [oam_server1] [TRACE] [] [oracle.oam.config] [tid: Configuration Store Observer] [userId: \u0026lt;anonymous\u0026gt;] [ecid: 8b3ac37b-c7cf-46dd-aeee-5ed67886be21-0000000b,0:1795] [APP: oam_server] [partition-name: DOMAIN] [tenant-name: GLOBAL] [SRC_CLASS: oracle.security.am.admin.config.util.store.DbStore] [SRC_METHOD: load] Time (ms) to load key CONFIG:-1{FIELD_TYPES=INT, SELECT_FIELDS=SELECT version from IDM_OBJECT_STORE }:4 Performing WLST Administration via SSL By default the SSL port is not enabled for the Administration Server or OAM Managed Servers. To configure the SSL port for the Administration Server and Managed Servers login to WebLogic Administration console https://${MASTERNODE-HOSTNAME}:${MASTERNODE-PORT}/console and navigate to Lock \u0026amp; Edit -\u0026gt; Environment -\u0026gt;Servers -\u0026gt; server_name -\u0026gt;Configuration -\u0026gt; General -\u0026gt; SSL Listen Port Enabled -\u0026gt; Provide SSL Port ( For Administration Server: 7002 and for OAM Managed Server (oam_server1): 14101) - \u0026gt; Save -\u0026gt; Activate Changes.\nNote: If configuring the OAM Managed Servers for SSL you must enable SSL on the same port for all servers (oam_server1 through oam_server5)\n Create a myscripts directory as follows:\n$ cd $WORKDIR/kubernetes/ $ mkdir myscripts $ cd myscripts For example:\n$ cd $WORKDIR/kubernetes/ $ mkdir myscripts $ cd myscripts Create a sample yaml template file in the myscripts directory called \u0026lt;domain_uid\u0026gt;-adminserver-ssl.yaml to create a Kubernetes service for the Administration Server:\nNote: Update the domainName, domainUID and namespace based on your environment. 
For example:\napiVersion: v1 kind: Service metadata: labels: serviceType: SERVER weblogic.domainName: accessdomain weblogic.domainUID: accessdomain weblogic.resourceVersion: domain-v2 weblogic.serverName: AdminServer name: accessdomain-adminserverssl namespace: oamns spec: clusterIP: None ports: - name: default port: 7002 protocol: TCP targetPort: 7002 selector: weblogic.createdByOperator: \u0026quot;true\u0026quot; weblogic.domainUID: accessdomain weblogic.serverName: AdminServer type: ClusterIP and the following sample yaml template file \u0026lt;domain_uid\u0026gt;-oamcluster-ssl.yaml for the OAM Managed Server:\napiVersion: v1 kind: Service metadata: labels: serviceType: SERVER weblogic.domainName: accessdomain weblogic.domainUID: accessdomain weblogic.resourceVersion: domain-v2 name: accessdomain-oamcluster-ssl namespace: oamns spec: clusterIP: None ports: - name: default port: 14101 protocol: TCP targetPort: 14101 selector: weblogic.clusterName: oam_cluster weblogic.createdByOperator: \u0026quot;true\u0026quot; weblogic.domainUID: accessdomain type: ClusterIP Apply the template using the following command for the AdminServer:\n$ kubectl apply -f \u0026lt;domain_uid\u0026gt;-adminserver-ssl.yaml For example:\n$ kubectl apply -f accessdomain-adminserver-ssl.yaml service/accessdomain-adminserverssl created and using the following command for the OAM Managed Server:\n$ kubectl apply -f \u0026lt;domain_uid\u0026gt;-oamcluster-ssl.yaml For example:\n$ kubectl apply -f accessdomain-oamcluster-ssl.yaml service/accessdomain-oamcluster-ssl created Validate that the Kubernetes Services to access SSL ports are created successfully:\n$ kubectl get svc -n \u0026lt;domain_namespace\u0026gt; |grep ssl For example:\n$ kubectl get svc -n oamns |grep ssl The output will look similar to the following:\naccessdomain-adminserverssl ClusterIP None \u0026lt;none\u0026gt; 7002/TCP 102s accessdomain-oamcluster-ssl ClusterIP None \u0026lt;none\u0026gt; 14101/TCP 35s Inside the bash 
shell of the running helper pod, run the following:\n[oracle@helper bin]$ export WLST_PROPERTIES=\u0026#34;-Dweblogic.security.SSL.ignoreHostnameVerification=true -Dweblogic.security.TrustKeyStore=DemoTrust\u0026#34; [oracle@helper bin]$ cd /u01/oracle/oracle_common/common/bin [oracle@helper bin]$ ./wlst.sh Initializing WebLogic Scripting Tool (WLST) ... Welcome to WebLogic Server Administration Scripting Shell Type help() for help on available commands wls:/offline\u0026gt; To connect to the Administration Server t3s service:\nwls:/offline\u0026gt; connect(\u0026#39;weblogic\u0026#39;,\u0026#39;\u0026lt;password\u0026gt;\u0026#39;,\u0026#39;t3s://accessdomain-adminserverssl:7002\u0026#39;) Connecting to t3s://accessdomain-adminserverssl:7002 with userid weblogic ... \u0026lt;Nov 2, 2021 10:42:05 AM GMT\u0026gt; \u0026lt;Info\u0026gt; \u0026lt;Security\u0026gt; \u0026lt;BEA-090905\u0026gt; \u0026lt;Disabling the CryptoJ JCE Provider self-integrity check for better startup performance. To enable this check, specify -Dweblogic.security.allowCryptoJDefaultJCEVerification=true.\u0026gt; \u0026lt;Nov 2, 2021 10:42:05 AM GMT\u0026gt; \u0026lt;Info\u0026gt; \u0026lt;Security\u0026gt; \u0026lt;BEA-090906\u0026gt; \u0026lt;Changing the default Random Number Generator in RSA CryptoJ from ECDRBG128 to HMACDRBG. To disable this change, specify -Dweblogic.security.allowCryptoJDefaultPRNG=true.\u0026gt; \u0026lt;Nov 2, 2021 10:42:05 AM GMT\u0026gt; \u0026lt;Info\u0026gt; \u0026lt;Security\u0026gt; \u0026lt;BEA-090909\u0026gt; \u0026lt;Using the configured custom SSL Hostname Verifier implementation: weblogic.security.utils.SSLWLSHostnameVerifier$NullHostnameVerifier.\u0026gt; Successfully connected to Admin Server \u0026#34;AdminServer\u0026#34; that belongs to domain \u0026#34;accessdomain\u0026#34;. 
wls:/accessdomain/serverConfig/\u0026gt; To connect to the OAM Managed Server t3s service:\nwls:/offline\u0026gt; connect(\u0026#39;weblogic\u0026#39;,\u0026#39;\u0026lt;password\u0026gt;\u0026#39;,\u0026#39;t3s://accessdomain-oamcluster-ssl:14101\u0026#39;) Connecting to t3s://accessdomain-oamcluster-ssl:14101 with userid weblogic ... \u0026lt;Nov 2, 2021 10:43:16 AM GMT\u0026gt; \u0026lt;Info\u0026gt; \u0026lt;Security\u0026gt; \u0026lt;BEA-090905\u0026gt; \u0026lt;Disabling the CryptoJ JCE Provider self-integrity check for better startup performance. To enable this check, specify -Dweblogic.security.allowCryptoJDefaultJCEVerification=true.\u0026gt; \u0026lt;Nov 2, 2021 10:43:16 AM GMT\u0026gt; \u0026lt;Info\u0026gt; \u0026lt;Security\u0026gt; \u0026lt;BEA-090906\u0026gt; \u0026lt;Changing the default Random Number Generator in RSA CryptoJ from ECDRBG128 to HMACDRBG. To disable this change, specify -Dweblogic.security.allowCryptoJDefaultPRNG=true.\u0026gt; \u0026lt;Nov 2, 2021 10:43:16 AM GMT\u0026gt; \u0026lt;Info\u0026gt; \u0026lt;Security\u0026gt; \u0026lt;BEA-090909\u0026gt; \u0026lt;Using the configured custom SSL Hostname Verifier implementation: weblogic.security.utils.SSLWLSHostnameVerifier$NullHostnameVerifier.\u0026gt; Successfully connected to managed Server \u0026#34;oam_server1\u0026#34; that belongs to domain \u0026#34;accessdomain\u0026#34;. " +}, +{ + "uri": "/fmw-kubernetes/21.4.2/oam/manage-oam-domains/logging-and-visualization/", + "title": "c. 
Logging and Visualization", + "tags": [], + "description": "Describes the steps for logging and visualization with Elasticsearch and Kibana.", + "content": "After the OAM domain is set up you can publish operator and WebLogic Server logs into Elasticsearch and interact with them in Kibana.\nIn Prepare your environment if you decided to use the Elasticsearch and Kibana by setting the parameter elkIntegrationEnabled to true, then the steps below must be followed to complete the setup.\nIf you did not set elkIntegrationEnabled to true and want to do so post configuration, run the following command from the $WORKDIR directory:\n$ helm upgrade --reuse-values --namespace operator --set \u0026#34;elkIntegrationEnabled=true\u0026#34; --set \u0026#34;logStashImage=logstash:6.6.0\u0026#34; --set \u0026#34;elasticSearchHost=elasticsearch.default.svc.cluster.local\u0026#34; --set \u0026#34;elasticSearchPort=9200\u0026#34; --wait weblogic-kubernetes-operator kubernetes/charts/weblogic-operator The output will look similar to the following:\nRelease \u0026quot;weblogic-kubernetes-operator\u0026quot; has been upgraded. Happy Helming! 
NAME: weblogic-kubernetes-operator LAST DEPLOYED: Tue Nov 2 03:49:45 2021 NAMESPACE: operator STATUS: deployed REVISION: 3 TEST SUITE: None Install Elasticsearch and Kibana Create the Kubernetes resource using the following command:\n$ kubectl apply -f $WORKDIR/kubernetes/elasticsearch-and-kibana/elasticsearch_and_kibana.yaml The output will look similar to the following:\ndeployment.apps/elasticsearch created service/elasticsearch created deployment.apps/kibana created service/kibana created Run the following command to ensure Elasticsearch is used by the operator:\n$ helm get values --all weblogic-kubernetes-operator -n opns The output will look similar to the following:\nCOMPUTED VALUES: clusterSizePaddingValidationEnabled: true domainNamespaceLabelSelector: weblogic-operator=enabled domainNamespaceSelectionStrategy: LabelSelector domainNamespaces: - default elasticSearchHost: elasticsearch.default.svc.cluster.local elasticSearchPort: 9200 elkIntegrationEnabled: true enableClusterRoleBinding: true externalDebugHttpPort: 30999 externalRestEnabled: false externalRestHttpsPort: 31001 externalServiceNameSuffix: -ext image: weblogic-kubernetes-operator:3.3.0 imagePullPolicy: IfNotPresent internalDebugHttpPort: 30999 introspectorJobNameSuffix: -introspector javaLoggingFileCount: 10 javaLoggingFileSizeLimit: 20000000 javaLoggingLevel: FINE logStashImage: logstash:6.6.0 remoteDebugNodePortEnabled: false serviceAccount: op-sa suspendOnDebugStartup: false To check that Elasticsearch and Kibana are deployed in the Kubernetes cluster, run the following command:\n$ kubectl get pods The output will look similar to the following:\nNAME READY STATUS RESTARTS AGE elasticsearch-f7b7c4c4-tb4pp 1/1 Running 0 85s kibana-57f6685789-mgwdl 1/1 Running 0 85s Create the logstash pod OAM Server logs can be pushed to the Elasticsearch server using the logstash pod. 
The logstash pod needs access to the persistent volume of the OAM domain created previously, for example accessdomain-domain-pv. The steps to create the logstash pod are as follows:\n Obtain the OAM domain persistence volume details:\n$ kubectl get pv -n \u0026lt;domain_namespace\u0026gt; For example:\n$ kubectl get pv -n oamns The output will look similar to the following:\nNAME CAPACITY ACCESS MODES RECLAIM POLICY STATUS CLAIM STORAGECLASS REASON AGE accessdomain-domain-pv 10Gi RWX Retain Bound oamns/accessdomain-domain-pvc accessdomain-domain-storage-class 23h Make note of the CLAIM value, for example in this case accessdomain-domain-pvc\n Run the following command to get the mountPath of your domain:\n$ kubectl describe domains \u0026lt;domain_uid\u0026gt; -n \u0026lt;domain_namespace\u0026gt; | grep \u0026#34;Mount Path\u0026#34; For example:\n$ kubectl describe domains accessdomain -n oamns | grep \u0026#34;Mount Path\u0026#34; The output will look similar to the following:\nMount Path: /u01/oracle/user_projects/domains Navigate to the $WORKDIR/kubernetes/elasticsearch-and-kibana directory and create a logstash.yaml file as follows. Change the claimName and mountPath values to match the values returned in the previous commands. 
Change namespace to your domain namespace e.g oamns:\napiVersion: apps/v1 kind: Deployment metadata: name: logstash-wls namespace: oamns spec: selector: matchLabels: k8s-app: logstash-wls template: # create pods using pod definition in this template metadata: labels: k8s-app: logstash-wls spec: volumes: - name: weblogic-domain-storage-volume persistentVolumeClaim: claimName: accessdomain-domain-pvc - name: shared-logs emptyDir: {} containers: - name: logstash image: logstash:6.6.0 command: [\u0026quot;/bin/sh\u0026quot;] args: [\u0026quot;/usr/share/logstash/bin/logstash\u0026quot;, \u0026quot;-f\u0026quot;, \u0026quot;/u01/oracle/user_projects/domains/logstash/logstash.conf\u0026quot;] imagePullPolicy: IfNotPresent volumeMounts: - mountPath: /u01/oracle/user_projects/domains name: weblogic-domain-storage-volume - name: shared-logs mountPath: /shared-logs ports: - containerPort: 5044 name: logstash In the NFS persistent volume directory that corresponds to the mountPath /u01/oracle/user_projects/domains, create a logstash directory. For example:\n$ mkdir -p /scratch/OAMK8S/accessdomainpv/logstash Create a logstash.conf in the newly created logstash directory that contains the following. 
Make sure the paths correspond to your mountPath and domain name:\ninput { file { path =\u0026gt; \u0026quot;/u01/oracle/user_projects/domains/logs/accessdomain/AdminServer*.log\u0026quot; tags =\u0026gt; \u0026quot;Adminserver_log\u0026quot; start_position =\u0026gt; beginning } file { path =\u0026gt; \u0026quot;/u01/oracle/user_projects/domains/logs/accessdomain/oam_policy_mgr*.log\u0026quot; tags =\u0026gt; \u0026quot;Policymanager_log\u0026quot; start_position =\u0026gt; beginning } file { path =\u0026gt; \u0026quot;/u01/oracle/user_projects/domains/logs/accessdomain/oam_server*.log\u0026quot; tags =\u0026gt; \u0026quot;Oamserver_log\u0026quot; start_position =\u0026gt; beginning } file { path =\u0026gt; \u0026quot;/u01/oracle/user_projects/domains/accessdomain/servers/AdminServer/logs/AdminServer-diagnostic.log\u0026quot; tags =\u0026gt; \u0026quot;Adminserver_diagnostic\u0026quot; start_position =\u0026gt; beginning } file { path =\u0026gt; \u0026quot;/u01/oracle/user_projects/domains/accessdomain/servers/**/logs/oam_policy_mgr*-diagnostic.log\u0026quot; tags =\u0026gt; \u0026quot;Policy_diagnostic\u0026quot; start_position =\u0026gt; beginning } file { path =\u0026gt; \u0026quot;/u01/oracle/user_projects/domains/accessdomain/servers/**/logs/oam_server*-diagnostic.log\u0026quot; tags =\u0026gt; \u0026quot;Oamserver_diagnostic\u0026quot; start_position =\u0026gt; beginning } file { path =\u0026gt; \u0026quot;/u01/oracle/user_projects/domains/accessdomain/servers/**/logs/access*.log\u0026quot; tags =\u0026gt; \u0026quot;Access_logs\u0026quot; start_position =\u0026gt; beginning } file { path =\u0026gt; \u0026quot;/u01/oracle/user_projects/domains/accessdomain/servers/AdminServer/logs/auditlogs/OAM/audit.log\u0026quot; tags =\u0026gt; \u0026quot;Audit_logs\u0026quot; start_position =\u0026gt; beginning } } filter { grok { match =\u0026gt; [ \u0026quot;message\u0026quot;, \u0026quot;\u0026lt;%{DATA:log_timestamp}\u0026gt; \u0026lt;%{WORD:log_level}\u0026gt; 
\u0026lt;%{WORD:thread}\u0026gt; \u0026lt;%{HOSTNAME:hostname}\u0026gt; \u0026lt;%{HOSTNAME:servername}\u0026gt; \u0026lt;%{DATA:timer}\u0026gt; \u0026lt;\u0026lt;%{DATA:kernel}\u0026gt;\u0026gt; \u0026lt;\u0026gt; \u0026lt;%{DATA:uuid}\u0026gt; \u0026lt;%{NUMBER:timestamp}\u0026gt; \u0026lt;%{DATA:misc}\u0026gt; \u0026lt;%{DATA:log_number}\u0026gt; \u0026lt;%{DATA:log_message}\u0026gt;\u0026quot; ] } if \u0026quot;_grokparsefailure\u0026quot; in [tags] { mutate { remove_tag =\u0026gt; [ \u0026quot;_grokparsefailure\u0026quot; ] } } } output { elasticsearch { hosts =\u0026gt; [\u0026quot;elasticsearch.default.svc.cluster.local:9200\u0026quot;] } } Deploy the logstash pod by executing the following command:\n$ kubectl create -f $WORKDIR/kubernetes/elasticsearch-and-kibana/logstash.yaml The output will look similar to the following:\ndeployment.apps/logstash-wls created Run the following command to check the logstash pod is created correctly:\n$ kubectl get pods -n \u0026lt;namespace\u0026gt; For example:\n$ kubectl get pods -n oamns The output should look similar to the following:\nNAME READY STATUS RESTARTS AGE accessdomain-adminserver 1/1 Running 0 18h accessdomain-create-oam-infra-domain-job-7c9r9 0/1 Completed 0 23h accessdomain-oam-policy-mgr1 1/1 Running 0 18h accessdomain-oam-policy-mgr2 1/1 Running 0 18h accessdomain-oam-server1 1/1 Running 1 18h accessdomain-oam-server2 1/1 Running 1 18h helper 1/1 Running 0 23h logstash-wls-6687c5bf6-jmmdp 1/1 Running 0 12s nginx-ingress-ingress-nginx-controller-76fb7678f-k8rhq 1/1 Running 0 20h Then run the following to get the Elasticsearch pod name:\n$ kubectl get pods The output should look similar to the following:\nNAME READY STATUS RESTARTS AGE elasticsearch-f7b7c4c4-tb4pp 1/1 Running 0 9m28s kibana-57f6685789-mgwdl 1/1 Running 0 9m28s Verify and access the Kibana console Check if the indices are created correctly in the elasticsearch pod:\n$ kubectl exec -it elasticsearch-f7b7c4c4-tb4pp -- /bin/bash This will take 
you into a bash shell in the elasticsearch pod:\n[root@elasticsearch-f7b7c4c4-tb4pp elasticsearch]# In the elasticsearch bash shell, run the following to check the indices:\n[root@elasticsearch-f7b7c4c4-tb4pp elasticsearch]# curl -i \u0026#34;127.0.0.1:9200/_cat/indices?v\u0026#34; The output will look similar to the following:\nHTTP/1.1 200 OK content-type: text/plain; charset=UTF-8 content-length: 696 health status index uuid pri rep docs.count docs.deleted store.size pri.store.size green open .kibana_task_manager -IPDdiajTSyIRjelI2QJIg 1 0 2 0 12.6kb 12.6kb green open .kibana_1 YI9CZAjsTsCCuAyBb1ho3A 1 0 2 0 7.6kb 7.6kb yellow open logstash-2021.11.01 4pDJSTGVR3-oOwTtHnnTkQ 5 1 148 0 173.9kb 173.9kb yellow open logstash-2021.11.02 raOvTDoOTuC49nq241h4wg 5 1 115834 0 31.7mb 31.7mb Exit the bash shell by typing exit.\n Find the Kibana port by running the following command:\n$ kubectl get svc | grep kibana The output will look similar to the following:\nNAME TYPE CLUSTER-IP EXTERNAL-IP PORT(S) AGE kibana NodePort 10.104.248.203 \u0026lt;none\u0026gt; 5601:31394/TCP 11m In the example above the Kibana port is 31394.\n Access the Kibana console with http://${MASTERNODE-HOSTNAME}:${KIBANA-PORT}/app/kibana.\n Click Dashboard and in the Create index pattern page enter logstash*. Click Next Step.\n From the Time Filter field name drop down menu select @timestamp and click Create index pattern.\n Once the index pattern is created click on Discover in the navigation menu to view the logs.\n For more details on how to use the Kibana console see the Kibana Guide\n" +}, +{ + "uri": "/fmw-kubernetes/21.4.2/categories/", + "title": "Categories", + "tags": [], + "description": "", + "content": "" +}, +{ + "uri": "/fmw-kubernetes/21.4.2/oam/manage-oam-domains/monitoring-oam-domains/", + "title": "d. 
Monitoring an OAM domain", + "tags": [], + "description": "Describes the steps for Monitoring the OAM domain.", + "content": "After the OAM domain is set up you can monitor the OAM instance using Prometheus and Grafana. See Monitoring a domain.\nThe WebLogic Monitoring Exporter uses the WLS RESTful Management API to scrape runtime information and then exports Prometheus-compatible metrics. It is deployed as a web application in a WebLogic Server (WLS) instance, version 12.2.1 or later, typically, in the instance from which you want to get metrics.\nThere are two ways to setup monitoring and you should choose one method or the other:\n Setup automatically using setup-monitoring.sh Setup using manual configuration Setup automatically using setup-monitoring.sh The $WORKDIR/kubernetes/monitoring-service/setup-monitoring.sh sets up the monitoring for the OAM domain. It installs Prometheus, Grafana, WebLogic Monitoring Exporter and deploys the web applications to the OAM domain. It also deploys the WebLogic Server Grafana dashboard.\nFor usage details execute ./setup-monitoring.sh -h.\n Edit the $WORKDIR/kubernetes/monitoring-service/monitoring-inputs.yaml and change the domainUID, domainNamespace, and weblogicCredentialsSecretName to correspond to your deployment. For example:\nversion: create-accessdomain-monitoring-inputs-v1 # Unique ID identifying your domain. # This ID must not contain an underscope (\u0026quot;_\u0026quot;), and must be lowercase and unique across all domains in a Kubernetes cluster. 
domainUID: accessdomain # Name of the domain namespace domainNamespace: oamns # Boolean value indicating whether to install kube-prometheus-stack setupKubePrometheusStack: true # Additional parameters for helm install kube-prometheus-stack # Refer https://github.com/prometheus-community/helm-charts/blob/main/charts/kube-prometheus-stack/values.yaml for additional parameters # Sample : # additionalParamForKubePrometheusStack: --set nodeExporter.enabled=false --set prometheusOperator.tls.enabled=false --set prometheusOperator.admissionWebhooks.enabled=false additionalParamForKubePrometheusStack: # Name of the monitoring namespace monitoringNamespace: monitoring # Name of the Admin Server adminServerName: AdminServer # # Port number for admin server adminServerPort: 7001 # Cluster name oamClusterName: oam_cluster # Port number for managed server oamManagedServerPort: 14100 # WebLogic Monitoring Exporter to Cluster wlsMonitoringExporterTooamCluster: true # Cluster name policyClusterName: policy_cluster # Port number for managed server policyManagedServerPort: 15100 # WebLogic Monitoring Exporter to Cluster wlsMonitoringExporterTopolicyCluster: true # Boolean to indicate if the adminNodePort will be exposed exposeMonitoringNodePort: true # NodePort to expose Prometheus prometheusNodePort: 32101 # NodePort to expose Grafana grafanaNodePort: 32100 # NodePort to expose Alertmanager alertmanagerNodePort: 32102 # Name of the Kubernetes secret for the Admin Server's username and password weblogicCredentialsSecretName: accessdomain-credentials Run the following command to setup monitoring.\n$ cd $WORKDIR/kubernetes/monitoring-service $ ./setup-monitoring.sh -i monitoring-inputs.yaml The output should be similar to the following:\nMonitoring setup in monitoring in progress node/worker-node1 not labeled node/worker-node2 not labeled node/master-node not labeled Setup prometheus-community/kube-prometheus-stack started \u0026quot;prometheus-community\u0026quot; has been added to 
your repositories Hang tight while we grab the latest from your chart repositories... ...Successfully got an update from the \u0026quot;stable\u0026quot; chart repository ...Successfully got an update from the \u0026quot;prometheus\u0026quot; chart repository ...Successfully got an update from the \u0026quot;prometheus-community\u0026quot; chart repository ...Successfully got an update from the \u0026quot;appscode\u0026quot; chart repository Update Complete. ⎈ Happy Helming!⎈ Setup prometheus-community/kube-prometheus-stack in progress NAME: monitoring LAST DEPLOYED: Thu Nov 18 14:13:49 2021 NAMESPACE: monitoring STATUS: deployed REVISION: 1 NOTES: kube-prometheus-stack has been installed. Check its status by running: kubectl --namespace monitoring get pods -l \u0026quot;release=monitoring\u0026quot; Visit https://github.com/prometheus-operator/kube-prometheus for instructions on how to create \u0026amp; configure Alertmanager and Prometheus instances using the Operator. Setup prometheus-community/kube-prometheus-stack completed Deploy WebLogic Monitoring Exporter started Deploying WebLogic Monitoring Exporter with domainNamespace[oamns], domainUID[accessdomain], adminServerPodName[accessdomain-adminserver] % Total % Received % Xferd Average Speed Time Time Time Current Dload Upload Total Spent Left Speed 100 655 100 655 0 0 1564 0 --:--:-- --:--:-- --:--:-- 1566 100 2196k 100 2196k 0 0 2025k 0 0:00:01 0:00:01 --:--:-- 5951k created $WORKDIR/kubernetes/monitoring-service/scripts/wls-exporter-deploy dir created /tmp/ci-EHhB7bP847 /tmp/ci-EHhB7bP847 $WORKDIR/kubernetes/monitoring-service in temp dir adding: WEB-INF/weblogic.xml (deflated 61%) adding: config.yml (deflated 60%) $WORKDIR/kubernetes/monitoring-service created /tmp/ci-e7wPrlLlud 14:26 /tmp/ci-e7wPrlLlud $WORKDIR/kubernetes/monitoring-service in temp dir adding: WEB-INF/weblogic.xml (deflated 61%) adding: config.yml (deflated 60%) $WORKDIR/kubernetes/monitoring-service created /tmp/ci-U38XXs6d06 
/tmp/ci-U38XXs6d06 $WORKDIR/kubernetes/monitoring-service in temp dir adding: WEB-INF/weblogic.xml (deflated 61%) adding: config.yml (deflated 60%) $WORKDIR/kubernetes/monitoring-service Initializing WebLogic Scripting Tool (WLST) ... Welcome to WebLogic Server Administration Scripting Shell Type help() for help on available commands Connecting to t3://accessdomain-adminserver:7001 with userid weblogic ... Successfully connected to Admin Server \u0026quot;AdminServer\u0026quot; that belongs to domain \u0026quot;accessdomain\u0026quot;. Warning: An insecure protocol was used to connect to the server. To ensure on-the-wire security, the SSL port or Admin port should be used instead. Deploying ......... Deploying application from /u01/oracle/wls-exporter-deploy/wls-exporter-adminserver.war to targets AdminServer (upload=true) ... \u0026lt;Nov 18, 2021 2:14:31 PM GMT\u0026gt; \u0026lt;Info\u0026gt; \u0026lt;J2EE Deployment SPI\u0026gt; \u0026lt;BEA-260121\u0026gt; \u0026lt;Initiating deploy operation for application, wls-exporter-adminserver [archive: /u01/oracle/wls-exporter-deploy/wls-exporter-adminserver.war], to AdminServer .\u0026gt; .Completed the deployment of Application with status completed Current Status of your Deployment: Deployment command type: deploy Deployment State : completed Deployment Message : no message Starting application wls-exporter-adminserver. \u0026lt;Nov 18, 2021 2:14:36 PM GMT\u0026gt; \u0026lt;Info\u0026gt; \u0026lt;J2EE Deployment SPI\u0026gt; \u0026lt;BEA-260121\u0026gt; \u0026lt;Initiating start operation for application, wls-exporter-adminserver [archive: null], to AdminServer .\u0026gt; Completed the start of Application with status completed Current Status of your Deployment: Deployment command type: start Deployment State : completed 14:27 Deployment command type: start Deployment State : completed Deployment Message : no message Deploying ......... 
Deploying application from /u01/oracle/wls-exporter-deploy/wls-exporter-oam.war to targets oam_cluster (upload=true) ... \u0026lt;Nov 18, 2021 2:14:37 PM GMT\u0026gt; \u0026lt;Info\u0026gt; \u0026lt;J2EE Deployment SPI\u0026gt; \u0026lt;BEA-260121\u0026gt; \u0026lt;Initiating deploy operation for application, wls-exporter-oam [archive: /u01/oracle/wls-exporter-deploy/wls-exporter-oam.war], to oam_cluster .\u0026gt; .Completed the deployment of Application with status completed Current Status of your Deployment: Deployment command type: deploy Deployment State : completed Deployment Message : no message Starting application wls-exporter-oam. \u0026lt;Nov 18, 2021 2:14:41 PM GMT\u0026gt; \u0026lt;Info\u0026gt; \u0026lt;J2EE Deployment SPI\u0026gt; \u0026lt;BEA-260121\u0026gt; \u0026lt;Initiating start operation for application, wls-exporter-oam [archive: null], to oam_cluster .\u0026gt; .Completed the start of Application with status completed Current Status of your Deployment: Deployment command type: start Deployment State : completed Deployment Message : no message Deploying ......... Deploying application from /u01/oracle/wls-exporter-deploy/wls-exporter-policy.war to targets policy_cluster (upload=true) ... \u0026lt;Nov 18, 2021 2:14:44 PM GMT\u0026gt; \u0026lt;Info\u0026gt; \u0026lt;J2EE Deployment SPI\u0026gt; \u0026lt;BEA-260121\u0026gt; \u0026lt;Initiating deploy operation for application, wls-exporter-policy [archive: /u01/oracle/wls-exporter-deploy/wls-exporter-policy.war], to policy_cluster .\u0026gt; .Completed the deployment of Application with status completed Current Status of your Deployment: Deployment command type: deploy Deployment State : completed Deployment Message : no message Starting application wls-exporter-policy. 
\u0026lt;Nov 18, 2021 2:14:49 PM GMT\u0026gt; \u0026lt;Info\u0026gt; \u0026lt;J2EE Deployment SPI\u0026gt; \u0026lt;BEA-260121\u0026gt; \u0026lt;Initiating start operation for application, wls-exporter-policy [archive: null], to policy_cluster .\u0026gt; .Completed the start of Application with status completed Current Status of your Deployment: Deployment command type: start Deployment State : completed Deployment Message : no message Disconnected from weblogic server: AdminServer Exiting WebLogic Scripting Tool. \u0026lt;Nov 18, 2021 2:14:52 PM GMT\u0026gt; \u0026lt;Warning\u0026gt; \u0026lt;JNDI\u0026gt; \u0026lt;BEA-050001\u0026gt; \u0026lt;WLContext.close() was called in a different thread than the one in which it was created.\u0026gt; 14:27 Deploy WebLogic Monitoring Exporter completed secret/basic-auth created servicemonitor.monitoring.coreos.com/wls-exporter created Deploying WebLogic Server Grafana Dashboard.... {\u0026quot;id\u0026quot;:25,\u0026quot;slug\u0026quot;:\u0026quot;weblogic-server-dashboard\u0026quot;,\u0026quot;status\u0026quot;:\u0026quot;success\u0026quot;,\u0026quot;uid\u0026quot;:\u0026quot;5yUwzbZWz\u0026quot;,\u0026quot;url\u0026quot;:\u0026quot;/d/5yUwzbZWz/weblogic-server-dashboard\u0026quot;,\u0026quot;version\u0026quot;:1} Deployed WebLogic Server Grafana Dashboard successfully Grafana is available at NodePort: 32100 Prometheus is available at NodePort: 32101 Altermanager is available at NodePort: 32102 ============================================================== Prometheus service discovery After the ServiceMonitor is deployed, the wls-exporter should be discovered by Prometheus and be able to collect metrics.\n Access the following URL to view Prometheus service discovery: http://${MASTERNODE-HOSTNAME}:32101/service-discovery\n Click on serviceMonitor/oamns/wls-exporter/0 and then show more. 
Verify all the targets are mentioned.\n Note : It may take several minutes for serviceMonitor/oamns/wls-exporter/0 to appear, so refresh the page until it does.\nGrafana dashboard Access the Grafana dashboard with the following URL: http://${MASTERNODE-HOSTNAME}:32100 and login with admin/admin. Change your password when prompted.\n In the Dashboards panel, click on WebLogic Server Dashboard. The dashboard for your OAM domain should be displayed. If it is not displayed, click the Search icon in the left hand menu and search for WebLogic Server Dashboard.\n Cleanup To uninstall the Prometheus, Grafana, WebLogic Monitoring Exporter and the deployments, you can run the $WORKDIR/monitoring-service/kubernetes/delete-monitoring.sh script. For usage details execute ./delete-monitoring.sh -h\n To uninstall run the following command:\nFor example:\n$ cd $WORKDIR/kubernetes/monitoring-service $ ./delete-monitoring.sh -i monitoring-inputs.yaml Setup using manual configuration Install Prometheus, Grafana and WebLogic Monitoring Exporter manually. Create the web applications and deploy to the OAM domain.\nDeploy the Prometheus operator Kube-Prometheus requires all nodes to be labelled with kubernetes.io/os=linux. 
To check if your nodes are labelled, run the following:\n$ kubectl get nodes --show-labels If the nodes are labelled the output will look similar to the following:\nNAME STATUS ROLES AGE VERSION LABELS worker-node1 Ready \u0026lt;none\u0026gt; 42d v1.20.10 beta.kubernetes.io/arch=amd64,beta.kubernetes.io/os=linux,kubernetes.io/arch=amd64,kubernetes.io/hostname=worker-node1,kubernetes.io/os=linux worker-node2 Ready \u0026lt;none\u0026gt; 42d v1.20.10 beta.kubernetes.io/arch=amd64,beta.kubernetes.io/os=linux,kubernetes.io/arch=amd64,kubernetes.io/hostname=worker-node2,kubernetes.io/os=linux master-node Ready master 42d v1.20.10 beta.kubernetes.io/arch=amd64,beta.kubernetes.io/os=linux,kubernetes.io/arch=amd64,kubernetes.io/hostname=master-node,kubernetes.io/os=linux,node-role.kubernetes.io/master= If the nodes are not labelled, run the following command:\n$ kubectl label nodes --all kubernetes.io/os=linux Clone Prometheus by running the following commands:\n$ cd $WORKDIR/kubernetes/monitoring-service $ git clone https://github.com/coreos/kube-prometheus.git -b v0.7.0 Note: Please refer to the compatibility matrix of Kube Prometheus. 
Please download the release of the repository according to the Kubernetes version of your cluster.\n Run the following command to create the namespace and custom resource definitions:\n$ cd kube-prometheus $ kubectl create -f manifests/setup The output will look similar to the following:\nnamespace/monitoring created customresourcedefinition.apiextensions.k8s.io/alertmanagerconfigs.monitoring.coreos.com created customresourcedefinition.apiextensions.k8s.io/alertmanagers.monitoring.coreos.com created customresourcedefinition.apiextensions.k8s.io/podmonitors.monitoring.coreos.com created customresourcedefinition.apiextensions.k8s.io/probes.monitoring.coreos.com created customresourcedefinition.apiextensions.k8s.io/prometheuses.monitoring.coreos.com created customresourcedefinition.apiextensions.k8s.io/prometheusrules.monitoring.coreos.com created customresourcedefinition.apiextensions.k8s.io/servicemonitors.monitoring.coreos.com created customresourcedefinition.apiextensions.k8s.io/thanosrulers.monitoring.coreos.com created clusterrole.rbac.authorization.k8s.io/prometheus-operator created clusterrolebinding.rbac.authorization.k8s.io/prometheus-operator created deployment.apps/prometheus-operator created service/prometheus-operator created serviceaccount/prometheus-operator created Run the following command to create the rest of the resources:\n$ kubectl create -f manifests/ The output will look similar to the following:\nalertmanager.monitoring.coreos.com/main created prometheusrule.monitoring.coreos.com/alertmanager-main-rules created secret/alertmanager-main created service/alertmanager-main created serviceaccount/alertmanager-main created servicemonitor.monitoring.coreos.com/alertmanager-main created clusterrole.rbac.authorization.k8s.io/blackbox-exporter created clusterrolebinding.rbac.authorization.k8s.io/blackbox-exporter created configmap/blackbox-exporter-configuration created deployment.apps/blackbox-exporter created service/blackbox-exporter created 
serviceaccount/blackbox-exporter created servicemonitor.monitoring.coreos.com/blackbox-exporter created secret/grafana-config created secret/grafana-datasources created configmap/grafana-dashboard-alertmanager-overview created configmap/grafana-dashboard-apiserver created configmap/grafana-dashboard-cluster-total created configmap/grafana-dashboard-controller-manager created configmap/grafana-dashboard-k8s-resources-cluster created configmap/grafana-dashboard-k8s-resources-namespace created configmap/grafana-dashboard-k8s-resources-node created configmap/grafana-dashboard-k8s-resources-pod created configmap/grafana-dashboard-k8s-resources-workload created configmap/grafana-dashboard-k8s-resources-workloads-namespace created configmap/grafana-dashboard-kubelet created configmap/grafana-dashboard-namespace-by-pod created configmap/grafana-dashboard-namespace-by-workload created configmap/grafana-dashboard-node-cluster-rsrc-use created configmap/grafana-dashboard-node-rsrc-use created configmap/grafana-dashboard-nodes created configmap/grafana-dashboard-persistentvolumesusage created configmap/grafana-dashboard-pod-total created configmap/grafana-dashboard-prometheus-remote-write created configmap/grafana-dashboard-prometheus created configmap/grafana-dashboard-proxy created configmap/grafana-dashboard-scheduler created configmap/grafana-dashboard-workload-total created configmap/grafana-dashboards created deployment.apps/grafana created service/grafana created serviceaccount/grafana created servicemonitor.monitoring.coreos.com/grafana created prometheusrule.monitoring.coreos.com/kube-prometheus-rules created clusterrole.rbac.authorization.k8s.io/kube-state-metrics created clusterrolebinding.rbac.authorization.k8s.io/kube-state-metrics created deployment.apps/kube-state-metrics created prometheusrule.monitoring.coreos.com/kube-state-metrics-rules created service/kube-state-metrics created serviceaccount/kube-state-metrics created 
servicemonitor.monitoring.coreos.com/kube-state-metrics created prometheusrule.monitoring.coreos.com/kubernetes-monitoring-rules created servicemonitor.monitoring.coreos.com/kube-apiserver created servicemonitor.monitoring.coreos.com/coredns created servicemonitor.monitoring.coreos.com/kube-controller-manager created servicemonitor.monitoring.coreos.com/kube-scheduler created servicemonitor.monitoring.coreos.com/kubelet created clusterrole.rbac.authorization.k8s.io/node-exporter created clusterrolebinding.rbac.authorization.k8s.io/node-exporter created daemonset.apps/node-exporter created prometheusrule.monitoring.coreos.com/node-exporter-rules created service/node-exporter created serviceaccount/node-exporter created servicemonitor.monitoring.coreos.com/node-exporter created apiservice.apiregistration.k8s.io/v1beta1.metrics.k8s.io created clusterrole.rbac.authorization.k8s.io/prometheus-adapter created clusterrole.rbac.authorization.k8s.io/system:aggregated-metrics-reader created clusterrolebinding.rbac.authorization.k8s.io/prometheus-adapter created clusterrolebinding.rbac.authorization.k8s.io/resource-metrics:system:auth-delegator created clusterrole.rbac.authorization.k8s.io/resource-metrics-server-resources created configmap/adapter-config created deployment.apps/prometheus-adapter created rolebinding.rbac.authorization.k8s.io/resource-metrics-auth-reader created service/prometheus-adapter created serviceaccount/prometheus-adapter created servicemonitor.monitoring.coreos.com/prometheus-adapter created clusterrole.rbac.authorization.k8s.io/prometheus-k8s created clusterrolebinding.rbac.authorization.k8s.io/prometheus-k8s created prometheusrule.monitoring.coreos.com/prometheus-operator-rules created servicemonitor.monitoring.coreos.com/prometheus-operator created prometheus.monitoring.coreos.com/k8s created prometheusrule.monitoring.coreos.com/prometheus-k8s-prometheus-rules created rolebinding.rbac.authorization.k8s.io/prometheus-k8s-config created 
rolebinding.rbac.authorization.k8s.io/prometheus-k8s created rolebinding.rbac.authorization.k8s.io/prometheus-k8s created rolebinding.rbac.authorization.k8s.io/prometheus-k8s created role.rbac.authorization.k8s.io/prometheus-k8s-config created role.rbac.authorization.k8s.io/prometheus-k8s created role.rbac.authorization.k8s.io/prometheus-k8s created role.rbac.authorization.k8s.io/prometheus-k8s created service/prometheus-k8s created serviceaccount/prometheus-k8s created servicemonitor.monitoring.coreos.com/prometheus-k8s created unable to recognize \u0026quot;manifests/alertmanager-podDisruptionBudget.yaml\u0026quot;: no matches for kind \u0026quot;PodDisruptionBudget\u0026quot; in version \u0026quot;policy/v1\u0026quot; unable to recognize \u0026quot;manifests/prometheus-adapter-podDisruptionBudget.yaml\u0026quot;: no matches for kind \u0026quot;PodDisruptionBudget\u0026quot; in version \u0026quot;policy/v1\u0026quot; unable to recognize \u0026quot;manifests/prometheus-podDisruptionBudget.yaml\u0026quot;: no matches for kind \u0026quot;PodDisruptionBudget\u0026quot; in version \u0026quot;policy/v1\u0026quot; Provide external access for Grafana, Prometheus, and Alertmanager, by running the following commands:\n$ kubectl patch svc grafana -n monitoring --type=json -p \u0026#39;[{\u0026#34;op\u0026#34;: \u0026#34;replace\u0026#34;, \u0026#34;path\u0026#34;: \u0026#34;/spec/type\u0026#34;, \u0026#34;value\u0026#34;: \u0026#34;NodePort\u0026#34; },{\u0026#34;op\u0026#34;: \u0026#34;replace\u0026#34;, \u0026#34;path\u0026#34;: \u0026#34;/spec/ports/0/nodePort\u0026#34;, \u0026#34;value\u0026#34;: 32100 }]\u0026#39; $ kubectl patch svc prometheus-k8s -n monitoring --type=json -p \u0026#39;[{\u0026#34;op\u0026#34;: \u0026#34;replace\u0026#34;, \u0026#34;path\u0026#34;: \u0026#34;/spec/type\u0026#34;, \u0026#34;value\u0026#34;: \u0026#34;NodePort\u0026#34; },{\u0026#34;op\u0026#34;: \u0026#34;replace\u0026#34;, \u0026#34;path\u0026#34;: 
\u0026#34;/spec/ports/0/nodePort\u0026#34;, \u0026#34;value\u0026#34;: 32101 }]\u0026#39; $ kubectl patch svc alertmanager-main -n monitoring --type=json -p \u0026#39;[{\u0026#34;op\u0026#34;: \u0026#34;replace\u0026#34;, \u0026#34;path\u0026#34;: \u0026#34;/spec/type\u0026#34;, \u0026#34;value\u0026#34;: \u0026#34;NodePort\u0026#34; },{\u0026#34;op\u0026#34;: \u0026#34;replace\u0026#34;, \u0026#34;path\u0026#34;: \u0026#34;/spec/ports/0/nodePort\u0026#34;, \u0026#34;value\u0026#34;: 32102 }]\u0026#39; Note: This assigns port 32100 to Grafana, 32101 to Prometheus, and 32102 to Alertmanager.\nThe output will look similar to the following:\nservice/grafana patched service/prometheus-k8s patched service/alertmanager-main patched Verify that the Prometheus, Grafana, and Alertmanager pods are running in the monitoring namespace and the respective services have the exports configured correctly:\n$ kubectl get pods,services -o wide -n monitoring The output should look similar to the following:\nNAME READY STATUS RESTARTS AGE IP NODE NOMINATED NODE READINESS GATES pod/alertmanager-main-0 2/2 Running 0 67s 10.244.1.7 worker-node1 \u0026lt;none\u0026gt; \u0026lt;none\u0026gt; pod/alertmanager-main-1 2/2 Running 0 67s 10.244.2.26 worker-node2 \u0026lt;none\u0026gt; \u0026lt;none\u0026gt; pod/alertmanager-main-2 2/2 Running 0 67s 10.244.1.8 worker-node1 \u0026lt;none\u0026gt; \u0026lt;none\u0026gt; pod/grafana-f8cd57fcf-tmlqt 1/1 Running 0 65s 10.244.2.28 worker-node2 \u0026lt;none\u0026gt; \u0026lt;none\u0026gt; pod/kube-state-metrics-587bfd4f97-l8knh 3/3 Running 0 65s 10.244.1.9 worker-node1 \u0026lt;none\u0026gt; \u0026lt;none\u0026gt; pod/node-exporter-2ztpd 2/2 Running 0 65s 10.247.95.26 worker-node1 \u0026lt;none\u0026gt; \u0026lt;none\u0026gt; pod/node-exporter-92sxb 2/2 Running 0 65s 10.250.40.59 worker-node2 \u0026lt;none\u0026gt; \u0026lt;none\u0026gt; pod/node-exporter-d77tl 2/2 Running 0 65s 10.196.54.36 master-node \u0026lt;none\u0026gt; \u0026lt;none\u0026gt; 
pod/prometheus-adapter-69b8496df6-6gqrz 1/1 Running 0 65s 10.244.2.29 worker-node2 \u0026lt;none\u0026gt; \u0026lt;none\u0026gt; pod/prometheus-k8s-0 2/2 Running 1 66s 10.244.2.27 worker-node2 \u0026lt;none\u0026gt; \u0026lt;none\u0026gt; pod/prometheus-k8s-1 2/2 Running 1 66s 10.244.1.10 worker-node1 \u0026lt;none\u0026gt; \u0026lt;none\u0026gt; pod/prometheus-operator-7649c7454f-9p747 2/2 Running 0 2m 10.244.2.25 worker-node2 \u0026lt;none\u0026gt; \u0026lt;none\u0026gt; NAME TYPE CLUSTER-IP EXTERNAL-IP PORT(S) AGE SELECTOR service/alertmanager-main NodePort 10.104.92.62 \u0026lt;none\u0026gt; 9093:32102/TCP 67s alertmanager=main,app=alertmanager service/alertmanager-operated ClusterIP None \u0026lt;none\u0026gt; 9093/TCP,9094/TCP,9094/UDP 67s app=alertmanager service/grafana NodePort 10.100.171.3 \u0026lt;none\u0026gt; 3000:32100/TCP 66s app=grafana service/kube-state-metrics ClusterIP None \u0026lt;none\u0026gt; 8443/TCP,9443/TCP 66s app.kubernetes.io/name=kube-state-metrics service/node-exporter ClusterIP None \u0026lt;none\u0026gt; 9100/TCP 66s app.kubernetes.io/name=node-exporter service/prometheus-adapter ClusterIP 10.109.248.92 \u0026lt;none\u0026gt; 443/TCP 66s name=prometheus-adapter service/prometheus-k8s NodePort 10.98.212.247 \u0026lt;none\u0026gt; 9090:32101/TCP 66s app=prometheus,prometheus=k8s service/prometheus-operated ClusterIP None \u0026lt;none\u0026gt; 9090/TCP 66s app=prometheus service/prometheus-operator ClusterIP None \u0026lt;none\u0026gt; 8443/TCP 2m1s app.kubernetes.io/component=controller,app.kubernetes.io/name=prometheus-operator Deploy WebLogic Monitoring Exporter Generate the WebLogic Monitoring Exporter deployment package. The wls-exporter.war package need to be updated and created for each listening port (Administration Server and Managed Servers) in the domain. 
Set the below environment values and run the script get-wls-exporter.sh to generate the required WAR files at ${WORKDIR}/kubernetes/monitoring-service/scripts/wls-exporter-deploy:\n$ cd $WORKDIR/kubernetes/monitoring-service/scripts $ export adminServerPort=7001 $ export wlsMonitoringExporterTopolicyCluster=true $ export policyManagedServerPort=15100 $ export wlsMonitoringExporterTooamCluster=true $ export oamManagedServerPort=14100 $ sh get-wls-exporter.sh The output will look similar to the following:\n % Total % Received % Xferd Average Speed Time Time Time Current Dload Upload Total Spent Left Speed 100 655 100 655 0 0 1107 0 --:--:-- --:--:-- --:--:-- 1108 100 2196k 100 2196k 0 0 1787k 0 0:00:01 0:00:01 --:--:-- 9248k created $WORKDIR/kubernetes/monitoring-service/scripts/wls-exporter-deploy dir domainNamespace is empty, setting to default oamns domainUID is empty, setting to default accessdomain weblogicCredentialsSecretName is empty, setting to default \u0026quot;accessdomain-domain-credentials\u0026quot; adminServerName is empty, setting to default \u0026quot;AdminServer\u0026quot; oamClusterName is empty, setting to default \u0026quot;oam_cluster\u0026quot; policyClusterName is empty, setting to default \u0026quot;policy_cluster\u0026quot; created /tmp/ci-Bu74rCBxwu /tmp/ci-Bu74rCBxwu $WORKDIR/kubernetes/monitoring-service/scripts in temp dir adding: WEB-INF/weblogic.xml (deflated 61%) adding: config.yml (deflated 60%) $WORKDIR/kubernetes/monitoring-service/scripts created /tmp/ci-RQv3rLbLsX /tmp/ci-RQv3rLbLsX $WORKDIR/kubernetes/monitoring-service/scripts in temp dir adding: WEB-INF/weblogic.xml (deflated 61%) adding: config.yml (deflated 60%) $WORKDIR/kubernetes/monitoring-service/scripts created /tmp/ci-DWIYlocP5e /tmp/ci-DWIYlocP5e $WORKDIR/kubernetes/monitoring-service/scripts in temp dir adding: WEB-INF/weblogic.xml (deflated 61%) adding: config.yml (deflated 60%) $WORKDIR/kubernetes/monitoring-service/scripts Deploy the WebLogic Monitoring Exporter 
WAR files into the Oracle Access Management domain:\n$ cd $WORKDIR/kubernetes/monitoring-service/scripts $ kubectl cp wls-exporter-deploy \u0026lt;domain_namespace\u0026gt;/\u0026lt;domain_uid\u0026gt;-adminserver:/u01/oracle $ kubectl cp deploy-weblogic-monitoring-exporter.py \u0026lt;domain_namespace\u0026gt;/\u0026lt;domain_uid\u0026gt;-adminserver:/u01/oracle/wls-exporter-deploy $ kubectl exec -it -n \u0026lt;domain_namespace\u0026gt; \u0026lt;domain_uid\u0026gt;-adminserver -- /u01/oracle/oracle_common/common/bin/wlst.sh /u01/oracle/wls-exporter-deploy/deploy-weblogic-monitoring-exporter.py -domainName \u0026lt;domain_uid\u0026gt; -adminServerName AdminServer -adminURL \u0026lt;domain_uid\u0026gt;-adminserver:7001 -username weblogic -password \u0026lt;password\u0026gt; -oamClusterName oam_cluster -wlsMonitoringExporterTooamCluster true -policyClusterName policy_cluster -wlsMonitoringExporterTopolicyCluster true For example:\n$ cd $WORKDIR/kubernetes/monitoring-service/scripts $ kubectl cp wls-exporter-deploy oamns/accessdomain-adminserver:/u01/oracle $ kubectl cp deploy-weblogic-monitoring-exporter.py oamns/accessdomain-adminserver:/u01/oracle/wls-exporter-deploy $ kubectl exec -it -n oamns accessdomain-adminserver -- /u01/oracle/oracle_common/common/bin/wlst.sh /u01/oracle/wls-exporter-deploy/deploy-weblogic-monitoring-exporter.py -domainName accessdomain -adminServerName AdminServer -adminURL accessdomain-adminserver:7001 -username weblogic -password \u0026lt;password\u0026gt; -oamClusterName oam_cluster -wlsMonitoringExporterTooamCluster true -policyClusterName policy_cluster -wlsMonitoringExporterTopolicyCluster true The output will look similar to the following:\nInitializing WebLogic Scripting Tool (WLST) ... Welcome to WebLogic Server Administration Scripting Shell Type help() for help on available commands Connecting to t3://accessdomain-adminserver:7001 with userid weblogic ... 
Successfully connected to Admin Server \u0026quot;AdminServer\u0026quot; that belongs to domain \u0026quot;accessdomain\u0026quot;. Warning: An insecure protocol was used to connect to the server. To ensure on-the-wire security, the SSL port or Admin port should be used instead. Deploying ......... Deploying application from /u01/oracle/wls-exporter-deploy/wls-exporter-adminserver.war to targets AdminServer (upload=true) ... \u0026lt;Nov 10, 2021 3:38:15 PM GMT\u0026gt; \u0026lt;Info\u0026gt; \u0026lt;J2EE Deployment SPI\u0026gt; \u0026lt;BEA-260121\u0026gt; \u0026lt;Initiating deploy operation for application, wls-exporter-adminserver [archive: /u01/oracle/wls-exporter-deploy/wls-exporter-adminserver.war], to AdminServer .\u0026gt; ..Completed the deployment of Application with status completed Current Status of your Deployment: Deployment command type: deploy Deployment State : completed Deployment Message : no message Starting application wls-exporter-adminserver. \u0026lt;Nov 10, 2021 3:38:25 PM GMT\u0026gt; \u0026lt;Info\u0026gt; \u0026lt;J2EE Deployment SPI\u0026gt; \u0026lt;BEA-260121\u0026gt; \u0026lt;Initiating start operation for application, wls-exporter-adminserver [archive: null], to AdminServer .\u0026gt; .Completed the start of Application with status completed Current Status of your Deployment: Deployment command type: start Deployment State : completed Deployment Message : no message Deploying ......... Deploying application from /u01/oracle/wls-exporter-deploy/wls-exporter-oam.war to targets oam_cluster (upload=true) ... 
\u0026lt;Nov 10, 2021 3:38:28 PM GMT\u0026gt; \u0026lt;Info\u0026gt; \u0026lt;J2EE Deployment SPI\u0026gt; \u0026lt;BEA-260121\u0026gt; \u0026lt;Initiating deploy operation for application, wls-exporter-oam [archive: /u01/oracle/wls-exporter-deploy/wls-exporter-oam.war], to oam_cluster .\u0026gt; .Completed the deployment of Application with status completed Current Status of your Deployment: Deployment command type: deploy Deployment State : completed Deployment Message : no message Starting application wls-exporter-oam. \u0026lt;Nov 10, 2021 3:38:34 PM GMT\u0026gt; \u0026lt;Info\u0026gt; \u0026lt;J2EE Deployment SPI\u0026gt; \u0026lt;BEA-260121\u0026gt; \u0026lt;Initiating start operation for application, wls-exporter-oam [archive: null], to oam_cluster .\u0026gt; .Completed the start of Application with status completed Current Status of your Deployment: Deployment command type: start Deployment State : completed Deployment Message : no message Deploying ......... Deploying application from /u01/oracle/wls-exporter-deploy/wls-exporter-policy.war to targets policy_cluster (upload=true) ... \u0026lt;Nov 10, 2021 3:38:38 PM GMT\u0026gt; \u0026lt;Info\u0026gt; \u0026lt;J2EE Deployment SPI\u0026gt; \u0026lt;BEA-260121\u0026gt; \u0026lt;Initiating deploy operation for application, wls-exporter-policy [archive: /u01/oracle/wls-exporter-deploy/wls-exporter-policy.war], to policy_cluster .\u0026gt; .Completed the deployment of Application with status completed Current Status of your Deployment: Deployment command type: deploy Deployment State : completed Deployment Message : no message Starting application wls-exporter-policy. 
\u0026lt;Nov 10, 2021 3:38:44 PM GMT\u0026gt; \u0026lt;Info\u0026gt; \u0026lt;J2EE Deployment SPI\u0026gt; \u0026lt;BEA-260121\u0026gt; \u0026lt;Initiating start operation for application, wls-exporter-policy [archive: null], to policy_cluster .\u0026gt; .Completed the start of Application with status completed Current Status of your Deployment: Deployment command type: start Deployment State : completed Deployment Message : no message Disconnected from weblogic server: AdminServer Exiting WebLogic Scripting Tool. \u0026lt;Nov 10, 2021 3:38:47 PM GMT\u0026gt; \u0026lt;Warning\u0026gt; \u0026lt;JNDI\u0026gt; \u0026lt;BEA-050001\u0026gt; \u0026lt;WLContext.close() was called in a different thread than the one in which it was created.\u0026gt; Configure Prometheus Operator Prometheus enables you to collect metrics from the WebLogic Monitoring Exporter. The Prometheus Operator identifies the targets using service discovery. To get the WebLogic Monitoring Exporter end point discovered as a target, you must create a service monitor pointing to the service.\nThe exporting of metrics from wls-exporter requires basicAuth, so a Kubernetes Secret is created with the user name and password that are base64 encoded. This Secret is used in the ServiceMonitor deployment. The wls-exporter-ServiceMonitor.yaml has basicAuth with credentials as username: weblogic and password: \u0026lt;password\u0026gt; in base64 encoded.\n Run the following command to get the base64 encoded version of the weblogic password:\n$ echo -n \u0026#34;\u0026lt;password\u0026gt;\u0026#34; | base64 The output will look similar to the following:\nV2VsY29tZTE= Update the $WORKDIR/kubernetes/monitoring-service/manifests/wls-exporter-ServiceMonitor.yaml and change the password: value to the value returned above. 
Also change the namespace: and weblogic.domainName: values to match your OAM namespace and domain name:\napiVersion: v1 kind: Secret metadata: name: basic-auth namespace: oamns data: password: V2VsY29tZTE= user: d2VibG9naWM= type: Opaque --- apiVersion: monitoring.coreos.com/v1 kind: ServiceMonitor metadata: name: wls-exporter namespace: oamns labels: k8s-app: wls-exporter release: monitoring spec: namespaceSelector: matchNames: - oamns selector: matchLabels: weblogic.domainName: accessdomain endpoints: - basicAuth: password: name: basic-auth key: password username: name: basic-auth key: user port: default relabelings: - action: labelmap regex: __meta_kubernetes_service_label_(.+) interval: 10s honorLabels: true path: /wls-exporter/metrics Update the $WORKDIR/kubernetes/monitoring-service/manifests/prometheus-roleSpecific-domain-namespace.yaml and change the namespace to match your OAM namespace. For example:\napiVersion: rbac.authorization.k8s.io/v1 items: - apiVersion: rbac.authorization.k8s.io/v1 kind: Role metadata: name: prometheus-k8s namespace: oamns rules: - apiGroups: - \u0026quot;\u0026quot; resources: - services - endpoints - pods verbs: - get - list - watch kind: RoleList Update the $WORKDIR/kubernetes/monitoring-service/manifests/prometheus-roleBinding-domain-namespace.yaml and change the namespace` to match your OAM namespace. For example:\napiVersion: rbac.authorization.k8s.io/v1 items: - apiVersion: rbac.authorization.k8s.io/v1 kind: RoleBinding metadata: name: prometheus-k8s namespace: oamns roleRef: apiGroup: rbac.authorization.k8s.io kind: Role name: prometheus-k8s subjects: - kind: ServiceAccount name: prometheus-k8s namespace: monitoring kind: RoleBindingList Run the following command to enable Prometheus:\n$ kubectl apply -f . 
The output will look similar to the following:\nrolebinding.rbac.authorization.k8s.io/prometheus-k8s created role.rbac.authorization.k8s.io/prometheus-k8s created secret/basic-auth created servicemonitor.monitoring.coreos.com/wls-exporter created Prometheus Service Discovery After the ServiceMonitor is deployed, the wls-exporter should be discovered by Prometheus and be able to collect metrics.\n Access the following URL to view Prometheus service discovery: http://${MASTERNODE-HOSTNAME}:32101/service-discovery\n Click on oamns/wls-exporter/0 and then show more. Verify all the targets are mentioned.\n Grafana Dashboard Access the Grafana dashboard with the following URL: http://${MASTERNODE-HOSTNAME}:32100 and login with admin/admin. Change your password when prompted.\n Import the Grafana dashboard by navigating on the left hand menu to Create \u0026gt; Import. Copy the content from $WORKDIR/kubernetes/monitoring-service/config/weblogic-server-dashboard-import.json and paste. Then click Load and Import. The dashboard should be displayed in the Dashboards panel.\n Cleanup To clean up a manual installation:\n Run the following commands:\n$ cd $WORKDIR/kubernetes/monitoring-service/manifests/ $ kubectl delete -f . 
Delete the deployments:\n$ cd $WORKDIR/kubernetes/monitoring-service/scripts/ $ kubectl cp undeploy-weblogic-monitoring-exporter.py \u0026lt;domain_namespace\u0026gt;/\u0026lt;domain_uid\u0026gt;-adminserver:/u01/oracle/wls-exporter-deploy $ kubectl exec -it -n \u0026lt;domain_namespace\u0026gt; \u0026lt;domain_uid\u0026gt;-adminserver -- /u01/oracle/oracle_common/common/bin/wlst.sh /u01/oracle/wls-exporter-deploy/undeploy-weblogic-monitoring-exporter.py -domainName \u0026lt;domain_uid\u0026gt; -adminServerName AdminServer -adminURL \u0026lt;domain_uid\u0026gt;-adminserver:7001 -username weblogic -password \u0026lt;password\u0026gt; -oamClusterName oam_cluster -wlsMonitoringExporterTooamCluster true -policyClusterName policy_cluster -wlsMonitoringExporterTopolicyCluster true Delete Prometheus:\n$ cd $WORKDIR/kubernetes/monitoring-service/kube-prometheus $ kubectl delete -f manifests $ kubectl delete -f manifests/setup " +}, +{ + "uri": "/fmw-kubernetes/21.4.2/oam/manage-oam-domains/delete-domain-home/", + "title": "e. 
Delete the OAM domain home", + "tags": [], + "description": "Learn about the steps to cleanup the OAM domain home.", + "content": "Sometimes in production, but most likely in testing environments, you might want to remove the domain home that is generated using the create-domain.sh script.\n Run the following command to delete the domain:\n$ cd $WORKDIR/kubernetes/delete-domain $ ./delete-weblogic-domain-resources.sh -d \u0026lt;domain_uid\u0026gt; For example:\n$ cd $WORKDIR/kubernetes/delete-domain $ ./delete-weblogic-domain-resources.sh -d accessdomain Drop the RCU schemas as follows:\n$ kubectl exec -it helper -n \u0026lt;domain_namespace\u0026gt; -- /bin/bash [oracle@helper ~]$ [oracle@helper ~]$ export CONNECTION_STRING=\u0026lt;db_host.domain\u0026gt;:\u0026lt;db_port\u0026gt;/\u0026lt;service_name\u0026gt; [oracle@helper ~]$ export RCUPREFIX=\u0026lt;rcu_schema_prefix\u0026gt; /u01/oracle/oracle_common/bin/rcu -silent -dropRepository -databaseType ORACLE -connectString $CONNECTION_STRING \\ -dbUser sys -dbRole sysdba -selectDependentsForComponents true -schemaPrefix $RCUPREFIX \\ -component MDS -component IAU -component IAU_APPEND -component IAU_VIEWER -component OPSS \\ -component WLS -component STB -component OAM -f \u0026lt; /tmp/pwd.txt For example:\n$ kubectl exec -it helper -n oamns -- /bin/bash [oracle@helper ~]$ export CONNECTION_STRING=mydatabasehost.example.com:1521/orcl.example.com [oracle@helper ~]$ export RCUPREFIX=OAMK8S /u01/oracle/oracle_common/bin/rcu -silent -dropRepository -databaseType ORACLE -connectString $CONNECTION_STRING \\ -dbUser sys -dbRole sysdba -selectDependentsForComponents true -schemaPrefix $RCUPREFIX \\ -component MDS -component IAU -component IAU_APPEND -component IAU_VIEWER -component OPSS \\ -component WLS -component STB -component OAM -f \u0026lt; /tmp/pwd.txt Delete the contents of the persistent volume, for example:\n$ rm -rf \u0026lt;workdir\u0026gt;/accessdomainpv/* For example:\n$ rm -rf 
/scratch/OAMK8S/accessdomainpv/* Delete the WebLogic Kubernetes Operator, by running the following command:\n$ helm delete weblogic-kubernetes-operator -n opns Delete the label from the OAM namespace:\n$ kubectl label namespaces \u0026lt;domain_namespace\u0026gt; weblogic-operator- For example:\n$ kubectl label namespaces oamns weblogic-operator- Delete the service account for the operator:\n$ kubectl delete serviceaccount \u0026lt;sample-kubernetes-operator-sa\u0026gt; -n \u0026lt;domain_namespace\u0026gt; For example:\n$ kubectl delete serviceaccount op-sa -n opns Delete the operator namespace:\n$ kubectl delete namespace \u0026lt;sample-kubernetes-operator-ns\u0026gt; For example:\n$ kubectl delete namespace opns To delete NGINX:\n$ helm delete oam-nginx -n \u0026lt;domain_namespace\u0026gt; For example:\n$ helm delete oam-nginx -n oamns Then run:\n$ helm delete nginx-ingress -n \u0026lt;domain_namespace\u0026gt; For example:\n$ helm delete nginx-ingress -n oamns Delete the OAM namespace:\n$ kubectl delete namespace \u0026lt;domain_namespace\u0026gt; For example:\n$ kubectl delete namespace oamns " +}, +{ + "uri": "/fmw-kubernetes/21.4.2/tags/", + "title": "Tags", + "tags": [], + "description": "", + "content": "" +}] \ No newline at end of file diff --git a/docs/21.4.2/index.xml b/docs/21.4.2/index.xml new file mode 100644 index 000000000..2c03014bd --- /dev/null +++ b/docs/21.4.2/index.xml @@ -0,0 +1,597 @@ + + + + Oracle Fusion Middleware on Kubernetes + /fmw-kubernetes/21.4.2/ + Recent content on Oracle Fusion Middleware on Kubernetes + Hugo -- gohugo.io + en-us + Thu, 18 Apr 2019 06:46:23 -0500 + + + + + + Release Notes + /fmw-kubernetes/21.4.2/oam/release-notes/ + Fri, 15 Mar 2019 11:25:28 -0400 + + /fmw-kubernetes/21.4.2/oam/release-notes/ + Review the latest changes and known issues for Oracle Access Management on Kubernetes. 
+Recent changes Date Version Change November, 2021 21.4.2 Supports Oracle Access Management domain deployment using WebLogic Kubernetes Operator 3.3.0. Voyager ingress removed as no longer supported. October 2021 21.4.1 A) References to supported Kubernetes, Helm and Docker versions removed and replaced with Support note reference. B) Namespace and domain names changed to be consistent with Enterprise Deployment Guide for Oracle Identity and Access Management in a Kubernetes Cluster. + + + + Release Notes + /fmw-kubernetes/21.4.2/oid/release-notes/ + Fri, 15 Mar 2019 11:25:28 -0400 + + /fmw-kubernetes/21.4.2/oid/release-notes/ + Review the latest changes and known issues for Oracle Internet Directory on Kubernetes. +Recent changes Date Version Change October, 2021 21.4.1 Initial release of Oracle Identity Directory on Kubernetes. + + + + Release Notes + /fmw-kubernetes/21.4.2/oig/release-notes/ + Fri, 15 Mar 2019 11:25:28 -0400 + + /fmw-kubernetes/21.4.2/oig/release-notes/ + Review the latest changes and known issues for Oracle Identity Governance on Kubernetes. +Recent changes Date Version Change November, 2021 21.4.2 Supports Oracle Identity Governance domain deployment using WebLogic Kubernetes Operator 3.3.0. Voyager ingress removed as no longer supported. October 2021 21.4.1 A) References to supported Kubernetes, Helm and Docker versions removed and replaced with Support note reference. B) Namespace and domain names changed to be consistent with Enterprise Deployment Guide for Oracle Identity and Access Management in a Kubernetes Cluster. + + + + Release Notes + /fmw-kubernetes/21.4.2/oud/release-notes/ + Fri, 15 Mar 2019 11:25:28 -0400 + + /fmw-kubernetes/21.4.2/oud/release-notes/ + Review the latest changes and known issues for Oracle Unified Directory on Kubernetes. +Recent changes Date Version Change November 2021 21.4.2 Voyager ingress removed as no longer supported. 
October 2021 21.4.1 A) References to supported Kubernetes, Helm and Docker versions removed and replaced with Support note reference. B) Namespace and domain names changed to be consistent with Enterprise Deployment Guide for Oracle Identity and Access Management in a Kubernetes Cluster. + + + + Release Notes + /fmw-kubernetes/21.4.2/oudsm/release-notes/ + Fri, 15 Mar 2019 11:25:28 -0400 + + /fmw-kubernetes/21.4.2/oudsm/release-notes/ + Review the latest changes and known issues for Oracle Unified Directory Services Manager on Kubernetes. +Recent changes Date Version Change November 2021 21.4.2 Voyager ingress removed as no longer supported. October 2021 21.4.1 A) References to supported Kubernetes, Helm and Docker versions removed and replaced with Support note reference. B) Namespace and domain names changed to be consistent with Enterprise Deployment Guide for Oracle Identity and Access Management in a Kubernetes Cluster. + + + + Release Notes + /fmw-kubernetes/21.4.2/soa-domains/release-notes/ + Fri, 15 Mar 2019 11:25:28 -0400 + + /fmw-kubernetes/21.4.2/soa-domains/release-notes/ + Review the latest changes and known issues for Oracle SOA Suite on Kubernetes. +Recent changes Date Version Change November 30, 2021 21.4.2 Supports Oracle SOA Suite 12.2.1.4 domains deployment using October 2021 PSU and known bug fixes. Oracle SOA Suite 12.2.1.4 Docker image for this release can be downloaded from My Oracle Support (MOS patch 33467899). August 6, 2021 21.3.2 Supports Oracle SOA Suite 12. + + + + Deploy using JDeveloper + /fmw-kubernetes/21.4.2/soa-domains/adminguide/deploying-composites/supportjdev/ + Fri, 22 Feb 2019 15:44:42 -0500 + + /fmw-kubernetes/21.4.2/soa-domains/adminguide/deploying-composites/supportjdev/ + Learn how to deploy Oracle SOA Suite and Oracle Service Bus composite applications from Oracle JDeveloper (running outside the Kubernetes network) to an Oracle SOA Suite instance in the WebLogic Kubernetes Operator environment. 
+Use JDeveloper for development and test environments only. For a production environment, you should deploy using Application Control and WLST methods. + Deploy Oracle SOA Suite and Oracle Service Bus composite applications to Oracle SOA Suite from JDeveloper To deploy Oracle SOA Suite and Oracle Service Bus composite applications from Oracle JDeveloper, the Administration Server must be configured to expose a T3 channel. + + + + Domain resource sizing + /fmw-kubernetes/21.4.2/soa-domains/appendix/soa-cluster-sizing-info/ + Fri, 22 Feb 2019 15:44:42 -0500 + + /fmw-kubernetes/21.4.2/soa-domains/appendix/soa-cluster-sizing-info/ + Oracle SOA cluster sizing recommendations Oracle SOA Normal Usage Moderate Usage High Usage Administration Server No of CPU core(s) : 1, Memory : 4GB No of CPU core(s) : 1, Memory : 4GB No of CPU core(s) : 1, Memory : 4GB Number of Managed Servers 2 2 4 Configurations per Managed Server No of CPU core(s) : 2, Memory : 16GB No of CPU core(s) : 4, Memory : 16GB No of CPU core(s) : 6, Memory : 16-32GB PV Storage Minimum 250GB Minimum 250GB Minimum 500GB + + + + Patch an image + /fmw-kubernetes/21.4.2/soa-domains/patch_and_upgrade/patch-an-image/ + Fri, 22 Feb 2019 15:44:42 -0500 + + /fmw-kubernetes/21.4.2/soa-domains/patch_and_upgrade/patch-an-image/ + Oracle releases Oracle SOA Suite images regularly with the latest bundle and recommended interim patches in My Oracle Support (MOS). However, if you need to create images with new bundle and interim patches, you can build these images using the WebLogic Image Tool. +If you have access to the Oracle SOA Suite patches, you can patch an existing Oracle SOA Suite image with a bundle patch and interim patches. Oracle recommends that you use the WebLogic Image Tool to patch the Oracle SOA Suite image. 
+ + + + Traefik + /fmw-kubernetes/21.4.2/soa-domains/adminguide/configure-load-balancer/traefik/ + Fri, 22 Feb 2019 15:44:42 -0500 + + /fmw-kubernetes/21.4.2/soa-domains/adminguide/configure-load-balancer/traefik/ + This section provides information about how to install and configure the ingress-based Traefik load balancer (version 2.2.1 or later for production deployments) to load balance Oracle SOA Suite domain clusters. You can configure Traefik for non-SSL, SSL termination, and end-to-end SSL access of the application URL. +Follow these steps to set up Traefik as a load balancer for an Oracle SOA Suite domain in a Kubernetes cluster: + Install the Traefik (ingress-based) load balancer Create an Ingress for the domain Verify domain application URL access Uninstall the Traefik ingress Uninstall Traefik Install the Traefik (ingress-based) load balancer Use Helm to install the Traefik (ingress-based) load balancer. + + + + a. Using Design Console with NGINX(non-SSL) + /fmw-kubernetes/21.4.2/oig/configure-design-console/using-the-design-console-with-nginx-non-ssl/ + Mon, 01 Jan 0001 00:00:00 +0000 + + /fmw-kubernetes/21.4.2/oig/configure-design-console/using-the-design-console-with-nginx-non-ssl/ + Configure an NGINX ingress (non-SSL) to allow Design Console to connect to your Kubernetes cluster. + Prerequisites + Setup routing rules for the Design Console ingress + Create the ingress + Update the T3 channel + Restart the OIG domain + Design Console client +a. Using an on-premises installed Design Console +b. 
Using a container image for Design Console + Login to the Design Console + + + + Domain life cycle + /fmw-kubernetes/21.4.2/oig/manage-oig-domains/domain-lifecycle/ + Mon, 01 Jan 0001 00:00:00 +0000 + + /fmw-kubernetes/21.4.2/oig/manage-oig-domains/domain-lifecycle/ + View existing OIG servers Starting/Scaling up OIG Managed servers Stopping/Scaling down OIG Managed servers Stopping and starting the Administration Server and Managed Servers As OIG domains use the WebLogic Kubernetes Operator, domain lifecyle operations are managed using the WebLogic Kubernetes Operator itself. +This document shows the basic operations for starting, stopping and scaling servers in the OIG domain. +For more detailed information refer to Domain Life Cycle in the WebLogic Kubernetes Operator documentation. + + + + Quick start deployment on-premise + /fmw-kubernetes/21.4.2/soa-domains/appendix/quickstart-deployment-on-prem/ + Thu, 18 Jun 2020 15:27:38 -0500 + + /fmw-kubernetes/21.4.2/soa-domains/appendix/quickstart-deployment-on-prem/ + Use this Quick Start to create an Oracle SOA Suite domain deployment in a Kubernetes cluster (on-premise environments) with the WebLogic Kubernetes Operator. Note that this walkthrough is for demonstration purposes only, not for use in production. These instructions assume that you are already familiar with Kubernetes. If you need more detailed instructions, refer to the Install Guide. +Hardware requirements The Linux kernel supported for deploying and running Oracle SOA Suite domains with the operator is Oracle Linux 7 (UL6+) and Red Hat Enterprise Linux 7 (UL3+ only with standalone Kubernetes). 
+ + + + Deploy using Maven and Ant + /fmw-kubernetes/21.4.2/soa-domains/adminguide/deploying-composites/deploy-using-maven-ant/ + Fri, 22 Feb 2019 15:44:42 -0500 + + /fmw-kubernetes/21.4.2/soa-domains/adminguide/deploying-composites/deploy-using-maven-ant/ + Learn how to deploy Oracle SOA Suite and Oracle Service Bus composite applications using the Maven and Ant based approach in an Oracle SOA Suite in WebLogic Kubernetes Operator environment. +Before deploying composite applications, we need to create a Kubernetes pod in the same cluster where the Oracle SOA Suite domain is running, so that composite applications can be deployed using the internal Kubernetes Service for the Administration Server URL. + + + + Enable additional URL access + /fmw-kubernetes/21.4.2/soa-domains/adminguide/enable-additional-url-access/ + Fri, 22 Feb 2019 15:44:42 -0500 + + /fmw-kubernetes/21.4.2/soa-domains/adminguide/enable-additional-url-access/ + This section provides information about how to extend an existing ingress (Non-SSL and SSL termination) to enable additional application URL access for Oracle SOA Suite domains. +The ingress per domain created in the steps in Set up a load balancer exposes the application paths defined in template YAML files present at ${WORKDIR}/charts/ingress-per-domain/templates/. +To extend an existing ingress with additional application URL access: + Update the template YAML file at ${WORKDIR}/charts/ingress-per-domain/templates/ to define additional path rules. + + + + NGINX + /fmw-kubernetes/21.4.2/soa-domains/adminguide/configure-load-balancer/nginx/ + Fri, 22 Feb 2019 15:44:42 -0500 + + /fmw-kubernetes/21.4.2/soa-domains/adminguide/configure-load-balancer/nginx/ + This section provides information about how to install and configure the ingress-based NGINX load balancer to load balance Oracle SOA Suite domain clusters. You can configure NGINX for non-SSL, SSL termination, and end-to-end SSL access of the application URL. 
+Follow these steps to set up NGINX as a load balancer for an Oracle SOA Suite domain in a Kubernetes cluster: +See the official installation document for prerequisites. + Install the NGINX load balancer for non-SSL and SSL termination configuration Generate secret for SSL access Install NGINX load balancer for end-to-end SSL configuration Configure NGINX to manage ingresses Verify domain application URL access Uninstall NGINX ingress Uninstall NGINX To get repository information, enter the following Helm commands: + + + + Upgrade an operator release + /fmw-kubernetes/21.4.2/soa-domains/patch_and_upgrade/upgrade-operator-release/ + Fri, 22 Feb 2019 15:44:42 -0500 + + /fmw-kubernetes/21.4.2/soa-domains/patch_and_upgrade/upgrade-operator-release/ + To upgrade the WebLogic Kubernetes operator, use the helm upgrade command with new Helm chart and operator image. See the steps here to pull the operator image and set up the Oracle SOA Suite repository that contains the operator chart. To upgrade the operator run the following command: +$ cd ${WORKDIR} $ helm upgrade \ --reuse-values \ --set image=oracle/weblogic-kubernetes-operator:3.3.0 \ --namespace weblogic-operator-namespace \ --wait \ weblogic-kubernetes-operator \ charts/weblogic-operator Note: When the WebLogic Kubernetes Operator is upgraded from release version 3. + + + + b. Using Design Console with NGINX(SSL) + /fmw-kubernetes/21.4.2/oig/configure-design-console/using-the-design-console-with-nginx-ssl/ + Mon, 01 Jan 0001 00:00:00 +0000 + + /fmw-kubernetes/21.4.2/oig/configure-design-console/using-the-design-console-with-nginx-ssl/ + Configure an NGINX ingress (SSL) to allow Design Console to connect to your Kubernetes cluster. + Prerequisites + Setup routing rules for the Design Console ingress + Create the ingress + Update the T3 channel + Restart the OIG domain + Design Console client +a. Using an on-premises installed Design Console +b. 
Using a container image for Design Console + Login to the Design Console + + + + WLST administration operations + /fmw-kubernetes/21.4.2/oig/manage-oig-domains/wlst-admin-operations/ + Mon, 01 Jan 0001 00:00:00 +0000 + + /fmw-kubernetes/21.4.2/oig/manage-oig-domains/wlst-admin-operations/ + Invoke WLST and access Administration Server To use WLST to administer the OIG domain, use a helper pod in the same Kubernetes cluster as the OIG Domain. + Run the following command to create a helper pod if one doesn&rsquo;t already exist: +$ kubectl run helper --image &lt;image_name&gt; -n &lt;domain_namespace&gt; -- sleep infinity For example: +$ kubectl run helper --image 12.2.1.4.0-8-ol7-211022.0723 -n oigns -- sleep infinity The output will look similar to the following: + + + + Deploy using composites in a persistent volume or image + /fmw-kubernetes/21.4.2/soa-domains/adminguide/deploying-composites/deploy-artifacts/ + Tue, 19 Oct 2021 12:04:42 -0500 + + /fmw-kubernetes/21.4.2/soa-domains/adminguide/deploying-composites/deploy-artifacts/ + Learn how to deploy Oracle SOA Suite and Oracle Service Bus composite applications artifacts in a Kubernetes persistent volume or in an image to an Oracle SOA Suite environment deployed using a WebLogic Kubernetes Operator. +The deployment methods described in Deploy using JDeveloper and Deploy using Maven and Ant are manual processes. If you have the deployment artifacts (archives) already built, then you can package them either into a Kubernetes persistent volume or in an image and use this automated process to deploy the artifacts to an Oracle SOA Suite domain. + + + + Apache web tier + /fmw-kubernetes/21.4.2/soa-domains/adminguide/configure-load-balancer/apache/ + Fri, 22 Feb 2019 15:44:42 -0500 + + /fmw-kubernetes/21.4.2/soa-domains/adminguide/configure-load-balancer/apache/ + This section provides information about how to install and configure the Apache web tier to load balance Oracle SOA Suite domain clusters. 
You can configure Apache web tier for non-SSL and SSL termination access of the application URL. +Follow these steps to set up the Apache web tier as a load balancer for an Oracle SOA Suite domain in a Kubernetes cluster: + Build the Apache web tier image Create the Apache plugin configuration file Prepare the certificate and private key Install the Apache web tier Helm chart Verify domain application URL access Uninstall Apache web tier Build the Apache web tier image Refer to the sample, to build the Apache web tier Docker image. + + + + Configure SSL certificates + /fmw-kubernetes/21.4.2/soa-domains/adminguide/configuring-custom-ssl-certificates/ + Fri, 22 Feb 2019 15:44:42 -0500 + + /fmw-kubernetes/21.4.2/soa-domains/adminguide/configuring-custom-ssl-certificates/ + Secure Socket Layer (SSL) provides a secured communication for data sent over unsecured networks. In an SSL termination scenario, you can configure SSL between the client browser and the load balancer in your Oracle SOA Suite instance to ensure that applications are accessed securely. In an SSL end-to-end scenario, an Oracle SOA Suite domain is configured to use a self-signed SSL certificate that was generated during domain creation. Clients will typically receive a message indicating that the signing CA for the certificate is unknown and not trusted. + + + + Security hardening + /fmw-kubernetes/21.4.2/soa-domains/appendix/docker-k8s-hardening/ + Fri, 22 Feb 2019 15:44:42 -0500 + + /fmw-kubernetes/21.4.2/soa-domains/appendix/docker-k8s-hardening/ + Securing a Kubernetes cluster involves hardening on multiple fronts - securing the API servers, etcd, nodes, container images, container run-time, and the cluster network. Apply principles of defense in depth, principle of least privilege, and minimize the attack surface. Use security tools such as Kube-Bench to verify the cluster&rsquo;s security posture. 
Since Kubernetes is evolving rapidly refer to Kubernetes Security Overview for the latest information on securing a Kubernetes cluster. Also ensure the deployed Docker containers follow the Docker Security guidance. + + + + Upgrade a Kubernetes cluster + /fmw-kubernetes/21.4.2/soa-domains/patch_and_upgrade/upgrade-k8s-cluster/ + Fri, 22 Feb 2019 15:44:42 -0500 + + /fmw-kubernetes/21.4.2/soa-domains/patch_and_upgrade/upgrade-k8s-cluster/ + These instructions describe how to upgrade a Kubernetes cluster created using kubeadm on which an Oracle SOA Suite domain is deployed. A rolling upgrade approach is used to upgrade nodes (master and worker) of the Kubernetes cluster. +It is expected that there will be a down time during the upgrade of the Kubernetes cluster as the nodes need to be drained as part of the upgrade process. + Prerequisites Review Prerequisites and ensure that your Kubernetes cluster is ready for upgrade. + + + + Runnning OIG utilities + /fmw-kubernetes/21.4.2/oig/manage-oig-domains/running-oig-utilities/ + Mon, 01 Jan 0001 00:00:00 +0000 + + /fmw-kubernetes/21.4.2/oig/manage-oig-domains/running-oig-utilities/ + Run OIG utlities inside the OIG Kubernetes cluster. +Run utilities in an interactive bash shell Access a bash shell inside the governancedomain-oim-server1 pod: +$ kubectl -n oigns exec -it governancedomain-oim-server1 -- bash This will take you into a bash shell in the running governancedomain-oim-server1 pod: +[oracle@governancedomain-oim-server1 oracle]$ Navigate to the /u01/oracle/idm/server/bin directory and execute the utility as required. For example: +[oracle@governancedomain-oim-server1 oracle] cd /u01/oracle/idm/server/bin [oracle@governancedomain-oim-server1 bin]$ . 
+ + + + Monitor a domain and publish logs + /fmw-kubernetes/21.4.2/soa-domains/adminguide/monitoring-soa-domains/ + Fri, 22 Feb 2019 15:44:42 -0500 + + /fmw-kubernetes/21.4.2/soa-domains/adminguide/monitoring-soa-domains/ + After the Oracle SOA Suite domain is set up, you can: + Monitor the Oracle SOA Suite instance using Prometheus and Grafana Publish WebLogic Server logs into Elasticsearch Publish SOA server diagnostics logs into Elasticsearch Monitor the Oracle SOA Suite instance using Prometheus and Grafana Using the WebLogic Monitoring Exporter you can scrape runtime information from a running Oracle SOA Suite instance and monitor them using Prometheus and Grafana. + + + + Logging and visualization + /fmw-kubernetes/21.4.2/oig/manage-oig-domains/logging-and-visualization/ + Mon, 01 Jan 0001 00:00:00 +0000 + + /fmw-kubernetes/21.4.2/oig/manage-oig-domains/logging-and-visualization/ + After the OIG domain is set up you can publish operator and WebLogic Server logs into Elasticsearch and interact with them in Kibana. +In Prepare your environment if you decided to use the Elasticsearch and Kibana by setting the parameter elkIntegrationEnabled to true, then the steps below must be followed to complete the setup. +If you did not set elkIntegrationEnabled to true and want to do so post configuration, run the following command from the $WORKDIR directory: + + + + Expose the T3/T3S protocol + /fmw-kubernetes/21.4.2/soa-domains/adminguide/enablingt3/ + Fri, 22 Feb 2019 15:44:42 -0500 + + /fmw-kubernetes/21.4.2/soa-domains/adminguide/enablingt3/ + Oracle strongly recommends that you do not expose non-HTTPS traffic (T3/T3s/LDAP/IIOP/IIOPs) outside of the external firewall. You can control this access using a combination of network channels and firewalls. + You can create T3/T3S channels and the corresponding Kubernetes service to expose the T3/T3S protocol for the Administration Server and Managed Servers in an Oracle SOA Suite domain. 
+The WebLogic Kubernetes Operator provides an option to expose a T3 channel for the Administration Server using the exposeAdminT3Channel setting during domain creation, then the matching T3 service can be used to connect. + + + + Monitoring an OIG domain + /fmw-kubernetes/21.4.2/oig/manage-oig-domains/monitoring-oim-domains/ + Mon, 01 Jan 0001 00:00:00 +0000 + + /fmw-kubernetes/21.4.2/oig/manage-oig-domains/monitoring-oim-domains/ + After the OIG domain is set up you can monitor the OIG instance using Prometheus and Grafana. See Monitoring a domain. +The WebLogic Monitoring Exporter uses the WLS RESTful Management API to scrape runtime information and then exports Prometheus-compatible metrics. It is deployed as a web application in a WebLogic Server (WLS) instance, version 12.2.1 or later, typically, in the instance from which you want to get metrics. +There are two ways to setup monitoring and you should choose one method or the other: + + + + Uninstall + /fmw-kubernetes/21.4.2/soa-domains/cleanup-domain-setup/ + Fri, 22 Feb 2019 15:44:42 -0500 + + /fmw-kubernetes/21.4.2/soa-domains/cleanup-domain-setup/ + Learn how to clean up the Oracle SOA Suite domain setup. 
+Remove the domain Remove the domain&rsquo;s ingress (for example, Traefik ingress) using Helm: +$ helm uninstall soa-domain-ingress -n sample-domain1-ns For example: +$ helm uninstall soainfra-traefik -n soans Remove the domain resources by using the sample delete-weblogic-domain-resources.sh script present at ${WORKDIR}/delete-domain: +$ cd ${WORKDIR}/delete-domain $ ./delete-weblogic-domain-resources.sh -d sample-domain1 For example: +$ cd ${WORKDIR}/delete-domain $ ./delete-weblogic-domain-resources.sh -d soainfra Use kubectl to confirm that the server pods and domain resource are deleted: + + + + Delete the OIG domain home + /fmw-kubernetes/21.4.2/oig/manage-oig-domains/delete-domain-home/ + Mon, 01 Jan 0001 00:00:00 +0000 + + /fmw-kubernetes/21.4.2/oig/manage-oig-domains/delete-domain-home/ + Sometimes in production, but most likely in testing environments, you might want to remove the domain home that is generated using the create-domain.sh script. + Run the following command to delete the domain: +$ cd $WORKDIR/kubernetes/delete-domain $ ./delete-weblogic-domain-resources.sh -d &lt;domain_uid&gt; For example: +$ cd $WORKDIR/kubernetes/delete-domain $ ./delete-weblogic-domain-resources.sh -d governancedomain Drop the RCU schemas as follows: +$ kubectl exec -it helper -n &lt;domain_namespace&gt; -- /bin/bash [oracle@helper ~]$ [oracle@helper ~]$ export CONNECTION_STRING=&lt;db_host. + + + + Frequently Asked Questions + /fmw-kubernetes/21.4.2/soa-domains/faq/ + Fri, 22 Feb 2019 15:44:42 -0500 + + /fmw-kubernetes/21.4.2/soa-domains/faq/ + Overriding tuning parameters is not supported using configuration overrides The WebLogic Kubernetes Operator enables you to override some of the domain configuration using configuration overrides (also called situational configuration). See supported overrides. Overriding the tuning parameters such as MaxMessageSize and PAYLOAD, for Oracle SOA Suite domains is not supported using the configuration overrides feature. 
However, you can override them using the following steps: + Specify the new value using the environment variable K8S_REFCONF_OVERRIDES in serverPod. + + + + Persist adapter customizations + /fmw-kubernetes/21.4.2/soa-domains/adminguide/persisting-soa-adapters-customizations/ + Fri, 22 Feb 2019 15:44:42 -0500 + + /fmw-kubernetes/21.4.2/soa-domains/adminguide/persisting-soa-adapters-customizations/ + The lifetime for any customization done in a file on a server pod is up to the lifetime of that pod. The changes are not persisted once the pod goes down or is restarted. +For example, the following configuration updates DbAdapter.rar to create a new connection instance and creates data source CoffeeShop on the Administration Console for the same with jdbc/CoffeeShopDS. +File location: /u01/oracle/soa/soa/connectors/DbAdapter.rar +&lt;connection-instance&gt; &lt;jndi-name&gt;eis/DB/CoffeeShop&lt;/jndi-name&gt; &lt;connection-properties&gt; &lt;properties&gt; &lt;property&gt; &lt;name&gt;XADataSourceName&lt;/name&gt; &lt;value&gt;jdbc/CoffeeShopDS&lt;/value&gt; &lt;/property&gt; &lt;property&gt; &lt;name&gt;DataSourceName&lt;/name&gt; &lt;value&gt;&lt;/value&gt; &lt;/property&gt; &lt;property&gt; &lt;name&gt;PlatformClassName&lt;/name&gt; &lt;value&gt;org. + + + + Perform WLST operations + /fmw-kubernetes/21.4.2/soa-domains/adminguide/performing-wlst-operations/ + Fri, 22 Feb 2019 15:44:42 -0500 + + /fmw-kubernetes/21.4.2/soa-domains/adminguide/performing-wlst-operations/ + You can use the WebLogic Scripting Tool (WLST) to manage a domain running in a Kubernetes cluster. Some of the many ways to do this are provided here. +If the Administration Server was configured to expose a T3 channel using exposeAdminT3Channel when creating the domain, refer to Use WLST. +If you do not want to expose additional ports and perform WLST administration operations using the existing Kubernetes services created by the WebLogic Server Kubernetes operator, then follow this documentation. 
+ + + + a) Logging and Visualization for Helm Chart oud-ds-rs Deployment + /fmw-kubernetes/21.4.2/oud/manage-oud-containers/logging-and-visualization/ + Fri, 22 Feb 2019 15:44:42 -0500 + + /fmw-kubernetes/21.4.2/oud/manage-oud-containers/logging-and-visualization/ + Introduction Installation Enable Elasticsearch, Logstash, and Kibana Create Data Mount Points Configure Logstash Install or Upgrade Oracle Unified Directory Container with ELK Configuration Configure ElasticSearch Verify Using the Kibana Application Introduction This section describes how to install and configure logging and visualization for the oud-ds-rs Helm Chart deployment. +The ELK stack consists of Elasticsearch, Logstash, and Kibana. Using ELK we can gain insights in real-time from the log data from your applications. + + + + a) Logging and Visualization for Helm Chart oudsm Deployment + /fmw-kubernetes/21.4.2/oudsm/manage-oudsm-containers/logging-and-visualization/ + Fri, 22 Feb 2019 15:44:42 -0500 + + /fmw-kubernetes/21.4.2/oudsm/manage-oudsm-containers/logging-and-visualization/ + Introduction Installation Enable Elasticsearch, Logstash, and Kibana Create Data Mount Points Configure Logstash Install or Upgrade Oracle Unified Directory Services Manager Container with ELK Configuration Configure ElasticSearch Verify Using the Kibana Application Introduction This section describes how to install and configure logging and visualization for the oudsm Helm Chart deployment. +The ELK stack consists of Elasticsearch, Logstash, and Kibana. Using ELK we can gain insights in real-time from the log data from your applications. 
+ + + + b) Monitoring an Oracle Unified Directory Instance + /fmw-kubernetes/21.4.2/oud/manage-oud-containers/monitoring-oud-instance/ + Fri, 22 Feb 2019 15:44:42 -0500 + + /fmw-kubernetes/21.4.2/oud/manage-oud-containers/monitoring-oud-instance/ + Introduction Install Prometheus and Grafana Create a Kubernetes Namespace Add Prometheus and Grafana Helm Repositories Install the Prometheus Operator View Prometheus and Grafana Objects Created Add the NodePort Verify Using Grafana GUI Introduction After the Oracle Unified Directory instance is set up you can monitor it using Prometheus and Grafana. +Install Prometheus and Grafana Create a Kubernetes Namespace Create a Kubernetes namespace to provide a scope for Prometheus and Grafana objects such as pods and services that you create in the environment. + + + + b) Monitoring an Oracle Unified Directory Services Manager Instance + /fmw-kubernetes/21.4.2/oudsm/manage-oudsm-containers/monitoring-oudsm-instance/ + Fri, 22 Feb 2019 15:44:42 -0500 + + /fmw-kubernetes/21.4.2/oudsm/manage-oudsm-containers/monitoring-oudsm-instance/ + Introduction Install Prometheus and Grafana Create a Kubernetes Namespace Add Prometheus and Grafana Helm Repositories Install the Prometheus Operator View Prometheus and Grafana Objects Created Add the NodePort Verify Using Grafana GUI Introduction After the Oracle Unified Directory Services Manager instance is set up you can monitor it using Prometheus and Grafana. +Install Prometheus and Grafana Create a Kubernetes Namespace Create a Kubernetes namespace to provide a scope for Prometheus and Grafana objects such as pods and services that you create in the environment. + + + + a. 
Domain Life Cycle + /fmw-kubernetes/21.4.2/oam/manage-oam-domains/domain-lifecycle/ + Mon, 01 Jan 0001 00:00:00 +0000 + + /fmw-kubernetes/21.4.2/oam/manage-oam-domains/domain-lifecycle/ + As OAM domains use the WebLogic Kubernetes Operator, domain lifecyle operations are managed using the WebLogic Kubernetes Operator itself. +This document shows the basic operations for starting, stopping and scaling servers in the OAM domain. +For more detailed information refer to Domain Life Cycle in the WebLogic Kubernetes Operator documentation. +Do not use the WebLogic Server Administration Console or Oracle Enterprise Manager Console to start or stop servers. + View existing OAM servers The default OAM deployment starts the Administration Server (AdminServer), two OAM Managed Servers (oam_server1 and oam_server2) and two OAM Policy Manager server (oam_policy_mgr1 and oam_policy_mgr2 ). + + + + a. Patch an image + /fmw-kubernetes/21.4.2/oam/patch-and-upgrade/patch_an_image/ + Mon, 01 Jan 0001 00:00:00 +0000 + + /fmw-kubernetes/21.4.2/oam/patch-and-upgrade/patch_an_image/ + To update your OAM Kubernetes cluster with a new OAM Docker image, first install the new Docker image on all nodes in your Kubernetes cluster. +Once the new image is installed, choose one of the following options to update your OAM kubernetes cluster to use the new image: + Run the kubectl edit domain command Run the kubectl patch domain command In all of the above cases, the WebLogic Kubernetes Operator will restart the Administration Server pod first and then perform a rolling restart on the OAM Managed Servers. + + + + a. Patch an image + /fmw-kubernetes/21.4.2/oig/patch-and-upgrade/patch_an_image/ + Mon, 01 Jan 0001 00:00:00 +0000 + + /fmw-kubernetes/21.4.2/oig/patch-and-upgrade/patch_an_image/ + To update your OIG Kubernetes cluster with a new OIG Docker image, first install the new Docker image on all nodes in your Kubernetes cluster. 
+Once the new image is installed choose one of the following options to update your OIG Kubernetes cluster to use the new image: + Run the kubectl edit domain command Run the kubectl patch domain command In all of the above cases, the WebLogic Kubernetes Operator will restart the Administration Server pod first and then perform a rolling restart on the OIG Managed Servers. + + + + a. Post Install Tasks + /fmw-kubernetes/21.4.2/oig/post-install-config/set_oimfronendurl_using_mbeans/ + Mon, 01 Jan 0001 00:00:00 +0000 + + /fmw-kubernetes/21.4.2/oig/post-install-config/set_oimfronendurl_using_mbeans/ + Follow these post install configuration steps. + Create a Server Overrides File Set OIMFrontendURL using MBeans Create a Server Overrides File Navigate to the following directory: +cd $WORKDIR/kubernetes/create-oim-domain/domain-home-on-pv/output/weblogic-domains/governancedomain Create a setUserOverrides.sh with the following contents: +DERBY_FLAG=false JAVA_OPTIONS=&quot;${JAVA_OPTIONS} -Djava.net.preferIPv4Stack=true&quot; MEM_ARGS=&quot;-Xms8192m -Xmx8192m&quot; Copy the setUserOverrides.sh file to the Administration Server pod: +$ chmod 755 setUserOverrides.sh $ kubectl cp setUserOverrides.sh oigns/governancedomain-adminserver:/u01/oracle/user_projects/domains/governancedomain/bin/setUserOverrides.sh Where oigns is the OIG namespace and governancedomain is the domain_UID. + + + + a. Using an Ingress with NGINX (non-SSL) + /fmw-kubernetes/21.4.2/oig/configure-ingress/ingress-nginx-setup-for-oig-domain-setup-on-k8s/ + Mon, 01 Jan 0001 00:00:00 +0000 + + /fmw-kubernetes/21.4.2/oig/configure-ingress/ingress-nginx-setup-for-oig-domain-setup-on-k8s/ + Setting up an ingress for NGINX for the OIG domain on Kubernetes (non-SSL) The instructions below explain how to set up NGINX as an ingress for the OIG domain with non-SSL termination. +Note: All the steps below should be performed on the master node. + Install NGINX +a. Configure the repository +b. Create a namespace +c. 
Install NGINX using helm +d. Setup routing rules for the domain + Create an ingress for the domain + + + + b. Install and configure connectors + /fmw-kubernetes/21.4.2/oig/post-install-config/install_and_configure_connectors/ + Mon, 01 Jan 0001 00:00:00 +0000 + + /fmw-kubernetes/21.4.2/oig/post-install-config/install_and_configure_connectors/ + Download the connector Download the Connector you are interested in from Oracle Identity Manager Connector Downloads. + Copy the connector zip file to a staging directory on the master node e.g. &lt;workdir&gt;/stage and unzip it: +$ cp $HOME/Downloads/&lt;connector&gt;.zip &lt;workdir&gt;/&lt;stage&gt;/ $ cd &lt;workdir&gt;/&lt;stage&gt; $ unzip &lt;connector&gt;.zip For example: +$ cp $HOME/Downloads/Exchange-12.2.1.3.0.zip /scratch/OIGK8S/stage/ $ cd /scratch/OIGK8S/stage/ $ unzip exchange-12.2.1.3.0.zip Create a directory in the persistent volume On the master node run the following command to create a ConnectorDefaultDirectory: + + + + b. Upgrade an operator release + /fmw-kubernetes/21.4.2/oam/patch-and-upgrade/upgrade_an_operator_release/ + Mon, 01 Jan 0001 00:00:00 +0000 + + /fmw-kubernetes/21.4.2/oam/patch-and-upgrade/upgrade_an_operator_release/ + These instructions apply to upgrading the operator within the 3.x release family as additional versions are released. +The new WebLogic Kubernetes Operator Docker image must be installed on the master node and each of the worker nodes in your Kubernetes cluster. Alternatively you can place the image in a Docker registry that your cluster can access. + Pull the WebLogic Kubernetes Operator 3.X.X image by running the following command on the master node: + + + + b. 
Upgrade an operator release + /fmw-kubernetes/21.4.2/oig/patch-and-upgrade/upgrade_an_operator_release/ + Mon, 01 Jan 0001 00:00:00 +0000 + + /fmw-kubernetes/21.4.2/oig/patch-and-upgrade/upgrade_an_operator_release/ + These instructions apply to upgrading operators within the 3.x release family as additional versions are released. +The new WebLogic Kubernetes Operator Docker image must be installed on the master node AND each of the worker nodes in your Kubernetes cluster. Alternatively you can place the image in a Docker registry that your cluster can access. + Pull the WebLogic Kubernetes Operator 3.X.X image by running the following command on the master node: + + + + b. Using an Ingress with NGINX (SSL) + /fmw-kubernetes/21.4.2/oig/configure-ingress/ingress-nginx-setup-for-oig-domain-setup-on-k8s-ssl/ + Mon, 01 Jan 0001 00:00:00 +0000 + + /fmw-kubernetes/21.4.2/oig/configure-ingress/ingress-nginx-setup-for-oig-domain-setup-on-k8s-ssl/ + Setting up an ingress for NGINX for the OIG domain on Kubernetes The instructions below explain how to set up NGINX as an ingress for the OIG domain with SSL termination. +Note: All the steps below should be performed on the master node. + Create a SSL certificate +a. Generate SSL certificate +b. Create a Kubernetes secret for SSL + Install NGINX +a. Configure the repository +b. Create a namespace + + + + b. WLST Administration Operations + /fmw-kubernetes/21.4.2/oam/manage-oam-domains/wlst-admin-operations/ + Mon, 01 Jan 0001 00:00:00 +0000 + + /fmw-kubernetes/21.4.2/oam/manage-oam-domains/wlst-admin-operations/ + To use WLST to administer the OAM domain, use the helper pod in the same Kubernetes cluster as the OAM Domain. 
+ Run the following command to start a bash shell in the helper pod: +$ kubectl exec -it helper -n &lt;domain_namespace&gt; -- /bin/bash For example: +$ kubectl exec -it helper -n oamns -- /bin/bash This will take you into a bash shell in the running helper pod: +[oracle@helper ~]$ Connect to WLST using the following command: + + + + c. Logging and Visualization + /fmw-kubernetes/21.4.2/oam/manage-oam-domains/logging-and-visualization/ + Mon, 01 Jan 0001 00:00:00 +0000 + + /fmw-kubernetes/21.4.2/oam/manage-oam-domains/logging-and-visualization/ + After the OAM domain is set up you can publish operator and WebLogic Server logs into Elasticsearch and interact with them in Kibana. +In Prepare your environment if you decided to use the Elasticsearch and Kibana by setting the parameter elkIntegrationEnabled to true, then the steps below must be followed to complete the setup. +If you did not set elkIntegrationEnabled to true and want to do so post configuration, run the following command from the $WORKDIR directory: + + + + d. Monitoring an OAM domain + /fmw-kubernetes/21.4.2/oam/manage-oam-domains/monitoring-oam-domains/ + Mon, 01 Jan 0001 00:00:00 +0000 + + /fmw-kubernetes/21.4.2/oam/manage-oam-domains/monitoring-oam-domains/ + After the OAM domain is set up you can monitor the OAM instance using Prometheus and Grafana. See Monitoring a domain. +The WebLogic Monitoring Exporter uses the WLS RESTful Management API to scrape runtime information and then exports Prometheus-compatible metrics. It is deployed as a web application in a WebLogic Server (WLS) instance, version 12.2.1 or later, typically, in the instance from which you want to get metrics. +There are two ways to setup monitoring and you should choose one method or the other: + + + + e. 
Delete the OAM domain home + /fmw-kubernetes/21.4.2/oam/manage-oam-domains/delete-domain-home/ + Mon, 01 Jan 0001 00:00:00 +0000 + + /fmw-kubernetes/21.4.2/oam/manage-oam-domains/delete-domain-home/ + Sometimes in production, but most likely in testing environments, you might want to remove the domain home that is generated using the create-domain.sh script. + Run the following command to delete the domain: +$ cd $WORKDIR/kubernetes/delete-domain $ ./delete-weblogic-domain-resources.sh -d &lt;domain_uid&gt; For example: +$ cd $WORKDIR/kubernetes/delete-domain $ ./delete-weblogic-domain-resources.sh -d accessdomain Drop the RCU schemas as follows: +$ kubectl exec -it helper -n &lt;domain_namespace&gt; -- /bin/bash [oracle@helper ~]$ [oracle@helper ~]$ export CONNECTION_STRING=&lt;db_host. + + + + \ No newline at end of file diff --git a/docs/21.4.2/js/auto-complete.js b/docs/21.4.2/js/auto-complete.js new file mode 100644 index 000000000..7fbde995e --- /dev/null +++ b/docs/21.4.2/js/auto-complete.js @@ -0,0 +1,223 @@ +/* + JavaScript autoComplete v1.0.4 + Copyright (c) 2014 Simon Steinberger / Pixabay + GitHub: https://github.com/Pixabay/JavaScript-autoComplete + License: http://www.opensource.org/licenses/mit-license.php +*/ + +var autoComplete = (function(){ + // "use strict"; + function autoComplete(options){ + if (!document.querySelector) return; + + // helpers + function hasClass(el, className){ return el.classList ? 
el.classList.contains(className) : new RegExp('\\b'+ className+'\\b').test(el.className); } + + function addEvent(el, type, handler){ + if (el.attachEvent) el.attachEvent('on'+type, handler); else el.addEventListener(type, handler); + } + function removeEvent(el, type, handler){ + // if (el.removeEventListener) not working in IE11 + if (el.detachEvent) el.detachEvent('on'+type, handler); else el.removeEventListener(type, handler); + } + function live(elClass, event, cb, context){ + addEvent(context || document, event, function(e){ + var found, el = e.target || e.srcElement; + while (el && !(found = hasClass(el, elClass))) el = el.parentElement; + if (found) cb.call(el, e); + }); + } + + var o = { + selector: 0, + source: 0, + minChars: 3, + delay: 150, + offsetLeft: 0, + offsetTop: 1, + cache: 1, + menuClass: '', + renderItem: function (item, search){ + // escape special characters + search = search.replace(/[-\/\\^$*+?.()|[\]{}]/g, '\\$&'); + var re = new RegExp("(" + search.split(' ').join('|') + ")", "gi"); + return '
' + item.replace(re, "$1") + '
'; + }, + onSelect: function(e, term, item){} + }; + for (var k in options) { if (options.hasOwnProperty(k)) o[k] = options[k]; } + + // init + var elems = typeof o.selector == 'object' ? [o.selector] : document.querySelectorAll(o.selector); + for (var i=0; i 0) + that.sc.scrollTop = selTop + that.sc.suggestionHeight + scrTop - that.sc.maxHeight; + else if (selTop < 0) + that.sc.scrollTop = selTop + scrTop; + } + } + } + addEvent(window, 'resize', that.updateSC); + document.body.appendChild(that.sc); + + live('autocomplete-suggestion', 'mouseleave', function(e){ + var sel = that.sc.querySelector('.autocomplete-suggestion.selected'); + if (sel) setTimeout(function(){ sel.className = sel.className.replace('selected', ''); }, 20); + }, that.sc); + + live('autocomplete-suggestion', 'mouseover', function(e){ + var sel = that.sc.querySelector('.autocomplete-suggestion.selected'); + if (sel) sel.className = sel.className.replace('selected', ''); + this.className += ' selected'; + }, that.sc); + + live('autocomplete-suggestion', 'mousedown', function(e){ + if (hasClass(this, 'autocomplete-suggestion')) { // else outside click + var v = this.getAttribute('data-val'); + that.value = v; + o.onSelect(e, v, this); + that.sc.style.display = 'none'; + } + }, that.sc); + + that.blurHandler = function(){ + try { var over_sb = document.querySelector('.autocomplete-suggestions:hover'); } catch(e){ var over_sb = 0; } + if (!over_sb) { + that.last_val = that.value; + that.sc.style.display = 'none'; + setTimeout(function(){ that.sc.style.display = 'none'; }, 350); // hide suggestions on fast input + } else if (that !== document.activeElement) setTimeout(function(){ that.focus(); }, 20); + }; + addEvent(that, 'blur', that.blurHandler); + + var suggest = function(data){ + var val = that.value; + that.cache[val] = data; + if (data.length && val.length >= o.minChars) { + var s = ''; + for (var i=0;i 40) && key != 13 && key != 27) { + var val = that.value; + if (val.length >= o.minChars) { + 
if (val != that.last_val) { + that.last_val = val; + clearTimeout(that.timer); + if (o.cache) { + if (val in that.cache) { suggest(that.cache[val]); return; } + // no requests if previous suggestions were empty + for (var i=1; i https://github.com/noelboss/featherlight/issues/317 +!function(u){"use strict";if(void 0!==u)if(u.fn.jquery.match(/-ajax/))"console"in window&&window.console.info("Featherlight needs regular jQuery, not the slim version.");else{var r=[],i=function(t){return r=u.grep(r,function(e){return e!==t&&0','
','",'
'+n.loading+"
","
",""].join("")),o="."+n.namespace+"-close"+(n.otherClose?","+n.otherClose:"");return n.$instance=i.clone().addClass(n.variant),n.$instance.on(n.closeTrigger+"."+n.namespace,function(e){if(!e.isDefaultPrevented()){var t=u(e.target);("background"===n.closeOnClick&&t.is("."+n.namespace)||"anywhere"===n.closeOnClick||t.closest(o).length)&&(n.close(e),e.preventDefault())}}),this},getContent:function(){if(!1!==this.persist&&this.$content)return this.$content;var t=this,e=this.constructor.contentFilters,n=function(e){return t.$currentTarget&&t.$currentTarget.attr(e)},r=n(t.targetAttr),i=t.target||r||"",o=e[t.type];if(!o&&i in e&&(o=e[i],i=t.target&&r),i=i||n("href")||"",!o)for(var a in e)t[a]&&(o=e[a],i=t[a]);if(!o){var s=i;if(i=null,u.each(t.contentFilters,function(){return(o=e[this]).test&&(i=o.test(s)),!i&&o.regex&&s.match&&s.match(o.regex)&&(i=s),!i}),!i)return"console"in window&&window.console.error("Featherlight: no content filter found "+(s?' for "'+s+'"':" (no target specified)")),!1}return o.process.call(t,i)},setContent:function(e){return this.$instance.removeClass(this.namespace+"-loading"),this.$instance.toggleClass(this.namespace+"-iframe",e.is("iframe")),this.$instance.find("."+this.namespace+"-inner").not(e).slice(1).remove().end().replaceWith(u.contains(this.$instance[0],e[0])?"":e),this.$content=e.addClass(this.namespace+"-inner"),this},open:function(t){var n=this;if(n.$instance.hide().appendTo(n.root),!(t&&t.isDefaultPrevented()||!1===n.beforeOpen(t))){t&&t.preventDefault();var e=n.getContent();if(e)return r.push(n),s(!0),n.$instance.fadeIn(n.openSpeed),n.beforeContent(t),u.when(e).always(function(e){n.setContent(e),n.afterContent(t)}).then(n.$instance.promise()).done(function(){n.afterOpen(t)})}return n.$instance.detach(),u.Deferred().reject().promise()},close:function(e){var 
t=this,n=u.Deferred();return!1===t.beforeClose(e)?n.reject():(0===i(t).length&&s(!1),t.$instance.fadeOut(t.closeSpeed,function(){t.$instance.detach(),t.afterClose(e),n.resolve()})),n.promise()},resize:function(e,t){if(e&&t&&(this.$content.css("width","").css("height",""),this.$content.parent().width()');return n.onload=function(){r.naturalWidth=n.width,r.naturalHeight=n.height,t.resolve(r)},n.onerror=function(){t.reject(r)},n.src=e,t.promise()}},html:{regex:/^\s*<[\w!][^<]*>/,process:function(e){return u(e)}},ajax:{regex:/./,process:function(e){var n=u.Deferred(),r=u("
").load(e,function(e,t){"error"!==t&&n.resolve(r.contents()),n.fail()});return n.promise()}},iframe:{process:function(e){var t=new u.Deferred,n=u("