From 922edbb78f00a1f935920f4b2956796f991af053 Mon Sep 17 00:00:00 2001 From: Ashageetha Rao Date: Tue, 5 Dec 2023 14:02:35 +0530 Subject: [PATCH] Oracle SOA Suite script and documentation changes for release 23.4.2 (#187) --- .../templates/nginx-ingress-e2essl.yaml | 2 +- .../templates/_operator-cm.tpl | 2 +- .../templates/_operator-dep.tpl | 4 + OracleSOASuite/kubernetes/common/utility.sh | 4 +- .../create-domain-inputs.yaml | 4 +- .../kubernetes/domain-lifecycle/README.md | 9 +- docs-source/content/soa-domains/_index.md | 3 +- .../configure-load-balancer/apache.md | 2 +- .../configure-load-balancer/nginx.md | 2 +- .../configure-load-balancer/traefik.md | 2 +- .../adminguide/monitoring-soa-domains.md | 2 +- .../appendix/quickstart-deployment-on-prem.md | 12 +- .../create-or-update-image/_index.md | 99 +- .../soa-domains/edg-guide/setup-edg.md | 3 + .../installguide/create-soa-domains/_index.md | 158 +- .../prepare-your-environment/_index.md | 20 +- .../installguide/prerequisites/_index.md | 6 +- .../patch_and_upgrade/patch-an-image/index.md | 2 +- .../patch_and_upgrade/upgrade-k8s-cluster.md | 3 +- .../upgrade-operator-release.md | 4 +- .../content/soa-domains/release-notes.md | 1 + docs/23.4.2/404.html | 14 +- docs/23.4.2/categories/index.html | 2897 ++++++-- .../index.html | 2946 ++++++-- .../enterprise-deployment-guide/index.html | 2964 ++++++-- .../enterprise-deployments/index.html | 2992 ++++++-- docs/23.4.2/idm-products/index.html | 3178 +++++--- .../oam/configure-ingress/index.html | 3530 ++++++--- .../index.html | 3166 +++++--- .../create-oam-domains-using-wlst/index.html | 4064 +++++++---- .../oam/create-oam-domains/index.html | 3024 +++++--- .../oam/create-or-update-image/index.html | 3434 ++++++--- docs/23.4.2/idm-products/oam/index.html | 3300 ++++++--- .../idm-products/oam/introduction/index.html | 3146 +++++--- .../delete-domain-home/index.html | 3170 +++++--- .../domain-lifecycle/index.html | 3554 ++++++--- .../oam/manage-oam-domains/hpa/index.html | 3084 +++++--- .../oam/manage-oam-domains/index.html | 3234 ++++++--- .../logging-and-visualization/index.html | 3084 +++++--- .../monitoring-oam-domains/index.html | 4386 ++++++++---- .../wlst-admin-operations/index.html | 3554 ++++++--- .../oam/patch-and-upgrade/index.html | 3058 +++++--- .../patch-an-image/index.html | 3022 +++++--- .../upgrade-an-ingress/index.html | 3194 ++++++--- .../upgrade-an-operator-release/index.html | 3030 +++++--- .../patch-and-upgrade/upgrade-elk/index.html | 2984 ++++++-- .../oam/post-install-config/index.html | 3126 +++++--- .../oam/prepare-your-environment/index.html | 4316 +++++++---- .../idm-products/oam/prerequisites/index.html | 3098 +++++--- .../idm-products/oam/release-notes/index.html | 3044 +++++--- .../oam/troubleshooting/index.html | 3146 +++++--- .../oam/validate-domain-urls/index.html | 3010 ++++++-- .../oam/validate-sso-using-webgate/index.html | 3204 ++++++--- docs/23.4.2/idm-products/oid/index.html | 2932 ++++++-- .../idm-products/oid/release-notes/index.html | 2990 ++++++-- .../oig/configure-design-console/index.html | 3102 +++++--- .../index.html | 3128 +++++--- .../index.html | 3128 +++++--- .../oig/configure-ingress/index.html | 3020 +++++--- .../index.html | 3442 ++++++--- .../index.html | 3474 ++++++--- .../create-oig-domains-using-wlst/index.html | 4068 +++++++---- .../create-oig-domains-wdt/index.html | 3212 ++++++--- .../oig/create-oig-domains/index.html | 3024 +++++--- .../oig/create-or-update-image/index.html | 3142 +++++--- docs/23.4.2/idm-products/oig/index.html | 
3296 ++++++--- .../idm-products/oig/introduction/index.html | 3052 +++++--- .../delete-domain-home/index.html | 3098 +++++--- .../domain-lifecycle/index.html | 2958 ++++++-- .../oig/manage-oig-domains/hpa/index.html | 2920 ++++++-- .../oig/manage-oig-domains/index.html | 3276 ++++++--- .../logging-and-visualization/index.html | 2920 ++++++-- .../monitoring-oim-domains/index.html | 4176 +++++++---- .../running-oig-utilities/index.html | 3102 +++++--- .../wlst-admin-operations/index.html | 3238 ++++++--- .../oig/patch-and-upgrade/index.html | 3168 +++++--- .../patch-an-image/index.html | 3138 +++++--- .../upgrade-an-ingress/index.html | 3162 +++++--- .../upgrade-an-operator-release/index.html | 3034 +++++--- .../patch-and-upgrade/upgrade-elk/index.html | 2992 ++++++-- .../oig/post-install-config/index.html | 3102 +++++--- .../index.html | 3042 +++++--- .../set_oimfronendurl_using_mbeans/index.html | 3162 +++++--- .../oig/prepare-your-environment/index.html | 4318 +++++++---- .../idm-products/oig/prerequisites/index.html | 3002 ++++++-- .../idm-products/oig/release-notes/index.html | 3126 +++++--- .../oig/troubleshooting/index.html | 3108 +++++--- .../oig/validate-domain-urls/index.html | 3006 ++++++-- .../oud/configure-ingress/index.html | 3588 +++++++--- .../oud/create-or-update-image/index.html | 3224 ++++++--- .../oud/create-oud-instances/index.html | 4614 +++++++----- docs/23.4.2/idm-products/oud/index.html | 3220 ++++++--- .../idm-products/oud/introduction/index.html | 3012 +++++--- .../oud/manage-oud-containers/hpa/index.html | 3168 +++++--- .../oud/manage-oud-containers/index.html | 3060 +++++--- .../logging-and-visualization/index.html | 3446 ++++++--- .../monitoring-oud-instance/index.html | 3222 ++++++--- .../scaling-up-down/index.html | 3190 ++++++--- .../oud/patch-and-upgrade/index.html | 3854 ++++++---- .../oud/prepare-your-environment/index.html | 3176 +++++--- .../idm-products/oud/prerequisites/index.html | 2968 ++++++-- .../idm-products/oud/release-notes/index.html | 3172 +++++--- .../oud/troubleshooting/index.html | 3172 +++++--- .../oudsm/configure-ingress/index.html | 3310 ++++++--- .../oudsm/create-or-update-image/index.html | 3052 +++++--- .../oudsm/create-oudsm-instances/index.html | 3630 +++++++--- docs/23.4.2/idm-products/oudsm/index.html | 3222 ++++++--- .../oudsm/introduction/index.html | 2996 ++++++-- .../oudsm/manage-oudsm-containers/index.html | 3054 +++++--- .../logging-and-visualization/index.html | 3168 +++++--- .../monitoring-oudsm-instance/index.html | 3222 ++++++--- .../scaling-up-down/index.html | 3122 +++++--- .../oudsm/patch-and-upgrade/index.html | 3002 ++++++-- .../patch-an-oudsm-image/index.html | 3198 ++++++--- .../patch-and-upgrade/upgrade-elk/index.html | 3042 +++++--- .../oudsm/prepare-your-environment/index.html | 3104 +++++--- .../oudsm/prerequisites/index.html | 2968 ++++++-- .../oudsm/release-notes/index.html | 3126 +++++--- .../oudsm/troubleshooting/index.html | 3166 +++++--- docs/23.4.2/index.html | 2914 ++++++-- docs/23.4.2/index.json | 270 +- docs/23.4.2/index.xml | 242 + docs/23.4.2/sitemap.xml | 179 + .../configure-load-balancer/apache/index.html | 5789 +++++++++++++++ .../configure-load-balancer/index.html | 5605 +++++++++++++++ .../configure-load-balancer/index.xml | 49 + .../configure-load-balancer/nginx/index.html | 6005 ++++++++++++++++ .../traefik/index.html | 6001 ++++++++++++++++ .../index.html | 5773 +++++++++++++++ .../deploy-artifacts/index.html | 5956 +++++++++++++++ .../deploy-using-maven-ant/index.html | 5751 
+++++++++++++++ .../deploying-composites/index.html | 5608 +++++++++++++++ .../adminguide/deploying-composites/index.xml | 46 + .../supportjdev/index.html | 5686 +++++++++++++++ .../enable-additional-url-access/index.html | 5601 +++++++++++++++ .../adminguide/enablingt3/index.html | 5916 +++++++++++++++ docs/23.4.2/soa-domains/adminguide/index.html | 5705 +++++++++++++++ docs/23.4.2/soa-domains/adminguide/index.xml | 80 + .../monitoring-soa-domains/index.html | 5620 +++++++++++++++ .../performing-wlst-operations/index.html | 5720 +++++++++++++++ .../index.html | 5591 +++++++++++++++ .../appendix/docker-k8s-hardening/index.html | 5550 ++++++++++++++ docs/23.4.2/soa-domains/appendix/index.html | 5598 +++++++++++++++ docs/23.4.2/soa-domains/appendix/index.xml | 43 + .../quickstart-deployment-on-prem/index.html | 6220 ++++++++++++++++ .../soa-cluster-sizing-info/index.html | 5558 ++++++++++++++ .../cleanup-domain-setup/index.html | 5591 +++++++++++++++ .../create-or-update-image/index.html | 6260 ++++++++++++++++ .../create-or-update-image/index.xml | 15 + docs/23.4.2/soa-domains/edg-guide/index.html | 5581 +++++++++++++++ docs/23.4.2/soa-domains/edg-guide/index.xml | 34 + .../edg-guide/setup-edg/index.html | 6202 ++++++++++++++++ .../soa-domains/edg-guide/topology/index.html | 5602 +++++++++++++++ docs/23.4.2/soa-domains/faq/index.html | 5633 +++++++++++++++ docs/23.4.2/soa-domains/index.html | 5580 +++++++++++++++ docs/23.4.2/soa-domains/index.xml | 49 + .../create-soa-domains/index.html | 6374 +++++++++++++++++ .../installguide/create-soa-domains/index.xml | 15 + .../soa-domains/installguide/index.html | 5600 +++++++++++++++ .../23.4.2/soa-domains/installguide/index.xml | 15 + .../prepare-your-environment/index.html | 5953 +++++++++++++++ .../prepare-your-environment/index.xml | 15 + .../installguide/prerequisites/index.html | 5610 +++++++++++++++ .../installguide/prerequisites/index.xml | 15 + .../soa-domains/patch_and_upgrade/index.html | 5598 +++++++++++++++ .../soa-domains/patch_and_upgrade/index.xml | 46 + .../patch-an-image/index.html | 5648 +++++++++++++++ .../upgrade-k8s-cluster/index.html | 5545 ++++++++++++++ .../upgrade-operator-release/index.html | 5564 ++++++++++++++ .../soa-domains/release-notes/index.html | 5604 +++++++++++++++ .../soa-domains/troubleshooting/index.html | 5718 +++++++++++++++ .../soa-domains/troubleshooting/index.xml | 14 + docs/23.4.2/tags/index.html | 2897 ++++++-- docs/v23.4.2/content/soa-domains/_index.md | 86 + .../content/soa-domains/adminguide/_index.md | 11 + .../configure-load-balancer/_index.md | 12 + .../configure-load-balancer/apache.md | 260 + .../configure-load-balancer/nginx.md | 469 ++ .../configure-load-balancer/traefik.md | 476 ++ .../configuring-custom-ssl-certificates.md | 257 + .../adminguide/deploying-composites/_index.md | 11 + .../deploying-composites/deploy-artifacts.md | 340 + .../deploy-using-maven-ant.md | 262 + .../deploying-composites/supportJDEV.md | 148 + .../enable-additional-url-access.md | 121 + .../soa-domains/adminguide/enablingT3.md | 423 ++ .../adminguide/monitoring-soa-domains.md | 123 + .../adminguide/performing-wlst-operations.md | 286 + .../persisting-soa-adapters-customizations.md | 72 + .../content/soa-domains/appendix/_index.md | 10 + .../appendix/docker-k8s-hardening.md | 26 + .../appendix/quickstart-deployment-on-prem.md | 817 +++ .../appendix/soa-cluster-sizing-info.md | 16 + .../soa-domains/cleanup-domain-setup.md | 120 + .../create-or-update-image/_index.md | 750 ++ .../content/soa-domains/edg-guide/_index.md 
| 15 + .../soa-domains/edg-guide/setup-edg.md | 725 ++ .../content/soa-domains/edg-guide/topology.md | 83 + docs/v23.4.2/content/soa-domains/faq.md | 160 + .../soa-domains/installguide/_index.md | 11 + .../installguide/create-soa-domains/_index.md | 706 ++ .../prepare-your-environment/_index.md | 452 ++ .../installguide/prerequisites/_index.md | 106 + .../soa-domains/patch_and_upgrade/_index.md | 10 + .../patch_and_upgrade/patch-an-image/index.md | 163 + .../patch_and_upgrade/upgrade-k8s-cluster.md | 29 + .../upgrade-operator-release.md | 59 + .../content/soa-domains/release-notes.md | 38 + .../soa-domains/troubleshooting/_index.md | 227 + 209 files changed, 445999 insertions(+), 89558 deletions(-) create mode 100644 docs/23.4.2/soa-domains/adminguide/configure-load-balancer/apache/index.html create mode 100644 docs/23.4.2/soa-domains/adminguide/configure-load-balancer/index.html create mode 100644 docs/23.4.2/soa-domains/adminguide/configure-load-balancer/index.xml create mode 100644 docs/23.4.2/soa-domains/adminguide/configure-load-balancer/nginx/index.html create mode 100644 docs/23.4.2/soa-domains/adminguide/configure-load-balancer/traefik/index.html create mode 100644 docs/23.4.2/soa-domains/adminguide/configuring-custom-ssl-certificates/index.html create mode 100644 docs/23.4.2/soa-domains/adminguide/deploying-composites/deploy-artifacts/index.html create mode 100644 docs/23.4.2/soa-domains/adminguide/deploying-composites/deploy-using-maven-ant/index.html create mode 100644 docs/23.4.2/soa-domains/adminguide/deploying-composites/index.html create mode 100644 docs/23.4.2/soa-domains/adminguide/deploying-composites/index.xml create mode 100644 docs/23.4.2/soa-domains/adminguide/deploying-composites/supportjdev/index.html create mode 100644 docs/23.4.2/soa-domains/adminguide/enable-additional-url-access/index.html create mode 100644 docs/23.4.2/soa-domains/adminguide/enablingt3/index.html create mode 100644 docs/23.4.2/soa-domains/adminguide/index.html create mode 100644 docs/23.4.2/soa-domains/adminguide/index.xml create mode 100644 docs/23.4.2/soa-domains/adminguide/monitoring-soa-domains/index.html create mode 100644 docs/23.4.2/soa-domains/adminguide/performing-wlst-operations/index.html create mode 100644 docs/23.4.2/soa-domains/adminguide/persisting-soa-adapters-customizations/index.html create mode 100644 docs/23.4.2/soa-domains/appendix/docker-k8s-hardening/index.html create mode 100644 docs/23.4.2/soa-domains/appendix/index.html create mode 100644 docs/23.4.2/soa-domains/appendix/index.xml create mode 100644 docs/23.4.2/soa-domains/appendix/quickstart-deployment-on-prem/index.html create mode 100644 docs/23.4.2/soa-domains/appendix/soa-cluster-sizing-info/index.html create mode 100644 docs/23.4.2/soa-domains/cleanup-domain-setup/index.html create mode 100644 docs/23.4.2/soa-domains/create-or-update-image/index.html create mode 100644 docs/23.4.2/soa-domains/create-or-update-image/index.xml create mode 100644 docs/23.4.2/soa-domains/edg-guide/index.html create mode 100644 docs/23.4.2/soa-domains/edg-guide/index.xml create mode 100644 docs/23.4.2/soa-domains/edg-guide/setup-edg/index.html create mode 100644 docs/23.4.2/soa-domains/edg-guide/topology/index.html create mode 100644 docs/23.4.2/soa-domains/faq/index.html create mode 100644 docs/23.4.2/soa-domains/index.html create mode 100644 docs/23.4.2/soa-domains/index.xml create mode 100644 docs/23.4.2/soa-domains/installguide/create-soa-domains/index.html create mode 100644 
docs/23.4.2/soa-domains/installguide/create-soa-domains/index.xml create mode 100644 docs/23.4.2/soa-domains/installguide/index.html create mode 100644 docs/23.4.2/soa-domains/installguide/index.xml create mode 100644 docs/23.4.2/soa-domains/installguide/prepare-your-environment/index.html create mode 100644 docs/23.4.2/soa-domains/installguide/prepare-your-environment/index.xml create mode 100644 docs/23.4.2/soa-domains/installguide/prerequisites/index.html create mode 100644 docs/23.4.2/soa-domains/installguide/prerequisites/index.xml create mode 100644 docs/23.4.2/soa-domains/patch_and_upgrade/index.html create mode 100644 docs/23.4.2/soa-domains/patch_and_upgrade/index.xml create mode 100644 docs/23.4.2/soa-domains/patch_and_upgrade/patch-an-image/index.html create mode 100644 docs/23.4.2/soa-domains/patch_and_upgrade/upgrade-k8s-cluster/index.html create mode 100644 docs/23.4.2/soa-domains/patch_and_upgrade/upgrade-operator-release/index.html create mode 100644 docs/23.4.2/soa-domains/release-notes/index.html create mode 100644 docs/23.4.2/soa-domains/troubleshooting/index.html create mode 100644 docs/23.4.2/soa-domains/troubleshooting/index.xml create mode 100644 docs/v23.4.2/content/soa-domains/_index.md create mode 100644 docs/v23.4.2/content/soa-domains/adminguide/_index.md create mode 100644 docs/v23.4.2/content/soa-domains/adminguide/configure-load-balancer/_index.md create mode 100644 docs/v23.4.2/content/soa-domains/adminguide/configure-load-balancer/apache.md create mode 100644 docs/v23.4.2/content/soa-domains/adminguide/configure-load-balancer/nginx.md create mode 100644 docs/v23.4.2/content/soa-domains/adminguide/configure-load-balancer/traefik.md create mode 100644 docs/v23.4.2/content/soa-domains/adminguide/configuring-custom-ssl-certificates.md create mode 100644 docs/v23.4.2/content/soa-domains/adminguide/deploying-composites/_index.md create mode 100644 docs/v23.4.2/content/soa-domains/adminguide/deploying-composites/deploy-artifacts.md create mode 100644 docs/v23.4.2/content/soa-domains/adminguide/deploying-composites/deploy-using-maven-ant.md create mode 100644 docs/v23.4.2/content/soa-domains/adminguide/deploying-composites/supportJDEV.md create mode 100644 docs/v23.4.2/content/soa-domains/adminguide/enable-additional-url-access.md create mode 100644 docs/v23.4.2/content/soa-domains/adminguide/enablingT3.md create mode 100644 docs/v23.4.2/content/soa-domains/adminguide/monitoring-soa-domains.md create mode 100644 docs/v23.4.2/content/soa-domains/adminguide/performing-wlst-operations.md create mode 100644 docs/v23.4.2/content/soa-domains/adminguide/persisting-soa-adapters-customizations.md create mode 100644 docs/v23.4.2/content/soa-domains/appendix/_index.md create mode 100644 docs/v23.4.2/content/soa-domains/appendix/docker-k8s-hardening.md create mode 100644 docs/v23.4.2/content/soa-domains/appendix/quickstart-deployment-on-prem.md create mode 100644 docs/v23.4.2/content/soa-domains/appendix/soa-cluster-sizing-info.md create mode 100644 docs/v23.4.2/content/soa-domains/cleanup-domain-setup.md create mode 100644 docs/v23.4.2/content/soa-domains/create-or-update-image/_index.md create mode 100644 docs/v23.4.2/content/soa-domains/edg-guide/_index.md create mode 100644 docs/v23.4.2/content/soa-domains/edg-guide/setup-edg.md create mode 100644 docs/v23.4.2/content/soa-domains/edg-guide/topology.md create mode 100644 docs/v23.4.2/content/soa-domains/faq.md create mode 100644 docs/v23.4.2/content/soa-domains/installguide/_index.md create mode 100644 
docs/v23.4.2/content/soa-domains/installguide/create-soa-domains/_index.md create mode 100644 docs/v23.4.2/content/soa-domains/installguide/prepare-your-environment/_index.md create mode 100644 docs/v23.4.2/content/soa-domains/installguide/prerequisites/_index.md create mode 100644 docs/v23.4.2/content/soa-domains/patch_and_upgrade/_index.md create mode 100644 docs/v23.4.2/content/soa-domains/patch_and_upgrade/patch-an-image/index.md create mode 100644 docs/v23.4.2/content/soa-domains/patch_and_upgrade/upgrade-k8s-cluster.md create mode 100644 docs/v23.4.2/content/soa-domains/patch_and_upgrade/upgrade-operator-release.md create mode 100644 docs/v23.4.2/content/soa-domains/release-notes.md create mode 100644 docs/v23.4.2/content/soa-domains/troubleshooting/_index.md diff --git a/OracleSOASuite/kubernetes/charts/ingress-per-domain/templates/nginx-ingress-e2essl.yaml b/OracleSOASuite/kubernetes/charts/ingress-per-domain/templates/nginx-ingress-e2essl.yaml index a4889ee41..146e2696b 100755 --- a/OracleSOASuite/kubernetes/charts/ingress-per-domain/templates/nginx-ingress-e2essl.yaml +++ b/OracleSOASuite/kubernetes/charts/ingress-per-domain/templates/nginx-ingress-e2essl.yaml @@ -43,7 +43,7 @@ spec: pathType: ImplementationSpecific backend: service: - name: '{{ .Values.wlsDomain.domainUID }}-{{ .Values.wlsDomain.adminServerName | lower | replace "_" "-" }}' + name: '{{ .Values.wlsDomain.domainUID }}-{{ .Values.wlsDomain.adminServerName | lower | replace "_" "-" }}-nginx-ssl' port: number: {{ .Values.wlsDomain.adminServerSSLPort }} {{- if or (eq .Values.domainType "soa") (eq .Values.domainType "soaosb") }} diff --git a/OracleSOASuite/kubernetes/charts/weblogic-operator/templates/_operator-cm.tpl b/OracleSOASuite/kubernetes/charts/weblogic-operator/templates/_operator-cm.tpl index 2994c0440..640a5ee03 100755 --- a/OracleSOASuite/kubernetes/charts/weblogic-operator/templates/_operator-cm.tpl +++ b/OracleSOASuite/kubernetes/charts/weblogic-operator/templates/_operator-cm.tpl @@ -1,4 +1,4 @@ -# Copyright (c) 2018, 2022, Oracle and/or its affiliates. +# Copyright (c) 2018, 2023, Oracle and/or its affiliates. # Licensed under the Universal Permissive License v 1.0 as shown at https://oss.oracle.com/licenses/upl. {{- define "operator.operatorConfigMap" }} diff --git a/OracleSOASuite/kubernetes/charts/weblogic-operator/templates/_operator-dep.tpl b/OracleSOASuite/kubernetes/charts/weblogic-operator/templates/_operator-dep.tpl index b56f661e7..4c23c79d3 100755 --- a/OracleSOASuite/kubernetes/charts/weblogic-operator/templates/_operator-dep.tpl +++ b/OracleSOASuite/kubernetes/charts/weblogic-operator/templates/_operator-dep.tpl @@ -285,6 +285,10 @@ spec: affinity: {{- toYaml . | nindent 12 }} {{- end }} + {{- with .tolerations }} + tolerations: + {{- toYaml . | nindent 12 }} + {{- end }} containers: - name: "weblogic-operator-webhook" image: {{ .image | quote }} diff --git a/OracleSOASuite/kubernetes/common/utility.sh b/OracleSOASuite/kubernetes/common/utility.sh index 85dee85d4..1ec9fc1ba 100755 --- a/OracleSOASuite/kubernetes/common/utility.sh +++ b/OracleSOASuite/kubernetes/common/utility.sh @@ -969,11 +969,11 @@ getPodName() { detectPod() { ns=$1 startSecs=$SECONDS - maxWaitSecs=10 + maxWaitSecs=120 while [ -z "`${KUBERNETES_CLI:-kubectl} get pod -n ${ns} -o jsonpath={.items[0].metadata.name}`" ]; do if [ $((SECONDS - startSecs)) -lt $maxWaitSecs ]; then echo "Pod not found after $((SECONDS - startSecs)) seconds, retrying ..." 
- sleep 2 + sleep 5 else echo "[Error] Could not find Pod after $((SECONDS - startSecs)) seconds" exit 1 diff --git a/OracleSOASuite/kubernetes/create-soa-domain/domain-home-on-pv/create-domain-inputs.yaml b/OracleSOASuite/kubernetes/create-soa-domain/domain-home-on-pv/create-domain-inputs.yaml index 96195ac46..1757f32f4 100755 --- a/OracleSOASuite/kubernetes/create-soa-domain/domain-home-on-pv/create-domain-inputs.yaml +++ b/OracleSOASuite/kubernetes/create-soa-domain/domain-home-on-pv/create-domain-inputs.yaml @@ -1,4 +1,4 @@ -# Copyright (c) 2020, 2022, Oracle and/or its affiliates. +# Copyright (c) 2020, 2023, Oracle and/or its affiliates. # Licensed under the Universal Permissive License v 1.0 as shown at https://oss.oracle.com/licenses/upl. # The version of this inputs file. Do not modify. @@ -40,7 +40,7 @@ serverStartPolicy: IfNeeded configuredManagedServerCount: 5 # Number of managed servers to initially start for the domain -initialManagedServerReplicas: 2 +initialManagedServerReplicas: 1 # BEGIN: Configuration for SOA cluster # Below values applicable only for the domain types soa and soaosb diff --git a/OracleSOASuite/kubernetes/domain-lifecycle/README.md b/OracleSOASuite/kubernetes/domain-lifecycle/README.md index dd6c07e5f..0d32131f3 100755 --- a/OracleSOASuite/kubernetes/domain-lifecycle/README.md +++ b/OracleSOASuite/kubernetes/domain-lifecycle/README.md @@ -286,18 +286,19 @@ $ waitForDomain.sh -n my-namespace -d my-domain -p 0 Use this helper script for examining, changing permissions, or deleting the contents of the persistent volume (such as domain files or logs) for a WebLogic Domain on PV or Model in Image domain. The script launches a Kubernetes pod named 'pvhelper' using the provided persistent volume claim name and the mount path. You can run the 'kubectl exec' command to get a shell to the running pod container and run commands to examine or clean up the contents of shared directories on the persistent volume. -Use the 'kubectl delete pvhelper -n ' command to delete the Pod after it's no longer needed. +Use the 'kubectl delete pod pvhelper -n ' command to delete the Pod after it's no longer needed. Use the following command for script usage: ``` -$ domain-on-pv-helper.sh -h +$ pv-pvc-helper.sh -h ``` -The following is an example command to launch the helper pod with the PVC name `sample-domain1-weblogic-sample-pvc` and mount path `/shared`. +The following is an example command to launch the helper pod with the PVC name `sample-domain1-weblogic-sample-pvc` and mount path `/shared`. +Specifying the `-r` argument allows the script to run as the `root` user. ``` -$ domain-on-pv-helper.sh -n sample-domain1-ns -c sample-domain1-weblogic-sample-pvc -m /shared +$ pv-pvc-helper.sh -n sample-domain1-ns -c sample-domain1-weblogic-sample-pvc -m /shared -r ``` After the Pod is created, use the following command to get a shell to the running pod container. diff --git a/docs-source/content/soa-domains/_index.md b/docs-source/content/soa-domains/_index.md index 8a1cbe74f..36da573fa 100644 --- a/docs-source/content/soa-domains/_index.md +++ b/docs-source/content/soa-domains/_index.md @@ -25,7 +25,7 @@ The operator has several key features to assist you with deploying and managing #### Current production release -The current production release for the Oracle SOA Suite domains deployment on Kubernetes is [23.3.2](https://github.com/oracle/fmw-kubernetes/releases). 
This release uses the WebLogic Kubernetes Operator version [4.1.0](https://github.com/oracle/weblogic-kubernetes-operator/releases/tag/v4.1.0). +The current production release for the Oracle SOA Suite domains deployment on Kubernetes is [23.4.2](https://github.com/oracle/fmw-kubernetes/releases). This release uses the WebLogic Kubernetes Operator version [4.1.4](https://github.com/oracle/weblogic-kubernetes-operator/releases/tag/v4.1.4). #### Recent changes and known issues @@ -64,6 +64,7 @@ please consult this table of contents: To view documentation for an earlier release, see: +* [Version 23.3.2](https://oracle.github.io/fmw-kubernetes/23.3.2/soa-domains/) * [Version 23.2.2](https://oracle.github.io/fmw-kubernetes/23.2.2/soa-domains/) * [Version 23.1.2](https://oracle.github.io/fmw-kubernetes/23.1.2/soa-domains/) * [Version 22.4.2](https://oracle.github.io/fmw-kubernetes/22.4.2/soa-domains/) diff --git a/docs-source/content/soa-domains/adminguide/configure-load-balancer/apache.md b/docs-source/content/soa-domains/adminguide/configure-load-balancer/apache.md index 2986207d1..82e17c8ac 100644 --- a/docs-source/content/soa-domains/adminguide/configure-load-balancer/apache.md +++ b/docs-source/content/soa-domains/adminguide/configure-load-balancer/apache.md @@ -133,7 +133,7 @@ Refer to the [sample](https://github.com/oracle/docker-images/tree/main/OracleWe {{% /expand %}} -1. Create a PV and PVC (pv-claim-name) that can be used to store the custom_mod_wl_apache.conf. Refer to the [Sample](https://github.com/oracle/weblogic-kubernetes-operator/blob/v4.1.0/kubernetes/samples/scripts/create-weblogic-domain-pv-pvc/README.md) for creating a PV or PVC. +1. Create a PV and PVC (pv-claim-name) that can be used to store the custom_mod_wl_apache.conf. Refer to the [Sample](https://github.com/oracle/weblogic-kubernetes-operator/blob/v4.1.4/kubernetes/samples/scripts/create-weblogic-domain-pv-pvc/README.md) for creating a PV or PVC. #### Prepare the certificate and private key diff --git a/docs-source/content/soa-domains/adminguide/configure-load-balancer/nginx.md b/docs-source/content/soa-domains/adminguide/configure-load-balancer/nginx.md index 64a222f08..bbd463c3b 100644 --- a/docs-source/content/soa-domains/adminguide/configure-load-balancer/nginx.md +++ b/docs-source/content/soa-domains/adminguide/configure-load-balancer/nginx.md @@ -202,7 +202,7 @@ Follow these steps to set up NGINX as a load balancer for an Oracle SOA Suite do 1. Create an ingress for the domain in the domain namespace by using the sample Helm chart. Here path-based routing is used for ingress. Sample values for default configuration are shown in the file `${WORKDIR}/charts/ingress-per-domain/values.yaml`. By default, `type` is `TRAEFIK` , `sslType` is `NONSSL`, and `domainType` is `soa`. These values can be overridden by passing values through the command line or can be edited in the sample file `values.yaml`. If needed, you can update the ingress YAML file to define more path rules (in section `spec.rules.host.http.paths`) based on the domain application URLs that need to be accessed. Update the template YAML file for the NGINX load balancer located at `${WORKDIR}/charts/ingress-per-domain/templates/nginx-ingress.yaml`. - > Note: See [here](https://github.com/oracle/fmw-kubernetes/blob/v23.3.2/OracleSOASuite/kubernetes/charts/ingress-per-domain/README.md#configuration) for all the configuration parameters. 
+ > Note: See [here](https://github.com/oracle/fmw-kubernetes/blob/v23.4.2/OracleSOASuite/kubernetes/charts/ingress-per-domain/README.md#configuration) for all the configuration parameters. ```bash $ cd ${WORKDIR} diff --git a/docs-source/content/soa-domains/adminguide/configure-load-balancer/traefik.md b/docs-source/content/soa-domains/adminguide/configure-load-balancer/traefik.md index 963efeb71..4f438c8de 100644 --- a/docs-source/content/soa-domains/adminguide/configure-load-balancer/traefik.md +++ b/docs-source/content/soa-domains/adminguide/configure-load-balancer/traefik.md @@ -156,7 +156,7 @@ Sample values for default configuration are shown in the file `${WORKDIR}/charts By default, `type` is `TRAEFIK`, `sslType` is `NONSSL`, and `domainType` is `soa`. These values can be overridden by passing values through the command line or can be edited in the sample file `values.yaml` based on the type of configuration (NONSSL, SSL, and E2ESSL). If needed, you can update the ingress YAML file to define more path rules (in section `spec.rules.host.http.paths`) based on the domain application URLs that need to be accessed. The template YAML file for the Traefik (ingress-based) load balancer is located at `${WORKDIR}/charts/ingress-per-domain/templates/traefik-ingress.yaml`. -> Note: See [here](https://github.com/oracle/fmw-kubernetes/blob/v23.3.2/OracleSOASuite/kubernetes/charts/ingress-per-domain/README.md#configuration) for all the configuration parameters. +> Note: See [here](https://github.com/oracle/fmw-kubernetes/blob/v23.4.2/OracleSOASuite/kubernetes/charts/ingress-per-domain/README.md#configuration) for all the configuration parameters. 1. Choose an appropriate `LOADBALANCER_HOSTNAME` for accessing the Oracle SOA Suite domain application URLs. diff --git a/docs-source/content/soa-domains/adminguide/monitoring-soa-domains.md b/docs-source/content/soa-domains/adminguide/monitoring-soa-domains.md index 4e0053268..f74051423 100644 --- a/docs-source/content/soa-domains/adminguide/monitoring-soa-domains.md +++ b/docs-source/content/soa-domains/adminguide/monitoring-soa-domains.md @@ -18,7 +18,7 @@ After the Oracle SOA Suite domain is set up, you can: Using the `WebLogic Monitoring Exporter` you can scrape runtime information from a running Oracle SOA Suite instance and monitor them using Prometheus and Grafana. #### Set up monitoring -Follow [these steps](https://github.com/oracle/fmw-kubernetes/blob/v23.3.2/OracleSOASuite/kubernetes/monitoring-service/README.md) to set up monitoring for an Oracle SOA Suite instance. For more details on WebLogic Monitoring Exporter, see [here](https://github.com/oracle/weblogic-monitoring-exporter). +Follow [these steps](https://github.com/oracle/fmw-kubernetes/blob/v23.4.2/OracleSOASuite/kubernetes/monitoring-service/README.md) to set up monitoring for an Oracle SOA Suite instance. For more details on WebLogic Monitoring Exporter, see [here](https://github.com/oracle/weblogic-monitoring-exporter). ### Publish WebLogic Server logs into Elasticsearch diff --git a/docs-source/content/soa-domains/appendix/quickstart-deployment-on-prem.md b/docs-source/content/soa-domains/appendix/quickstart-deployment-on-prem.md index 47b38ef27..66676c14f 100644 --- a/docs-source/content/soa-domains/appendix/quickstart-deployment-on-prem.md +++ b/docs-source/content/soa-domains/appendix/quickstart-deployment-on-prem.md @@ -451,15 +451,15 @@ Refer to the official [documentation](https://kubernetes.io/docs/setup/#producti 1. 
Create a working directory to set up the source code: ```bash - $ mkdir $HOME/soa_23.3.2 - $ cd $HOME/soa_23.3.2 + $ mkdir $HOME/soa_23.4.2 + $ cd $HOME/soa_23.4.2 ``` 1. Download the WebLogic Kubernetes Operator source code and Oracle SOA Suite Kubernetes deployment scripts from the SOA [repository](https://github.com/oracle/fmw-kubernetes.git). Required artifacts are available at `OracleSOASuite/kubernetes`. ``` bash $ git clone https://github.com/oracle/fmw-kubernetes.git - $ export WORKDIR=$HOME/soa_23.3.2/fmw-kubernetes/OracleSOASuite/kubernetes + $ export WORKDIR=$HOME/soa_23.4.2/fmw-kubernetes/OracleSOASuite/kubernetes ``` #### 3.2 Get required Docker images and add them to your local registry @@ -467,7 +467,7 @@ Refer to the official [documentation](https://kubernetes.io/docs/setup/#producti 1. Pull the WebLogic Kubernetes Operator image: ```shell - $ docker pull ghcr.io/oracle/weblogic-kubernetes-operator:4.1.0 + $ docker pull ghcr.io/oracle/weblogic-kubernetes-operator:4.1.4 ``` 1. Obtain the Oracle Database image and Oracle SOA Suite Docker image from the [Oracle Container Registry](https://container-registry.oracle.com): @@ -515,7 +515,7 @@ Use Helm to install and start the operator from the directory you just cloned: $ cd ${WORKDIR} $ helm install weblogic-kubernetes-operator charts/weblogic-operator \ --namespace opns \ - --set image=ghcr.io/oracle/weblogic-kubernetes-operator:4.1.0 \ + --set image=ghcr.io/oracle/weblogic-kubernetes-operator:4.1.4 \ --set serviceAccount=op-sa \ --wait ``` @@ -532,7 +532,7 @@ $ helm install weblogic-kubernetes-operator charts/weblogic-operator \ $ kubectl logs -n opns -c weblogic-operator deployments/weblogic-operator ``` -The WebLogic Kubernetes Operator v4.1.0 has been installed. Continue with the load balancer and Oracle SOA Suite domain setup. +The WebLogic Kubernetes Operator v4.1.4 has been installed. Continue with the load balancer and Oracle SOA Suite domain setup. ### 5. Install the Traefik (ingress-based) load balancer diff --git a/docs-source/content/soa-domains/create-or-update-image/_index.md b/docs-source/content/soa-domains/create-or-update-image/_index.md index d7be75c3a..586276293 100644 --- a/docs-source/content/soa-domains/create-or-update-image/_index.md +++ b/docs-source/content/soa-domains/create-or-update-image/_index.md @@ -121,10 +121,10 @@ After [setting up the WebLogic Image Tool]({{< relref "/soa-domains/create-or-up You must download the required Oracle SOA Suite installation binaries and patches as listed below from the [Oracle Software Delivery Cloud](https://edelivery.oracle.com/) and save them in a directory of your choice. In these steps, this directory is `download location`. 
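
For illustration only, a hedged sketch of how such a download location might be laid out. `$HOME/soa-downloads` is a hypothetical path standing in for `download location` in the `imagetool` commands later in this guide; the file names shown are taken from the binaries and patches listed below.

```bash
# Hypothetical download location; substitute your own path wherever these
# steps refer to <download location>.
$ export DOWNLOAD_LOC=$HOME/soa-downloads
$ mkdir -p ${DOWNLOAD_LOC}
# After downloading, the directory holds the JDK, installers, and patch zips,
# for example:
$ ls ${DOWNLOAD_LOC}
jdk-8u391-linux-x64.tar.gz  fmw_12.2.1.4.0_infrastructure.jar  p35748499_122140_Generic.zip
```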
-The installation binaries and patches required for release 23.3.2 are: +The installation binaries and patches required for release 23.4.2 are: * JDK: - * jdk-8u381-linux-x64.tar.gz + * jdk-8u391-linux-x64.tar.gz * Fusion Middleware Infrastructure installer: * fmw_12.2.1.4.0_infrastructure.jar @@ -139,18 +139,18 @@ In this release, Oracle B2B is not supported to be configured, but the installer {{% /notice %}} * Fusion Middleware Infrastructure patches: - * p28186730_1394213_Generic.zip (OPATCH 13.9.4.2.13 FOR EM 13.4, 13.5 AND FMW/WLS 12.2.1.3.0, 12.2.1.4.0 AND 14.1.1.0.0) - * p35557681_122140_Generic.zip (WLS PATCH SET UPDATE 12.2.1.4.230702) - * p35547646_122140_Generic.zip (FMW Thirdparty Bundle Patch 12.2.1.4.230628) - * p33950717_122140_Generic.zip (OPSS Bundle Patch 12.2.1.4.220311) - * p35159582_122140_Generic.zip (OWSM BUNDLE PATCH 12.2.1.4.230308) - * p35503128_122140_Generic.zip (ADF BUNDLE PATCH 12.2.1.4.2300615) - * p35505207_122140_Generic.zip (Coherence 12.2.1.4 Cumulative Patch 18 (12.2.1.4.18)) - * p33093748_122140_Generic.zip (FMW PLATFORM 12.2.1.4.0 SPU FOR APRCPU2021) - * p33639718_122140_Linux-x86-64.zip (ADR FOR WEBLOGIC SERVER 12.2.1.4.0 JULY CPU 2022) - * p32720458_122140_Generic.zip (JDBC One Off) - * p35380810_122140_Generic.zip (RDA release 23.3-20230718 for FMW 12.2.1.4.0) - * p35432543_122140_Generic.zip (WebCenter Core Bundle Patch 12.2.1.4.230525) + * p28186730_1394214_Generic.zip (OPATCH 13.9.4.2.14 FOR EM 13.4, 13.5 AND FMW/WLS 12.2.1.3.0, 12.2.1.4.0 AND 14.1.1.0.0) + * p35893811_122140_Generic.zip (WLS PATCH SET UPDATE 12.2.1.4.231010) + * p35882299_122140_Generic.zip (FMW Thirdparty Bundle Patch 12.2.1.4.231006) + * p33950717_122140_Generic.zip (OPSS BUNDLE PATCH 12.2.1.4.220311) + * p35868571_122140_Generic.zip (OWSM BUNDLE PATCH 12.2.1.4.231003) + * p35735469_122140_Generic.zip (ADF BUNDLE PATCH 12.2.1.4.230823) + * p35778804_122140_Generic.zip (Coherence 12.2.1.4 Cumulative Patch 19 (12.2.1.4.19)) + * p33093748_122140_Generic.zip (FMW PLATFORM 12.2.1.4.0 SPU FOR APRCPU2021) + * p35476067_122140_Linux-x86-64.zip (ADR FOR WEBLOGIC SERVER 12.2.1.4.0 CPU OCT 2023) + * p32720458_122140_Generic.zip (JDBC One Off) + * p35671137_122140_Generic.zip (RDA release 23.4-20231017 for FMW 12.2.1.4.0) + * p35751917_122140_Generic.zip (WebCenter Core Bundle Patch 12.2.1.4.230827 ) * p34065178_122140_Generic.zip (OVD One Off) * p34542329_122140_Generic.zip (EM One Off) * p34765492_122140_Generic.zip (EM One Off) @@ -158,11 +158,12 @@ In this release, Oracle B2B is not supported to be configured, but the installer * p34809489_122140_Generic.zip (JDEV One Off) * Oracle SOA Suite and Oracle Service Bus patches - * p35445981_122140_Generic.zip (SOA Bundle Patch 12.2.1.4.230530) - * p35347020_122140_Generic.zip (OSB BUNDLE PATCH 12.2.1.4.230501) - * p33404495_122140_Generic.zip (SOA One-off) - * p35720109_12214230501_Generic.zip (OSB One-off) - * p31713053_122140_Linux-x86-64.zip (One-off patch) + * p35748499_122140_Generic.zip (SOA Bundle Patch 12.2.1.4.230827) + * p35815693_122140_Generic.zip (OSB BUNDLE PATCH 12.2.1.4.230915) + * p33404495_122140_Generic.zip (SOA One-off) + * p32827327_122140_Generic.zip (OSB One-off) + * p32808126_122140_Generic.zip (SOA/ESS One-off) + * p31713053_122140_Linux-x86-64.zip (One-off patch) ##### Update required build files @@ -192,7 +193,7 @@ The following files in the code repository location `/ 1. 
Add a JDK package to the WebLogic Image Tool cache: ``` bash - $ imagetool cache addInstaller --type jdk --version 8u381 --path /jdk-8u381-linux-x64.tar.gz + $ imagetool cache addInstaller --type jdk --version 8u391 --path /jdk-8u391-linux-x64.tar.gz ``` 1. Add the downloaded installation binaries to the WebLogic Image Tool cache: @@ -210,61 +211,61 @@ The following files in the code repository location `/ 1. Add the downloaded OPatch patch to the WebLogic Image Tool cache: ``` bash - $ imagetool cache addEntry --key 28186730_13.9.4.2.13 --value /p28186730_1394213_Generic.zip + $ imagetool cache addEntry --key 28186730_13.9.4.2.14 --value /p28186730_1394214_Generic.zip ``` 1. Append the `--opatchBugNumber` flag and the OPatch patch key to the `create` command in the `buildArgs` file: ``` bash - --opatchBugNumber 28186730_13.9.4.2.13 + --opatchBugNumber 28186730_13.9.4.2.14 ``` 1. Add the downloaded product patches to the WebLogic Image Tool cache: - ``` bash - $ imagetool cache addEntry --key 28186730_13.9.4.2.13 --value /p28186730_1394213_Generic.zip - - $ imagetool cache addEntry --key 33639718_12.2.1.4.0 --value /p33639718_122140_Linux-x86-64.zip - + ``` bash $ imagetool cache addEntry --key 31713053_12.2.1.4.0 --value /p31713053_122140_Linux-x86-64.zip - $ imagetool cache addEntry --key 35347020_12.2.1.4.0 --value /p35347020_122140_Generic.zip - $ imagetool cache addEntry --key 32720458_12.2.1.4.0 --value /p32720458_122140_Generic.zip + $ imagetool cache addEntry --key 32808126_12.2.1.4.0 --value /p32808126_122140_Generic.zip + + $ imagetool cache addEntry --key 32827327_12.2.1.4.0 --value /p32827327_122140_Generic.zip + $ imagetool cache addEntry --key 33093748_12.2.1.4.0 --value /p33093748_122140_Generic.zip $ imagetool cache addEntry --key 33404495_12.2.1.4.0 --value /p33404495_122140_Generic.zip - $ imagetool cache addEntry --key 35159582_12.2.1.4.0 --value /p35159582_122140_Generic.zip + $ imagetool cache addEntry --key 33950717_12.2.1.4.0 --value /p33950717_122140_Generic.zip - $ imagetool cache addEntry --key 35505207_12.2.1.4.0 --value /p35505207_122140_Generic.zip + $ imagetool cache addEntry --key 34065178_12.2.1.4.0 --value /p34065178_122140_Generic.zip - $ imagetool cache addEntry --key 33950717_12.2.1.4.0 --value /p33950717_122140_Generic.zip + $ imagetool cache addEntry --key 34542329_12.2.1.4.0 --value /p34542329_122140_Generic.zip - $ imagetool cache addEntry --key 35503128_12.2.1.4.0 --value /p35503128_122140_Generic.zip + $ imagetool cache addEntry --key 34765492_12.2.1.4.0 --value /p34765492_122140_Generic.zip - $ imagetool cache addEntry --key 35445981_12.2.1.4.0 --value /p35445981_122140_Generic.zip + $ imagetool cache addEntry --key 34809489_12.2.1.4.0 --value /p34809489_122140_Generic.zip - $ imagetool cache addEntry --key 35557681_12.2.1.4.0 --value /p35557681_122140_Generic.zip + $ imagetool cache addEntry --key 35474754_12.2.1.4.0 --value /p35474754_122140_Generic.zip - $ imagetool cache addEntry --key 35547646_12.2.1.4.0 --value /p35547646_122140_Generic.zip + $ imagetool cache addEntry --key 35476067_12.2.1.4.0 --value /p35476067_122140_Linux-x86-64.zip - $ imagetool cache addEntry --key 34065178_12.2.1.4.0 --value /p34065178_122140_Generic.zip + $ imagetool cache addEntry --key 35671137_12.2.1.4.0 --value /p35671137_122140_Generic.zip - $ imagetool cache addEntry --key 35380810_12.2.1.4.0 --value /p35380810_122140_Generic.zip + $ imagetool cache addEntry --key 35735469_12.2.1.4.0 --value /p35735469_122140_Generic.zip - $ imagetool cache addEntry --key 
35432543_12.2.1.4.0 --value /p35432543_122140_Generic.zip + $ imagetool cache addEntry --key 35748499_12.2.1.4.0 --value /p35748499_122140_Generic.zip - $ imagetool cache addEntry --key 34542329_12.2.1.4.0 --value /p34542329_122140_Generic.zip + $ imagetool cache addEntry --key 35751917_12.2.1.4.0 --value /p35751917_122140_Generic.zip - $ imagetool cache addEntry --key 34765492_12.2.1.4.0 --value /p34765492_122140_Generic.zip + $ imagetool cache addEntry --key 35778804_12.2.1.4.0 --value /p35778804_122140_Generic.zip - $ imagetool cache addEntry --key 35474754_12.2.1.4.0 --value /p35474754_122140_Generic.zip - - $ imagetool cache addEntry --key 34809489_12.2.1.4.0 --value / p34809489_122140_Generic.zip - - $ imagetool cache addEntry --key 35720109_12.2.1.4.0 --value / p35720109_12214230501_Generic.zip + $ imagetool cache addEntry --key 35815693_12.2.1.4.0 --value /p35815693_122140_Generic.zip + + $ imagetool cache addEntry --key 35868571_12.2.1.4.0 --value /p35868571_122140_Generic.zip + + $ imagetool cache addEntry --key 35882299_12.2.1.4.0 --value /p35882299_122140_Generic.zip + + $ imagetool cache addEntry --key 35893811_12.2.1.4.0 --value /p35893811_122140_Generic.zip ``` @@ -273,14 +274,14 @@ The following files in the code repository location `/ Sample `--patches` list for the product patches added in to the cache: ``` - --patches 33639718_12.2.1.4.0,31713053_12.2.1.4.0,35347020_12.2.1.4.0,32720458_12.2.1.4.0,33093748_12.2.1.4.0,33404495_12.2.1.4.0,35159582_12.2.1.4.0,35505207_12.2.1.4.0,33950717_12.2.1.4.0,35503128_12.2.1.4.0,35445981_12.2.1.4.0,35557681_12.2.1.4.0,35547646_12.2.1.4.0,34065178_12.2.1.4.0,35380810_12.2.1.4.0,35432543_12.2.1.4.0,34542329_12.2.1.4.0,34765492_12.2.1.4.0,35474754_12.2.1.4.0,34809489_12.2.1.4.0,35720109_12.2.1.4.0 + --patches 31713053_12.2.1.4.0,32720458_12.2.1.4.0,32808126_12.2.1.4.0,32827327_12.2.1.4.0,33093748_12.2.1.4.0,33404495_12.2.1.4.0,33950717_12.2.1.4.0,34065178_12.2.1.4.0,34542329_12.2.1.4.0,34765492_12.2.1.4.0,34809489_12.2.1.4.0,35474754_12.2.1.4.0,35476067_12.2.1.4.0,35671137_12.2.1.4.0,35735469_12.2.1.4.0,35748499_12.2.1.4.0,35751917_12.2.1.4.0,35778804_12.2.1.4.0,35815693_12.2.1.4.0,35868571_12.2.1.4.0,35882299_12.2.1.4.0,35893811_12.2.1.4.0 ``` Example `buildArgs` file after appending the OPatch patch and product patches: ``` create - --jdkVersion 8u381 + --jdkVersion 8u391 --type soa_osb_b2b --version 12.2.1.4.0 --tag oracle/soasuite:12.2.1.4.0 @@ -290,7 +291,7 @@ The following files in the code repository location `/ --additionalBuildCommands /docker-images/OracleSOASuite/imagetool/12.2.1.4.0/additionalBuildCmds.txt --additionalBuildFiles /docker-images/OracleSOASuite/dockerfiles/12.2.1.4/container-scripts --installerResponseFile /docker-images/OracleFMWInfrastructure/dockerfiles/12.2.1.4/install.file,/docker-images/OracleSOASuite/dockerfiles/12.2.1.4/install/soasuite.response,/docker-images/OracleSOASuite/dockerfiles/12.2.1.4/install/osb.response,/docker-images/OracleSOASuite/dockerfiles/12.2.1.4/install/b2b.response - --patches 33639718_12.2.1.4.0,31713053_12.2.1.4.0,35347020_12.2.1.4.0,32720458_12.2.1.4.0,33093748_12.2.1.4.0,33404495_12.2.1.4.0,35159582_12.2.1.4.0,35505207_12.2.1.4.0,33950717_12.2.1.4.0,35503128_12.2.1.4.0,35445981_12.2.1.4.0,35557681_12.2.1.4.0,35547646_12.2.1.4.0,34065178_12.2.1.4.0,35380810_12.2.1.4.0,35432543_12.2.1.4.0,34542329_12.2.1.4.0,34765492_12.2.1.4.0,35474754_12.2.1.4.0,34809489_12.2.1.4.0,35720109_12.2.1.4.0 + --patches 
31713053_12.2.1.4.0,32720458_12.2.1.4.0,32808126_12.2.1.4.0,32827327_12.2.1.4.0,33093748_12.2.1.4.0,33404495_12.2.1.4.0,33950717_12.2.1.4.0,34065178_12.2.1.4.0,34542329_12.2.1.4.0,34765492_12.2.1.4.0,34809489_12.2.1.4.0,35474754_12.2.1.4.0,35476067_12.2.1.4.0,35671137_12.2.1.4.0,35735469_12.2.1.4.0,35748499_12.2.1.4.0,35751917_12.2.1.4.0,35778804_12.2.1.4.0,35815693_12.2.1.4.0,35868571_12.2.1.4.0,35882299_12.2.1.4.0,35893811_12.2.1.4.0 ``` >Note: In the `buildArgs` file: > * `--jdkVersion` value must match the `--version` value used in the `imagetool cache addInstaller` command for `--type jdk`. diff --git a/docs-source/content/soa-domains/edg-guide/setup-edg.md b/docs-source/content/soa-domains/edg-guide/setup-edg.md index 8ad3b6548..806d321c3 100644 --- a/docs-source/content/soa-domains/edg-guide/setup-edg.md +++ b/docs-source/content/soa-domains/edg-guide/setup-edg.md @@ -35,6 +35,9 @@ This section provides recommended steps to set up Oracle SOA Suite enterprise de - Create the L4/TCP listener for the load balancer (LBR). - Create the LBR backend pool with the list of control plane nodes that will be added (do not use IPs, always use hostnames). + > **Note**: We recommend maintaining the values of the following `kube-api` backend pool parameters within the prescribed range to minimize the downtime while restarting the Kubernetes control pane or performing maintenance operations. + - Healthcheck interval : Within 1000 milliseconds + - Healthcheck timeout : Within 900 milliseconds - Enable the L4 LBR to route to the backend set/pool. > **Note**: It is important that this is an L4/TCP listener, not an HTTP/HTTPS listener. diff --git a/docs-source/content/soa-domains/installguide/create-soa-domains/_index.md b/docs-source/content/soa-domains/installguide/create-soa-domains/_index.md index 5c015bd42..6ae4e4cb6 100644 --- a/docs-source/content/soa-domains/installguide/create-soa-domains/_index.md +++ b/docs-source/content/soa-domains/installguide/create-soa-domains/_index.md @@ -53,7 +53,7 @@ The following parameters can be provided in the inputs file. | `imagePullPolicy` | Oracle SOA Suite Docker image pull policy. Valid values are `IfNotPresent`, `Always`, `Never`. | `IfNotPresent` | | `imagePullSecretName` | Name of the Kubernetes secret to access the Docker Store to pull the WebLogic Server Docker image. The presence of the secret will be validated when this parameter is specified. | | | `includeServerOutInPodLog` | Boolean value indicating whether to include the server .out to the pod's stdout. | `true` | -| `initialManagedServerReplicas` | Number of Managed Servers to initially start for the domain. | `2` | +| `initialManagedServerReplicas` | Number of Managed Servers to initially start for the domain. | `1` | | `javaOptions` | Java options for initiating the Administration Server and Managed Servers. A Java option can have references to one or more of the following predefined variables to obtain WebLogic Server domain information: `$(DOMAIN_NAME)`, `$(DOMAIN_HOME)`, `$(ADMIN_NAME)`, `$(ADMIN_PORT)`, and `$(SERVER_NAME)`. If `sslEnabled` is set to `true`, add `-Dweblogic.ssl.Enabled=true -Dweblogic.security.SSL.ignoreHostnameVerification=true` to allow the Managed Servers to connect to the Administration Server while booting up. In this environment, the demo certificate generated by the WebLogic Server contains a host name that is different from the runtime container's host name. 
| `-Dweblogic.StdoutDebugEnabled=false` | | `logHome` | The in-pod location for the domain log, server logs, server out, and Node Manager log files. If not specified, the value is derived from the `domainUID` as `/shared/logs/`. | `/u01/oracle/user_projects/domains/logs/soainfra` | | `soaManagedServerNameBase` | Base string used to generate Managed Server names in the SOA cluster. The default value is `soa_server`. This configuration parameter is applicable only for `soa` and `soaosb` domain types.| `soa_server` | @@ -106,23 +106,156 @@ The script will perform the following steps: offline WLST scripts to create the domain on the shared storage. * Run and wait for the job to finish. * Create a Kubernetes domain YAML file, `domain.yaml`, in the "output" directory that was created above. - This YAML file can be used to create the Kubernetes resource using the `kubectl create -f` +* Create a convenient utility script, `delete-domain-job.yaml`, to clean up the domain home + created by the create script. + +#### Post install tasks + +Review the Read Me file of each patch listed in the following MOS notes, depending on your Oracle Linux version. +- For Oracle Linux 7, see [35908803](https://support.oracle.com/epmos/faces/ui/patch/PatchDetail.jspx?patchId=35908803) +- For Oracle Linux 8, see [35915091](https://support.oracle.com/epmos/faces/ui/patch/PatchDetail.jspx?patchId=35915091) + +Also, for SOA bundle 12.2.1.4.230827 patch post install tasks, see [35748499](https://support.oracle.com/epmos/faces/ui/patch/PatchDetail.jspx?patchId=35748499). Refer **Section 6: Post-Installation Instructions** in the Read Me file for instructions related to purge and maven plugin. + +You can ignore the instructions to clean PS3 maven plugin files, since it will be automated as part of container image creation. + +Perform the following steps to purge. + +1. Create a YAML file named `soapostinstall.yaml` with the following sample. The necessary SQL scripts are mounted inside pod at "/soa_purge". + + > Note: Replace the domain namespace `soans` and SOA Suite image `soasuite:12.2.1.4` with the details specific to your environment in the following sample YAML. + + ``` + apiVersion: v1 + kind: Pod + metadata: + name: soapostinstall + namespace: soans + labels: + app.kubernetes.io/name: dbclient + spec: + containers: + - name: dbclient-container + image: ghcr.io/oracle/oraclelinux8-instantclient:21 + command: ['sh', '-c', 'echo The dbclient pod is running! && sleep 3600'] + volumeMounts: + - mountPath: "/soa_purge" + name: soa-shared-volume + initContainers: + - name: soa-oh + image: soasuite:12.2.1.4 + command: ['sh', '-c', "cp -rf /u01/oracle/soa/common/sql/soainfra/sql/oracle/122140/ /soa_purge"] + volumeMounts: + - mountPath: "/soa_purge" + name: soa-shared-volume + volumes: + - name: soa-shared-volume + ``` + +1. Apply the YAML to create a Kubernetes pod named `soapostinstall` in the `soans` namespace. + + ``` + $ kubectl apply -f soapostinstall.yaml + ``` + +1. Start a bash shell in the `soapostinstall` pod. + + ``` + $ kubectl exec -it -n soans soapostinstall -- bash + ``` + +1. A bash shell opens in the `soapostinstall` pod. + + ``` + [root@soapostinstall /]# + ``` + +1. Reload the SOA purge scripts as a SOAINFRA user. + + - Change the directory to the location where the scripts for post installation steps are stored. + + ``` + [root@soapostinstall /]# cd /soa_purge/122140/soa_purge12/ + ``` + + - Connect to the database as a SOAINFRA user. 
+ + ``` + [root@soapostinstall /]# sqlplus /@ + ``` + + For example, if the schema prefix is `SOA1`, schema password is `Oradoc_db1` and connection string to database is `oracle-db.default.svc.cluster.local:1521/devpdb.k8s`, you can connect to database as SOAINFRA user using the following command. + + ``` + [root@soapostinstall /]# sqlplus SOA1_SOAINFRA/Oradoc_db1@oracle-db.default.svc.cluster.local:1521/devpdb.k8s + ``` + + - Execute the following script. + + ``` + SQL> @soa_purge_scripts.sql + + SQL> show errors + ``` + +1. Verify the SOA purge scripts. + + - Change the directory to location where the SOA purge scripts are stored. + + ``` + [root@soapostinstall /]# cd /soa_purge/122140/verify12/ + ``` + + - Connect to the database as a SOAINFRA user. + + ``` + [root@soapostinstall /]# sqlplus SOA1_SOAINFRA/Oradoc_db1@oracle-db.default.svc.cluster.local:1521/devpdb.k8s + ``` + + - Execute the following script. + + ``` + SQL> @soa_verify_scripts.sql + + SQL> show errors + ``` + +1. Recreate the auto purge jobs with new jobs added in 12.2.1.4.201210SOABP. + + - Change the directory to location where scripts are stored. + + ``` + [root@soapostinstall /]# cd /soa_purge/122140/soa_purge12/soa + ``` + + - Connect to the database as a SOAINFRA user. + + ``` + [root@soapostinstall /]# sqlplus SOA1_SOAINFRA/Oradoc_db1@oracle-db.default.svc.cluster.local:1521/devpdb.k8s + ``` + + - Execute the following script. + + ``` + SQL> @loadcompbasedpurgeprogram.sql + + SQL> show errors + ``` + +#### Start the domain + +The `domain.yaml` created by `create-domain.sh` script above has details about the Oracle SOA Suite Domain and Cluster Kubernetes resources. You can create Oracle SOA Suite Domain using the `kubectl create -f` or `kubectl apply -f` command: ``` $ kubectl apply -f /weblogic-domains//domain.yaml ``` -* Create a convenient utility script, `delete-domain-job.yaml`, to clean up the domain home - created by the create script. - - - The default domain created by the script has the following characteristics: * An Administration Server named `AdminServer` listening on port `7001`. * A configured cluster named `soa_cluster` of size 5. -* Two Managed Servers, named `soa_server1` and `soa_server2`, listening on port `8001`. +* Managed Server, named `soa_server1` listening on port `8001`. * Log files that are located in `/shared/logs/`. * SOA Infra, SOA Composer, and WorklistApp applications deployed. @@ -532,17 +665,14 @@ Enter the following command to see the pods running the servers: $ kubectl get pods -n NAMESPACE ``` -Here is an example of the output of this command. You can verify that an Administration Server and two Managed Servers for each cluster (SOA and Oracle Service Bus) are running for `soaosb` domain type. +Here is an example of the output of this command. You can verify that an Administration Server and a Managed Server for each cluster (SOA and Oracle Service Bus) are running for `soaosb` domain type. 
``` $ kubectl get pods -n soans NAME READY STATUS RESTARTS AGE soainfra-adminserver 1/1 Running 0 53m soainfra-osb-server1 1/1 Running 0 50m -soainfra-osb-server2 1/1 Running 0 50m soainfra-soa-server1 1/1 Running 0 50m -soainfra-soa-server2 1/1 Running 0 50m - ``` #### Verify the services @@ -563,12 +693,12 @@ soainfra-adminserver ClusterIP None 30 soainfra-cluster-osb-cluster ClusterIP 10.100.138.57 9001/TCP,9002/TCP 51m soainfra-cluster-soa-cluster ClusterIP 10.99.117.240 8001/TCP,8002/TCP 51m soainfra-osb-server1 ClusterIP None 9001/TCP,9002/TCP 51m -soainfra-osb-server2 ClusterIP None 9001/TCP,9002/TCP 51m +soainfra-osb-server2 ClusterIP 10.108.50.145 9001/TCP,9002/TCP 51m soainfra-osb-server3 ClusterIP 10.108.71.8 9001/TCP,9002/TCP 51m soainfra-osb-server4 ClusterIP 10.100.1.144 9001/TCP,9002/TCP 51m soainfra-osb-server5 ClusterIP 10.108.57.147 9001/TCP,9002/TCP 51m soainfra-soa-server1 ClusterIP None 8001/TCP,8002/TCP 51m -soainfra-soa-server2 ClusterIP None 8001/TCP,8002/TCP 51m +soainfra-soa-server2 ClusterIP 10.97.165.179 8001/TCP,8002/TCP 51m soainfra-soa-server3 ClusterIP 10.98.160.126 8001/TCP,8002/TCP 51m soainfra-soa-server4 ClusterIP 10.105.164.133 8001/TCP,8002/TCP 51m soainfra-soa-server5 ClusterIP 10.109.168.179 8001/TCP,8002/TCP 51m diff --git a/docs-source/content/soa-domains/installguide/prepare-your-environment/_index.md b/docs-source/content/soa-domains/installguide/prepare-your-environment/_index.md index cb8fb8b4a..4f652db0b 100644 --- a/docs-source/content/soa-domains/installguide/prepare-your-environment/_index.md +++ b/docs-source/content/soa-domains/installguide/prepare-your-environment/_index.md @@ -60,7 +60,7 @@ Obtain dependent images and add them to your local registry. 1. Pull the operator image: ```bash - $ docker pull ghcr.io/oracle/weblogic-kubernetes-operator:4.1.0 + $ docker pull ghcr.io/oracle/weblogic-kubernetes-operator:4.1.4 ``` ### Set up the code repository to deploy Oracle SOA Suite domains @@ -69,14 +69,14 @@ Oracle SOA Suite domain deployment on Kubernetes leverages the WebLogic Kubernet 1. Create a working directory to set up the source code: ```bash - $ mkdir $HOME/soa_23.3.2 - $ cd $HOME/soa_23.3.2 + $ mkdir $HOME/soa_23.4.2 + $ cd $HOME/soa_23.4.2 ``` 1. Download the WebLogic Kubernetes Operator source code and Oracle SOA Suite Kubernetes deployment scripts from the SOA [repository](https://github.com/oracle/fmw-kubernetes.git). Required artifacts are available at `OracleSOASuite/kubernetes`. ``` bash $ git clone https://github.com/oracle/fmw-kubernetes.git - $ export WORKDIR=$HOME/soa_23.3.2/fmw-kubernetes/OracleSOASuite/kubernetes + $ export WORKDIR=$HOME/soa_23.4.2/fmw-kubernetes/OracleSOASuite/kubernetes ``` ### Obtain the Oracle SOA Suite Docker image @@ -102,7 +102,7 @@ The Oracle SOA Suite image with the latest bundle patch and required interim pat ``` 1. Download from My Oracle Support: - - Download patch [35729956](https://support.oracle.com/epmos/faces/ui/patch/PatchDetail.jspx?patchId=35729956) for Oracle Linux 7 based container image or [35730025](https://support.oracle.com/epmos/faces/ui/patch/PatchDetail.jspx?patchId=35730025) for Oracle Linux 8 based container image from My Oracle Support (MOS). + - Download patch [35908803](https://support.oracle.com/epmos/faces/ui/patch/PatchDetail.jspx?patchId=35908803) for Oracle Linux 7 based container image or [35915091](https://support.oracle.com/epmos/faces/ui/patch/PatchDetail.jspx?patchId=35915091) for Oracle Linux 8 based container image from My Oracle Support (MOS). 
- Unzip the downloaded patch zip file. - Load the image archive using the `docker load` command. @@ -112,12 +112,12 @@ The Oracle SOA Suite image with the latest bundle patch and required interim pat Loaded image: oracle/soasuite:12.2.1.4-jdk8-- $ ``` - - Run the `docker inspect` command to verify that the downloaded image is the latest released image. The value of label `com.oracle.weblogic.imagetool.buildid` must match to `05b585f1-81a3-4456-842e-1b1e40d099c9` for Oracle Linux 7 based container image and `1185b0f2-b076-47fc-8889-f98c2abc02ad` for Oracle Linux 8 based container image. + - Run the `docker inspect` command to verify that the downloaded image is the latest released image. The value of label `com.oracle.weblogic.imagetool.buildid` must match to `40af8c36-79ce-466e-8915-483547b6aa4b` for Oracle Linux 7 based container image and `abe2fde0-4f44-422b-a50f-23fdd257bcc1` for Oracle Linux 8 based container image. For example: ```bash - $ docker inspect --format='{{ index .Config.Labels "com.oracle.weblogic.imagetool.buildid" }}' oracle/soasuite:12.2.1.4-jdk8-ol7-230823.073333 - 05b585f1-81a3-4456-842e-1b1e40d099c9 + $ docker inspect --format='{{ index .Config.Labels "com.oracle.weblogic.imagetool.buildid" }}' oracle/soasuite:12.2.1.4-jdk8-ol7-231016.113116 + 40af8c36-79ce-466e-8915-483547b6aa4b $ ``` @@ -127,7 +127,7 @@ If you want to build and use an Oracle SOA Suite Docker image with any additiona ### Install the WebLogic Kubernetes Operator -The WebLogic Kubernetes Operator supports the deployment of Oracle SOA Suite domains in the Kubernetes environment. Follow the steps in [this document](https://github.com/oracle/weblogic-kubernetes-operator/blob/v4.1.0/documentation/site/content/quickstart/install.md#install-the-operator) to install the operator. +The WebLogic Kubernetes Operator supports the deployment of Oracle SOA Suite domains in the Kubernetes environment. Follow the steps in [this document](https://github.com/oracle/weblogic-kubernetes-operator/blob/v4.1.4/documentation/site/content/quickstart/install.md#install-the-operator) to install the operator. > Note: Optionally, you can follow [these steps](https://oracle.github.io/weblogic-kubernetes-operator/samples/elastic-stack/operator/) to send the contents of the operator's logs to Elasticsearch. In the following example commands to install the WebLogic Kubernetes Operator, `opns` is the namespace and `op-sa` is the service account created for the operator: @@ -187,7 +187,7 @@ For details, see [Prepare to run a domain](https://oracle.github.io/weblogic-kub $ ./create-weblogic-credentials.sh -u weblogic -p Welcome1 -n soans -d soainfra -s soainfra-domain-credentials ``` - For more details, see [this document](https://github.com/oracle/weblogic-kubernetes-operator/blob/v4.1.0/kubernetes/samples/scripts/create-weblogic-domain-credentials/README.md). + For more details, see [this document](https://github.com/oracle/weblogic-kubernetes-operator/blob/v4.1.4/kubernetes/samples/scripts/create-weblogic-domain-credentials/README.md). You can check the secret with the `kubectl get secret` command. 
diff --git a/docs-source/content/soa-domains/installguide/prerequisites/_index.md b/docs-source/content/soa-domains/installguide/prerequisites/_index.md index db82cc13b..c3f94196b 100644 --- a/docs-source/content/soa-domains/installguide/prerequisites/_index.md +++ b/docs-source/content/soa-domains/installguide/prerequisites/_index.md @@ -10,13 +10,13 @@ This section provides information about the system requirements, limitations, li ### System requirements for Oracle SOA Suite domains -Release 23.3.2 has the following system requirements: +Release 23.4.2 has the following system requirements: -* Kubernetes 1.23.4+, 1.24.0+, 1.25.0+ and 1.26.2+ (check with `kubectl version`). +* Kubernetes 1.24.0+, 1.25.0+, 1.26.2+, and 1.27.2+ (check with `kubectl version`). * Docker 19.03.11+ (check with `docker version`) or CRI-O 1.20.2+ (check with `crictl version | grep RuntimeVersion`). * Flannel networking v0.13.0-amd64 or later (check with `docker images | grep flannel`), Calico networking v3.16.1 or later. * Helm 3.10.2+ (check with `helm version --client --short`). -* WebLogic Kubernetes Operator 4.1.0 (see the [operator releases 4.1.0](https://github.com/oracle/weblogic-kubernetes-operator/releases/tag/v4.1.0) pages). +* WebLogic Kubernetes Operator 4.1.4 (see the [operator releases 4.1.4](https://github.com/oracle/weblogic-kubernetes-operator/releases/tag/v4.1.4) pages). * You must have the `cluster-admin` role to install the operator. The operator does not need the `cluster-admin` role at runtime. For more information, see the role-based access control (RBAC) [documentation](https://oracle.github.io/weblogic-kubernetes-operator/security/rbac/). * We do not currently support running SOA in non-Linux containers. diff --git a/docs-source/content/soa-domains/patch_and_upgrade/patch-an-image/index.md b/docs-source/content/soa-domains/patch_and_upgrade/patch-an-image/index.md index e53119e3d..b895c63c1 100644 --- a/docs-source/content/soa-domains/patch_and_upgrade/patch-an-image/index.md +++ b/docs-source/content/soa-domains/patch_and_upgrade/patch-an-image/index.md @@ -43,7 +43,7 @@ Before applying the patch, stop all servers in the domain: #### Update user permissions of the domain PV storage -The Oracle SOA Suite image for release 23.3.2 has an oracle user with UID 1000, with the default group set to `root`. Before applying the patched image, update the user permissions of the domain persistent volume (PV) to set the group to `root`: +The Oracle SOA Suite image for release 23.4.2 has an oracle user with UID 1000, with the default group set to `root`. Before applying the patched image, update the user permissions of the domain persistent volume (PV) to set the group to `root`: ``` $ sudo chown -R 1000:0 /scratch/k8s_dir/SOA diff --git a/docs-source/content/soa-domains/patch_and_upgrade/upgrade-k8s-cluster.md b/docs-source/content/soa-domains/patch_and_upgrade/upgrade-k8s-cluster.md index 5e7982e01..162b36ddb 100644 --- a/docs-source/content/soa-domains/patch_and_upgrade/upgrade-k8s-cluster.md +++ b/docs-source/content/soa-domains/patch_and_upgrade/upgrade-k8s-cluster.md @@ -23,6 +23,7 @@ It is expected that there will be a down time during the upgrade of the Kubernet For example, you can upgrade from 1.x to 1.x+1, but not from 1.x to 1.x+2. To upgrade a Kubernetes version, first all the master nodes of the Kubernetes cluster must be upgraded sequentially, followed by the sequential upgrade of each worker node. 
-* See [here](https://v1-23.docs.kubernetes.io/docs/tasks/administer-cluster/kubeadm/kubeadm-upgrade/) for Kubernetes official documentation to upgrade from 1.22 to 1.23 * See [here](https://v1-24.docs.kubernetes.io/docs/tasks/administer-cluster/kubeadm/kubeadm-upgrade/) for Kubernetes official documentation to upgrade from 1.23 to 1.24 * See [here](https://v1-25.docs.kubernetes.io/docs/tasks/administer-cluster/kubeadm/kubeadm-upgrade/) for Kubernetes official documentation to upgrade from 1.24 to 1.25 +* See [here](https://v1-26.docs.kubernetes.io/docs/tasks/administer-cluster/kubeadm/kubeadm-upgrade/) for Kubernetes official documentation to upgrade from 1.25 to 1.26 +* See [here](https://v1-27.docs.kubernetes.io/docs/tasks/administer-cluster/kubeadm/kubeadm-upgrade/) for Kubernetes official documentation to upgrade from 1.26 to 1.27 diff --git a/docs-source/content/soa-domains/patch_and_upgrade/upgrade-operator-release.md b/docs-source/content/soa-domains/patch_and_upgrade/upgrade-operator-release.md index 8a5e97903..75b34e353 100644 --- a/docs-source/content/soa-domains/patch_and_upgrade/upgrade-operator-release.md +++ b/docs-source/content/soa-domains/patch_and_upgrade/upgrade-operator-release.md @@ -13,7 +13,7 @@ To upgrade the WebLogic Kubernetes operator, use the `helm upgrade` command with $ cd ${WORKDIR} $ helm upgrade \ --reuse-values \ - --set image=ghcr.io/oracle/weblogic-kubernetes-operator:4.1.0 \ + --set image=ghcr.io/oracle/weblogic-kubernetes-operator:4.1.4 \ --namespace weblogic-operator-namespace \ --wait \ weblogic-kubernetes-operator \ @@ -26,7 +26,7 @@ $ helm upgrade \ When you upgrade a 3.x operator to 4.0, the upgrade process creates a WebLogic Domain resource conversion webhook deployment and its associated resources in the same namespace. If the conversion webhook deployment already exists in another namespace, then a new conversion webhook deployment is not created. The webhook automatically and transparently upgrades the existing WebLogic Domains from the 3.x schema to the 4.0 schema. For more information, see [WebLogic Domain resource conversion webhook](https://oracle.github.io/weblogic-kubernetes-operator/managing-operators/conversion-webhook/). -If you have a single WebLogic Kubernetes Operator per Kubernetes cluster (most common use case), you can upgrade directly from any 3.x operator release to 4.1.0. The Helm chart for 4.1.0 automatically installs the schema conversion webhook. +If you have a single WebLogic Kubernetes Operator per Kubernetes cluster (most common use case), you can upgrade directly from any 3.x operator release to 4.1.4. The Helm chart for 4.1.4 automatically installs the schema conversion webhook. If there is more than one WebLogic Kubernetes Operator in a single Kubernetes cluster: diff --git a/docs-source/content/soa-domains/release-notes.md b/docs-source/content/soa-domains/release-notes.md index 4f032a452..56af6cfb3 100644 --- a/docs-source/content/soa-domains/release-notes.md +++ b/docs-source/content/soa-domains/release-notes.md @@ -12,6 +12,7 @@ Review the latest changes and known issues for Oracle SOA Suite on Kubernetes. | Date | Version | Change | | --- | --- | --- | +|November 30, 2023 | 23.4.2 | Supports Oracle SOA Suite 12.2.1.4 domains deployment using October 2023 PSU and known bug fixes. Support for WebLogic Kubernetes Operator 4.1.4. 
Oracle SOA Suite 12.2.1.4 Docker image for this release can be downloaded from My Oracle Support (MOS patch [35908803](https://support.oracle.com/epmos/faces/ui/patch/PatchDetail.jspx?patchId=35908803) for Oracle Linux 7 and MOS patch [35915091](https://support.oracle.com/epmos/faces/ui/patch/PatchDetail.jspx?patchId=35915091) for Oracle Linux 8) and [container-registry.oracle.com](https://container-registry.oracle.com). |August 31, 2023 | 23.3.2 | Supports Oracle SOA Suite 12.2.1.4 domains deployment using July 2023 PSU and known bug fixes. Support for WebLogic Kubernetes Operator 4.1.0. Oracle SOA Suite 12.2.1.4 Docker image for this release can be downloaded from My Oracle Support (MOS patch [35729956](https://support.oracle.com/epmos/faces/ui/patch/PatchDetail.jspx?patchId=35729956) for Oracle Linux 7 and MOS patch [35730025](https://support.oracle.com/epmos/faces/ui/patch/PatchDetail.jspx?patchId=35730025) for Oracle Linux 8) and [container-registry.oracle.com](https://container-registry.oracle.com). |May 31, 2023 | 23.2.2 | Supports Oracle SOA Suite 12.2.1.4 domains deployment using April 2023 PSU and known bug fixes. Support for WebLogic Kubernetes Operator 4.0.6. Container images based on Oracle Linux 8 are now supported. Oracle SOA Suite 12.2.1.4 Docker image for this release can be downloaded from My Oracle Support (MOS patch [35269141](https://support.oracle.com/epmos/faces/ui/patch/PatchDetail.jspx?patchId=35269141) for Oracle Linux 7 and MOS patch [35285229 ](https://support.oracle.com/epmos/faces/ui/patch/PatchDetail.jspx?patchId=35285229) for Oracle Linux 8) and [container-registry.oracle.com](https://container-registry.oracle.com). |February 28, 2023 | 23.1.2 | Supports Oracle SOA Suite 12.2.1.4 domains deployment using January 2023 PSU and known bug fixes. Support for WebLogic Kubernetes Operator 4.0.4. Oracle SOA Suite 12.2.1.4 Docker image for this release can be downloaded from My Oracle Support (MOS patch [34980883](https://support.oracle.com/epmos/faces/ui/patch/PatchDetail.jspx?patchId=34980883)) and [container-registry.oracle.com](https://container-registry.oracle.com/). diff --git a/docs/23.4.2/404.html b/docs/23.4.2/404.html index 3fcfd6bdc..eaf150a62 100644 --- a/docs/23.4.2/404.html +++ b/docs/23.4.2/404.html @@ -9,13 +9,13 @@ 404 Page not found - - - - - - - + + + + + + + + + + + + + + + + +
Apache web tier

This section provides information about how to install and configure the Apache web tier to load balance Oracle SOA Suite domain clusters. You can configure the Apache web tier for non-SSL and SSL termination access of the application URL.

+

Follow these steps to set up the Apache web tier as a load balancer for an Oracle SOA Suite domain in a Kubernetes cluster:

+
  1. Build the Apache web tier image
  2. Create the Apache plugin configuration file
  3. Prepare the certificate and private key
  4. Install the Apache web tier Helm chart
  5. Verify domain application URL access
  6. Uninstall Apache web tier

Build the Apache web tier image

+

Refer to the sample to build the Apache web tier Docker image.

+

Create the Apache plugin configuration file

+
    +
  1. +

    The configuration file named custom_mod_wl_apache.conf must contain the URL routing rules for the Oracle SOA Suite applications deployed in the domain that need to be accessible externally. Update this file with values based on your environment; an illustrative sketch is shown after this list.

    + +
    +
    + + + + + Click here to see the sample content of the configuration file custom_mod_wl_apache.conf for soa domain + + +
    + +
    +
  2. +
  3. +

    Create a PV and PVC (pv-claim-name) that can be used to store the custom_mod_wl_apache.conf. Refer to the Sample for creating a PV or PVC.

    +
  4. +
+
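The following is a minimal, illustrative sketch of a custom_mod_wl_apache.conf. It assumes the default Kubernetes service names used elsewhere in this document (soainfra-adminserver and soainfra-cluster-soa-cluster) and the default ports 7001 and 8001; the actual routing rules must match your domain and the applications you expose.

    # Illustrative sketch only -- adjust hosts, ports, and locations for your domain
    <IfModule mod_weblogic.c>
      WebLogicHost soainfra-adminserver
      WebLogicPort 7001
    </IfModule>

    # Admin Server applications
    <Location /console>
      SetHandler weblogic-handler
      WebLogicHost soainfra-adminserver
      WebLogicPort 7001
    </Location>
    <Location /em>
      SetHandler weblogic-handler
      WebLogicHost soainfra-adminserver
      WebLogicPort 7001
    </Location>

    # SOA cluster applications routed to the SOA cluster service
    <Location /soa-infra>
      WLSRequest On
      WebLogicCluster soainfra-cluster-soa-cluster:8001
    </Location>
    <Location /soa/composer>
      WLSRequest On
      WebLogicCluster soainfra-cluster-soa-cluster:8001
    </Location>
    <Location /integration/worklistapp>
      WLSRequest On
      WebLogicCluster soainfra-cluster-soa-cluster:8001
    </Location>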

Prepare the certificate and private key

+
    +
  1. +

    (For the SSL termination configuration only) Run the following commands to generate your own certificate and private key using openssl.

    +
     $ cd ${WORKDIR}
    + $ cd charts/apache-samples/custom-sample
    + $ export VIRTUAL_HOST_NAME=WEBLOGIC_HOST
    + $ export SSL_CERT_FILE=WEBLOGIC_HOST.crt
    + $ export SSL_CERT_KEY_FILE=WEBLOGIC_HOST.key
    + $ sh certgen.sh
    +
    +

    NOTE: Replace WEBLOGIC_HOST with the host name on which Apache web tier is to be installed.

    +
    + +
    +
    + + + + + Click here to see the output of the certificate generation + + +
    + +
    +
  2. +
  3. +

    Prepare input values for the Apache web tier Helm chart.

    +

    Run the following commands to prepare the input value file for the Apache web tier Helm chart.

    +
    $ base64 -i ${SSL_CERT_FILE} | tr -d '\n'
    +$ base64 -i ${SSL_CERT_KEY_FILE} | tr -d '\n'
    +$ touch input.yaml
    +

    Update the input parameters file, charts/apache-samples/custom-sample/input.yaml; an illustrative excerpt is shown after this list.

    + +
    +
    + + + + + Click here to see the snapshot of the sample input.yaml file + + +
    + +
    +
  4. +
+
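The following is an illustrative excerpt of input.yaml. The field names shown here (persistentVolumeClaimName, virtualHostName, customCert, customKey) are assumptions based on the sample chart; confirm them against the sample file shipped in charts/apache-samples/custom-sample before use. The certificate and key values are the base64-encoded strings produced by the base64 commands above.

    # Illustrative excerpt only -- verify field names against the sample input.yaml
    # PVC holding custom_mod_wl_apache.conf (created earlier)
    persistentVolumeClaimName: <pv-claim-name>
    # Host name used when generating the certificate
    virtualHostName: <WEBLOGIC_HOST>
    # Base64-encoded certificate and key (output of the base64 commands above)
    customCert: <base64-encoded-certificate>
    customKey: <base64-encoded-key>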

Install the Apache web tier Helm chart

+
    +
  1. +

    Install the Apache web tier Helm chart to the domain namespace (for example soans) with the specified input parameters:

    +
    $ cd ${WORKDIR}/charts
    +$ helm install apache-webtier --values apache-samples/custom-sample/input.yaml --namespace soans apache-webtier --set image=oracle/apache:12.2.1.3
    +
  2. +
  3. +

    Check the status of the Apache web tier:

    +
     $ kubectl get all -n soans | grep apache
    +

    Sample output of the status of the Apache web tier:

    +
    pod/apache-webtier-apache-webtier-65f69dc6bc-zg5pj   1/1     Running     0          22h
    +service/apache-webtier-apache-webtier   NodePort       10.108.29.98     <none>        80:30305/TCP,4433:30443/TCP   22h
    +deployment.apps/apache-webtier-apache-webtier   1/1     1            1           22h
    +replicaset.apps/apache-webtier-apache-webtier-65f69dc6bc   1         1         1       22h
    +
  4. +
+

Verify domain application URL access

+

After the Apache web tier load balancer is running, verify that the domain applications are accessible through the load balancer ports 30305 (non-SSL) and 30443 (SSL). The application URLs for a domain of type soa are listed below (a quick curl check is sketched at the end of this section):

+
+

Note: Port 30305 is the LOADBALANCER-Non-SSLPORT and port 30443 is LOADBALANCER-SSLPORT.

+
+
NONSSL configuration
+
 http://${LOADBALANCER-HOSTNAME}:${LOADBALANCER-Non-SSLPORT}/weblogic/ready
+ http://${LOADBALANCER-HOSTNAME}:${LOADBALANCER-Non-SSLPORT}/console
+ http://${LOADBALANCER-HOSTNAME}:${LOADBALANCER-Non-SSLPORT}/em
+ http://${LOADBALANCER-HOSTNAME}:${LOADBALANCER-Non-SSLPORT}/soa-infra
+ http://${LOADBALANCER-HOSTNAME}:${LOADBALANCER-Non-SSLPORT}/soa/composer
+ http://${LOADBALANCER-HOSTNAME}:${LOADBALANCER-Non-SSLPORT}/integration/worklistapp
+
SSL configuration
+
https://${LOADBALANCER-HOSTNAME}:${LOADBALANCER-SSLPORT}/weblogic/ready
+https://${LOADBALANCER-HOSTNAME}:${LOADBALANCER-SSLPORT}/console
+https://${LOADBALANCER-HOSTNAME}:${LOADBALANCER-SSLPORT}/em
+https://${LOADBALANCER-HOSTNAME}:${LOADBALANCER-SSLPORT}/soa-infra
+https://${LOADBALANCER-HOSTNAME}:${LOADBALANCER-SSLPORT}/soa/composer
+https://${LOADBALANCER-HOSTNAME}:${LOADBALANCER-SSLPORT}/integration/worklistapp
+
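As a quick check, the WebLogic ReadyApp URL should return an HTTP 200 status code once routing is working. This is a sketch; replace the placeholder with your load balancer host name:

    $ curl -s -o /dev/null -w "%{http_code}\n" http://<LOADBALANCER-HOSTNAME>:30305/weblogic/ready
    200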

Uninstall Apache web tier

+
$ helm delete apache-webtier -n soans
+
+ + + + + + + + + + + + + + + + + + + diff --git a/docs/23.4.2/soa-domains/adminguide/configure-load-balancer/index.html b/docs/23.4.2/soa-domains/adminguide/configure-load-balancer/index.html new file mode 100644 index 000000000..4819c0512 --- /dev/null +++ b/docs/23.4.2/soa-domains/adminguide/configure-load-balancer/index.html @@ -0,0 +1,5605 @@ + + + + + + + + + + + + Set up a load balancer :: Oracle Fusion Middleware on Kubernetes + + + + + + + + + + + + + + + + + + + + + + + + + +
Set up a load balancer

The WebLogic Kubernetes Operator supports ingress-based load balancers such as Traefik and NGINX (kubernetes/ingress-nginx). It also supports the Apache web tier load balancer.

+ + + + + + + +
    + + + + + + + + + + + + + + + + + +

    +Traefik +

    + + + + + +

    Configure the ingress-based Traefik load balancer for Oracle SOA Suite domains.

    + + + + + + + + + + + + +

    +NGINX +

    + + + + + +

    Configure the ingress-based NGINX load balancer for Oracle SOA Suite domains.

    + + + + + + + + + + + + +

    +Apache web tier +

    + + + + + +

    Configure the Apache web tier load balancer for Oracle SOA Suite domains.

    + + + + + + + + +
+ + + + + + + + + + + + + + + + + + diff --git a/docs/23.4.2/soa-domains/adminguide/configure-load-balancer/index.xml b/docs/23.4.2/soa-domains/adminguide/configure-load-balancer/index.xml new file mode 100644 index 000000000..cd06c9e68 --- /dev/null +++ b/docs/23.4.2/soa-domains/adminguide/configure-load-balancer/index.xml @@ -0,0 +1,49 @@ + + + + Set up a load balancer on Oracle Fusion Middleware on Kubernetes + /fmw-kubernetes/23.4.2/soa-domains/adminguide/configure-load-balancer/ + Recent content in Set up a load balancer on Oracle Fusion Middleware on Kubernetes + Hugo -- gohugo.io + en-us + Fri, 22 Feb 2019 15:44:42 -0500 + + + + + + Traefik + /fmw-kubernetes/23.4.2/soa-domains/adminguide/configure-load-balancer/traefik/ + Fri, 22 Feb 2019 15:44:42 -0500 + + /fmw-kubernetes/23.4.2/soa-domains/adminguide/configure-load-balancer/traefik/ + This section provides information about how to install and configure the ingress-based Traefik load balancer (version 2.2.1 or later for production deployments) to load balance Oracle SOA Suite domain clusters. You can configure Traefik for non-SSL, SSL termination, and end-to-end SSL access of the application URL. +Follow these steps to set up Traefik as a load balancer for an Oracle SOA Suite domain in a Kubernetes cluster: + Install the Traefik (ingress-based) load balancer Create an Ingress for the domain Verify domain application URL access Uninstall the Traefik ingress Uninstall Traefik Install the Traefik (ingress-based) load balancer Use Helm to install the Traefik (ingress-based) load balancer. + + + + NGINX + /fmw-kubernetes/23.4.2/soa-domains/adminguide/configure-load-balancer/nginx/ + Fri, 22 Feb 2019 15:44:42 -0500 + + /fmw-kubernetes/23.4.2/soa-domains/adminguide/configure-load-balancer/nginx/ + This section provides information about how to install and configure the ingress-based NGINX load balancer to load balance Oracle SOA Suite domain clusters. You can configure NGINX for non-SSL, SSL termination, and end-to-end SSL access of the application URL. +Follow these steps to set up NGINX as a load balancer for an Oracle SOA Suite domain in a Kubernetes cluster: +See the official installation document for prerequisites. + Install the NGINX load balancer for non-SSL and SSL termination configuration Generate secret for SSL access Install NGINX load balancer for end-to-end SSL configuration Configure NGINX to manage ingresses Verify domain application URL access Uninstall NGINX ingress Uninstall NGINX To get repository information, enter the following Helm commands: + + + + Apache web tier + /fmw-kubernetes/23.4.2/soa-domains/adminguide/configure-load-balancer/apache/ + Fri, 22 Feb 2019 15:44:42 -0500 + + /fmw-kubernetes/23.4.2/soa-domains/adminguide/configure-load-balancer/apache/ + This section provides information about how to install and configure the Apache web tier to load balance Oracle SOA Suite domain clusters. You can configure Apache web tier for non-SSL and SSL termination access of the application URL. +Follow these steps to set up the Apache web tier as a load balancer for an Oracle SOA Suite domain in a Kubernetes cluster: + Build the Apache web tier image Create the Apache plugin configuration file Prepare the certificate and private key Install the Apache web tier Helm chart Verify domain application URL access Uninstall Apache web tier Build the Apache web tier image Refer to the sample, to build the Apache web tier Docker image. 
+ + + + \ No newline at end of file diff --git a/docs/23.4.2/soa-domains/adminguide/configure-load-balancer/nginx/index.html b/docs/23.4.2/soa-domains/adminguide/configure-load-balancer/nginx/index.html new file mode 100644 index 000000000..978a9cc66 --- /dev/null +++ b/docs/23.4.2/soa-domains/adminguide/configure-load-balancer/nginx/index.html @@ -0,0 +1,6005 @@ + + + + + + + + + + + + NGINX :: Oracle Fusion Middleware on Kubernetes + + + + + + + + + + + + + + + + + + + + + + + + + +
NGINX

This section provides information about how to install and configure the ingress-based NGINX load balancer to load balance Oracle SOA Suite domain clusters. You can configure NGINX for non-SSL, SSL termination, and end-to-end SSL access of the application URL.

+

Follow these steps to set up NGINX as a load balancer for an Oracle SOA Suite domain in a Kubernetes cluster:

+

See the official installation document for prerequisites.

+
  1. Install the NGINX load balancer for non-SSL and SSL termination configuration
  2. Generate secret for SSL access
  3. Install NGINX load balancer for end-to-end SSL configuration
  4. Configure NGINX to manage ingresses
  5. Verify domain application URL access
  6. Uninstall NGINX ingress
  7. Uninstall NGINX

To get repository information, enter the following Helm commands:

+
  $ helm repo add ingress-nginx https://kubernetes.github.io/ingress-nginx
+  $ helm repo update
+
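Optionally, you can list the chart versions available in the repository before installing (a quick check):

    $ helm search repo ingress-nginx/ingress-nginx --versions | head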

Install the NGINX load balancer for non-SSL and SSL termination configuration

+
    +
  1. +

    Deploy the ingress-nginx controller by using Helm on the domain namespace (a quick status check follows this list):

    +
     $ helm install nginx-ingress -n soans \
    +        --set controller.service.type=NodePort \
    +        --set controller.admissionWebhooks.enabled=false \
    +        ingress-nginx/ingress-nginx
    +
    +
    +
    + + + + + Click here to see the sample output. + + +
    + +
    +
  2. +
+
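After the install completes, a quick way to confirm that the controller pod and NodePort service were created in the domain namespace (a sketch, assuming the standard labels applied by the ingress-nginx chart) is:

    $ kubectl get pods,svc -n soans -l app.kubernetes.io/name=ingress-nginx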

Generate secret for SSL access

+
    +
  1. +

    For secured access (SSL and E2ESSL) to the Oracle SOA Suite application, create a certificate and generate secrets (a quick verification check follows this list):

    +
     $ openssl req -x509 -nodes -days 365 -newkey rsa:2048 -keyout /tmp/tls1.key -out /tmp/tls1.crt -subj "/CN=domain1.org"
    + $ kubectl -n soans create secret tls soainfra-tls-cert --key /tmp/tls1.key --cert /tmp/tls1.crt
    +
    +

    Note: The value of CN is the host on which this ingress is to be deployed, and the secret name should be <domainUID>-tls-cert.

    +
    +
  2. +
+
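To confirm that the secret and certificate were created as expected (a quick check using the names from the commands above):

    $ kubectl get secret soainfra-tls-cert -n soans
    $ openssl x509 -in /tmp/tls1.crt -noout -subject -dates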

Install NGINX load balancer for end-to-end SSL configuration

+
    +
  1. +

    Deploy the ingress-nginx controller by using Helm on the domain namespace:

    +
     $ helm install nginx-ingress -n soans \
    +       --set controller.extraArgs.default-ssl-certificate=soans/soainfra-tls-cert \
    +       --set controller.service.type=NodePort \
    +       --set controller.admissionWebhooks.enabled=false \
    +       --set controller.extraArgs.enable-ssl-passthrough=true  \
    +        ingress-nginx/ingress-nginx
    +
    +
    +
    + + + + + Click here to see the sample output. + + +
    + +
    +
  2. +
  3. +

    Check the status of the deployed ingress controller:

    +
    $ kubectl --namespace soans get services | grep ingress-nginx-controller
    +

    Sample output:

    +
     nginx-ingress-ingress-nginx-controller   NodePort    10.106.186.235   <none>        80:32125/TCP,443:31376/TCP   19m
    +
  4. +
+

Configure NGINX to manage ingresses

+
    +
  1. +

    Choose an appropriate LOADBALANCER_HOSTNAME for accessing the Oracle SOA Suite domain application URLs.

    +
    $ export LOADBALANCER_HOSTNAME=<LOADBALANCER_HOSTNAME>
    +

    For example, if you are executing the commands from a master node terminal, where the master hostname is LOADBALANCER_HOSTNAME:

    +
    $ export LOADBALANCER_HOSTNAME=$(hostname -f)
    +
  2. +
  3. +

    Create an ingress for the domain in the domain namespace by using the sample Helm chart. Here, path-based routing is used for the ingress. Sample values for the default configuration are shown in the file ${WORKDIR}/charts/ingress-per-domain/values.yaml. By default, type is TRAEFIK, sslType is NONSSL, and domainType is soa. These values can be overridden by passing values through the command line or can be edited in the sample file values.yaml; an illustrative excerpt appears after this list.
    If needed, you can update the ingress YAML file to define more path rules (in section spec.rules.host.http.paths) based on the domain application URLs that need to be accessed. Update the template YAML file for the NGINX load balancer located at ${WORKDIR}/charts/ingress-per-domain/templates/nginx-ingress.yaml.

    +
    +

    Note: See here for all the configuration parameters.

    +
    +
     $ cd ${WORKDIR}
    + $ helm install soa-nginx-ingress  charts/ingress-per-domain \
    +     --namespace soans \
    +     --values charts/ingress-per-domain/values.yaml \
    +     --set "nginx.hostname=${LOADBALANCER_HOSTNAME}" \
    +     --set type=NGINX
    +

    Sample output:

    +
    NAME: soa-nginx-ingress
    +LAST DEPLOYED: Fri Jul 24 09:34:03 2020
    +NAMESPACE: soans
    +STATUS: deployed
    +REVISION: 1
    +TEST SUITE: None
    +
  4. +
  5. +

    Install ingress-per-domain using Helm for SSL termination configuration:

    +
     $ cd ${WORKDIR}
    + $ helm install soa-nginx-ingress  charts/ingress-per-domain \
    +     --namespace soans \
    +     --values charts/ingress-per-domain/values.yaml \
    +     --set "nginx.hostname=${LOADBALANCER_HOSTNAME}" \
    +     --set type=NGINX --set sslType=SSL
    +

    Sample output:

    +
     NAME: soa-nginx-ingress
    + LAST DEPLOYED: Fri Jul 24 09:34:03 2020
    + NAMESPACE: soans
    + STATUS: deployed
    + REVISION: 1
    + TEST SUITE: None
    +
  6. +
  7. +

    Install ingress-per-domain using Helm for E2ESSL configuration.

    +
    +

    Note: To use the E2ESSL configuration, you must have created the Oracle SOA Suite domain with sslEnabled set to true. See Create Oracle SOA Suite domains.

    +
    +
     $ cd ${WORKDIR}
    + $ helm install soa-nginx-ingress  charts/ingress-per-domain \
    +     --namespace soans \
    +     --values charts/ingress-per-domain/values.yaml \
    +     --set type=NGINX --set sslType=E2ESSL
    +

    Sample output:

    +
     NAME: soa-nginx-ingress
    + LAST DEPLOYED: Fri Jul 24 09:34:03 2020
    + NAMESPACE: soans
    + STATUS: deployed
    + REVISION: 1
    + TEST SUITE: None
    +
  8. +
  9. +

    For NONSSL access to the Oracle SOA Suite application, get the details of the services by the ingress:

    +
    $ kubectl describe ingress soainfra-nginx -n soans
    +
    +
    +
    + + + + + Click here to see the sample output of the services supported by the above deployed ingress. + + +
    + +
    +
  10. +
  11. +

    For SSL access to the Oracle SOA Suite application, get the details of the services by the above deployed ingress:

    +
     $ kubectl describe ingress soainfra-nginx -n soans
    +
    +
    +
    + + + + + Click here to see the sample output of the services supported by the above deployed ingress. + + +
    + +
    +
  12. +
  13. +

    For E2ESSL access to the Oracle SOA Suite application, get the details of the services by the above deployed ingress:

    +
     $  kubectl describe ingress  soainfra-nginx-e2essl -n soans
    +
    +
    +
    + + + + + Click here to see the sample output of the services supported by the above deployed ingress. + + +
    + +
    +
  14. +
+
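For reference, an illustrative excerpt of ${WORKDIR}/charts/ingress-per-domain/values.yaml is shown below. The exact field names and defaults may differ between releases, so treat this as a sketch and confirm against the file in your checkout:

    # Illustrative excerpt only -- confirm against the values.yaml in your release
    type: TRAEFIK        # Load balancer type: TRAEFIK or NGINX
    sslType: NONSSL      # NONSSL, SSL, or E2ESSL
    domainType: soa      # soa, osb, or soaosb

    wlsDomain:
      domainUID: soainfra
      adminServerName: AdminServer
      adminServerPort: 7001
      soaClusterName: soa_cluster
      soaManagedServerPort: 8001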

Verify domain application URL access

+
NONSSL configuration
+
    +
  • +

    Get the LOADBALANCER_NON_SSLPORT NodePort of NGINX using the command:

    +
    $ LOADBALANCER_NON_SSLPORT=$(kubectl --namespace soans  get services -o jsonpath="{.spec.ports[0].nodePort}" nginx-ingress-ingress-nginx-controller)
    +$ echo ${LOADBALANCER_NON_SSLPORT}
    +
  • +
  • +

    Verify that the Oracle SOA Suite domain application URLs are accessible through the LOADBALANCER_NON_SSLPORT (a quick curl check follows this list):

    +
    http://${LOADBALANCER_HOSTNAME}:${LOADBALANCER_NON_SSLPORT}/weblogic/ready
    +http://${LOADBALANCER_HOSTNAME}:${LOADBALANCER_NON_SSLPORT}/console
    +http://${LOADBALANCER_HOSTNAME}:${LOADBALANCER_NON_SSLPORT}/em
    +http://${LOADBALANCER_HOSTNAME}:${LOADBALANCER_NON_SSLPORT}/soa-infra
    +http://${LOADBALANCER_HOSTNAME}:${LOADBALANCER_NON_SSLPORT}/soa/composer
    +http://${LOADBALANCER_HOSTNAME}:${LOADBALANCER_NON_SSLPORT}/integration/worklistapp
    +
  • +
+
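For example, once the ingress is routing correctly, the WebLogic ReadyApp URL should return an HTTP 200 status code (expected output shown; it may differ while the servers are still starting):

    $ curl -s -o /dev/null -w "%{http_code}\n" http://${LOADBALANCER_HOSTNAME}:${LOADBALANCER_NON_SSLPORT}/weblogic/ready
    200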
SSL configuration
+
    +
  • +

    Get the LOADBALANCER_SSLPORT NodePort of NGINX using the command:

    +
    $ LOADBALANCER_SSLPORT=$(kubectl --namespace soans  get services -o jsonpath="{.spec.ports[1].nodePort}" nginx-ingress-ingress-nginx-controller)
    +$ echo ${LOADBALANCER_SSLPORT}
    +
  • +
  • +

    Verify that the Oracle SOA Suite domain application URLs are accessible through the LOADBALANCER_SSLPORT:

    +
    https://${LOADBALANCER_HOSTNAME}:${LOADBALANCER_SSLPORT}/weblogic/ready
    +https://${LOADBALANCER_HOSTNAME}:${LOADBALANCER_SSLPORT}/console
    +https://${LOADBALANCER_HOSTNAME}:${LOADBALANCER_SSLPORT}/em
    +https://${LOADBALANCER_HOSTNAME}:${LOADBALANCER_SSLPORT}/soa-infra
    +https://${LOADBALANCER_HOSTNAME}:${LOADBALANCER_SSLPORT}/soa/composer
    +https://${LOADBALANCER_HOSTNAME}:${LOADBALANCER_SSLPORT}/integration/worklistapp
    +
  • +
+
E2ESSL configuration
+
    +
  • +

    To access the SOA Suite domain application URLs from a remote browser, update the browser host's /etc/hosts file (on Windows, C:\Windows\System32\Drivers\etc\hosts) with the following entries, using the IP address of the host on which the ingress is deployed:

    +
    X.X.X.X  admin.org
    +X.X.X.X  soa.org
    +X.X.X.X  osb.org
    +
    +

    Note:

    +
      +
    • The value of X.X.X.X is the host IP address on which this ingress is deployed.
    • +
    • If you are behind a corporate proxy, make sure to update the browser proxy settings appropriately to access the host names added to the /etc/hosts file.
    • +
    +
    +
  • +
  • +

    Get the LOADBALANCER_SSLPORT NodePort of NGINX using the command:

    +
    $ LOADBALANCER_SSLPORT=$(kubectl --namespace soans  get services -o jsonpath="{.spec.ports[1].nodePort}" nginx-ingress-ingress-nginx-controller)
    +$ echo ${LOADBALANCER_SSLPORT}
    +
  • +
  • +

    Verify that the Oracle SOA Suite domain application URLs are accessible through LOADBALANCER_SSLPORT:

    +
    https://admin.org:${LOADBALANCER_SSLPORT}/weblogic/ready
    +https://admin.org:${LOADBALANCER_SSLPORT}/console
    +https://admin.org:${LOADBALANCER_SSLPORT}/em
    +https://soa.org:${LOADBALANCER_SSLPORT}/soa-infra
    +https://soa.org:${LOADBALANCER_SSLPORT}/soa/composer
    +https://soa.org:${LOADBALANCER_SSLPORT}/integration/worklistapp
    +
    +
  • +
+
+

Note: These are the default host names. If you have updated the host names in values.yaml, use the updated values.

+
+

Uninstall NGINX ingress

+

Uninstall and delete the ingress-nginx deployment:

+
$ helm delete soa-nginx-ingress  -n soans
+

Uninstall NGINX

+
$ helm delete nginx-ingress -n soans
+
+ + + + + + + + + + + + + + + + + + + diff --git a/docs/23.4.2/soa-domains/adminguide/configure-load-balancer/traefik/index.html b/docs/23.4.2/soa-domains/adminguide/configure-load-balancer/traefik/index.html new file mode 100644 index 000000000..d21ee15b5 --- /dev/null +++ b/docs/23.4.2/soa-domains/adminguide/configure-load-balancer/traefik/index.html @@ -0,0 +1,6001 @@ + + + + + + + + + + + + Traefik :: Oracle Fusion Middleware on Kubernetes + + + + + + + + + + + + + + + + + + + + + + + + + +
Traefik

This section provides information about how to install and configure the ingress-based Traefik load balancer (version 2.2.1 or later for production deployments) to load balance Oracle SOA Suite domain clusters. You can configure Traefik for non-SSL, SSL termination, and end-to-end SSL access of the application URL.

+

Follow these steps to set up Traefik as a load balancer for an Oracle SOA Suite domain in a Kubernetes cluster:

+
  1. Install the Traefik (ingress-based) load balancer
  2. Create an Ingress for the domain
  3. Verify domain application URL access
  4. Uninstall the Traefik ingress
  5. Uninstall Traefik

Install the Traefik (ingress-based) load balancer

+
    +
  1. +

    Use Helm to install the Traefik (ingress-based) load balancer. Use the values.yaml file in the sample, but set kubernetes.namespaces specifically.

    +
     $ cd ${WORKDIR}
    + $ kubectl create namespace traefik
    + $ helm repo add traefik https://helm.traefik.io/traefik --force-update
    +

    Sample output:

    +
     "traefik" has been added to your repositories
    +
  2. +
  3. +

    Install Traefik:

    +
     $ helm install traefik  traefik/traefik \
    +      --namespace traefik \
    +      --values charts/traefik/values.yaml \
    +      --set  "kubernetes.namespaces={traefik}" \
    +      --set "service.type=NodePort" --wait
    +
    +
    +
    + + + + + Click here to see the sample output. + + +
    + +
    +

    A sample values.yaml for deployment of Traefik:

    +
    image:
    +name: traefik
    +pullPolicy: IfNotPresent
    +ingressRoute:
    +dashboard:
    +   enabled: true
    +   # Additional ingressRoute annotations (e.g. for kubernetes.io/ingress.class)
    +   annotations: {}
    +   # Additional ingressRoute labels (e.g. for filtering IngressRoute by custom labels)
    +   labels: {}
    +providers:
    +kubernetesCRD:
    +   enabled: true
    +kubernetesIngress:
    +   enabled: true
    +   # IP used for Kubernetes Ingress endpoints
    +ports:
    +traefik:
    +   port: 9000
    +   expose: true
    +   # The exposed port for this service
    +   exposedPort: 9000
    +   # The port protocol (TCP/UDP)
    +   protocol: TCP
    +web:
    +   port: 8000
    +   # hostPort: 8000
    +   expose: true
    +   exposedPort: 30305
    +   nodePort: 30305
    +   # The port protocol (TCP/UDP)
    +   protocol: TCP
    +   # Use nodeport if set. This is useful if you have configured Traefik in a
    +   # LoadBalancer
    +   # nodePort: 32080
    +   # Port Redirections
    +   # Added in 2.2, you can make permanent redirects via entrypoints.
    +   # https://docs.traefik.io/routing/entrypoints/#redirection
    +   # redirectTo: websecure
    +websecure:
    +   port: 8443
    +#    # hostPort: 8443
    +   expose: true
    +   exposedPort: 30443
    +   # The port protocol (TCP/UDP)
    +   protocol: TCP
    +   nodePort: 30443
    +
  4. +
  5. +

    Verify the Traefik status and find the port number of the SSL and non-SSL services:

    +
     $ kubectl get all -n traefik
    +
    +
    +
    + + + + + Click here to see the sample output. + + +
    + +
    +
  6. +
  7. +

    Access the Traefik dashboard through the URL http://<MASTERNODE-HOSTNAME>:31288, with the HTTP host traefik.example.com:

    +
    $ curl -H "host: <MASTERNODE-HOSTNAME>" http://<MASTERNODE-HOSTNAME>:31288/dashboard/
    +
    +

    Note: Make sure that you specify a fully qualified node name for <MASTERNODE-HOSTNAME>

    +
    +
  8. +
  9. +

    Configure Traefik to manage ingresses created in this namespace, where traefik is the Traefik namespace and soans is the namespace of the domain:

    +
  10. +
+
    $ helm upgrade traefik traefik/traefik --namespace traefik     --reuse-values \
+    --set "kubernetes.namespaces={traefik,soans}"
+
+
+
+ + + + + Click here to see the sample output. + + +
+ +
+

Create an ingress for the domain

+

Create an ingress for the domain in the domain namespace by using the sample Helm chart. Here path-based routing is used for ingress. +Sample values for default configuration are shown in the file ${WORKDIR}/charts/ingress-per-domain/values.yaml. +By default, type is TRAEFIK, sslType is NONSSL, and domainType is soa. These values can be overridden by passing values through the command line or can be edited in the sample file values.yaml based on the type of configuration (NONSSL, SSL, and E2ESSL).
+If needed, you can update the ingress YAML file to define more path rules (in section spec.rules.host.http.paths) based on the domain application URLs that need to be accessed. The template YAML file for the Traefik (ingress-based) load balancer is located at ${WORKDIR}/charts/ingress-per-domain/templates/traefik-ingress.yaml.

+
+

Note: See here for all the configuration parameters.

+
+
    +
  1. +

    Choose an appropriate LOADBALANCER_HOSTNAME for accessing the Oracle SOA Suite domain application URLs.

    +
    $ export LOADBALANCER_HOSTNAME=<LOADBALANCER_HOSTNAME>
    +

    For example, if you are executing the commands from a master node terminal, where the master hostname is LOADBALANCER_HOSTNAME:

    +
    $ export LOADBALANCER_HOSTNAME=$(hostname -f)
    +
  2. +
  3. +

    Install ingress-per-domain using Helm for NONSSL configuration:

    +
     $ cd ${WORKDIR}
    + $ helm install soa-traefik-ingress  \
    +     charts/ingress-per-domain \
    +     --namespace soans \
    +     --values charts/ingress-per-domain/values.yaml \
    +     --set "traefik.hostname=${LOADBALANCER_HOSTNAME}"
    +

    Sample output:

    +
      NAME: soa-traefik-ingress
    +  LAST DEPLOYED: Mon Jul 20 11:44:13 2020
    +  NAMESPACE: soans
    +  STATUS: deployed
    +  REVISION: 1
    +  TEST SUITE: None
    +
  4. +
  5. +

    For secured access (SSL termination and E2ESSL) to the Oracle SOA Suite application, create a certificate, and generate a Kubernetes secret:

    +
     $ openssl req -x509 -nodes -days 365 -newkey rsa:2048 -keyout /tmp/tls1.key -out /tmp/tls1.crt -subj "/CN=*"
    + $ kubectl -n soans create secret tls soainfra-tls-cert --key /tmp/tls1.key --cert /tmp/tls1.crt
    +
  6. +
  7. +

    Create the Traefik TLSStore custom resource.

    +

    In case of SSL termination, Traefik should be configured to use the user-defined SSL certificate. If the user-defined SSL certificate is not configured, Traefik will create a default SSL certificate. To configure a user-defined SSL certificate for Traefik, use the TLSStore custom resource. The Kubernetes secret created with the SSL certificate should be referenced in the TLSStore object. Run the following command to create the TLSStore:

    +
    $ cat <<EOF | kubectl apply -f -
    +apiVersion: traefik.containo.us/v1alpha1
    +kind: TLSStore
    +metadata:
    +  name: default
    +  namespace: soans
    +spec:
    +  defaultCertificate:
    +    secretName:  soainfra-tls-cert   
    +EOF
    +
  8. +
  9. +

    Install ingress-per-domain using Helm for SSL configuration.

    +

    The Kubernetes secret name should be updated in the template file.

    +

    The template file also contains the following annotations:

    +
     traefik.ingress.kubernetes.io/router.entrypoints: websecure
    + traefik.ingress.kubernetes.io/router.tls: "true"
    + traefik.ingress.kubernetes.io/router.middlewares: soans-wls-proxy-ssl@kubernetescrd
    +

    The entry point for SSL termination access and the Middleware name should be updated in the annotation. The Middleware name should be in the form <namespace>-<middleware name>@kubernetescrd.

    +
     $ cd ${WORKDIR}
    + $ helm install soa-traefik-ingress  \
    +     charts/ingress-per-domain \
    +     --namespace soans \
    +     --values charts/ingress-per-domain/values.yaml \
    +     --set "traefik.hostname=${LOADBALANCER_HOSTNAME}" \
    +     --set sslType=SSL
    +

    Sample output:

    +
      NAME: soa-traefik-ingress
    +  LAST DEPLOYED: Mon Jul 20 11:44:13 2020
    +  NAMESPACE: soans
    +  STATUS: deployed
    +  REVISION: 1
    +  TEST SUITE: None
    +
    +
  10. +
  11. +

    Install ingress-per-domain using Helm for E2ESSL configuration.

    +
    +

    Note: To use the E2ESSL configuration, you must have created the Oracle SOA Suite domain with sslEnabled set to true. See Create Oracle SOA Suite domains for details.

    +
    +
     $ cd ${WORKDIR}
    + $ helm install soa-traefik-ingress  \
    +     charts/ingress-per-domain \
    +     --namespace soans \
    +     --values charts/ingress-per-domain/values.yaml \
    +     --set sslType=E2ESSL
    +

    Sample output:

    +
     NAME: soa-traefik-ingress
    + LAST DEPLOYED: Fri Apr  9 09:47:27 2021
    + NAMESPACE: soans
    + STATUS: deployed
    + REVISION: 1
    + TEST SUITE: None
    +
  12. +
  13. +

    For NONSSL access to the Oracle SOA Suite application, get the details of the services by the ingress:

    +
      $ kubectl describe ingress soainfra-traefik -n soans
    +
    +
    +
    + + + + + Click here to see all services supported by the above deployed ingress. + + +
    + +
    +
  14. +
  15. +

    For SSL access to the Oracle SOA Suite application, get the details of the services by the above deployed ingress:

    +
     $ kubectl describe ingress soainfra-traefik -n soans
    +
    +
    +
    + + + + + Click here to see all services supported by the above deployed ingress. + + +
    + +
    +
  16. +
  17. +

    For E2ESSL access to the Oracle SOA Suite application, get the details of the services by the above deployed ingress:

    +
     $ kubectl describe IngressRouteTCP soainfra-traefik -n soans	 
    +
    +
    +
    + + + + + Click here to see all services supported by the above deployed ingress. + + +
    + +
    +
  18. +
  19. +

    To confirm that the load balancer noticed the new ingress and is successfully routing to the domain server pods, you can send a request to the URL for the “WebLogic ReadyApp framework”, which should return an HTTP 200 status code, as follows:

    +
     $ curl -v http://${LOADBALANCER_HOSTNAME}:${LOADBALANCER_PORT}/weblogic/ready
    + *   Trying 149.87.129.203...
    + > GET http://${LOADBALANCER_HOSTNAME}:${LOADBALANCER_PORT}/weblogic/ready HTTP/1.1
    + > User-Agent: curl/7.29.0
    + > Accept: */*
    + > Proxy-Connection: Keep-Alive
    + > host: ${LOADBALANCER_HOSTNAME}
    + >
    + < HTTP/1.1 200 OK
    + < Date: Sat, 14 Mar 2020 08:35:03 GMT
    + < Vary: Accept-Encoding
    + < Content-Length: 0
    + < Proxy-Connection: Keep-Alive
    + <
    + * Connection #0 to host localhost left intact
    +
  20. +
+

Verify domain application URL access

+
For NONSSL configuration
+

After setting up the Traefik (ingress-based) load balancer, verify that the domain application URLs are accessible through the non-SSL load balancer port 30305 for HTTP access. The sample URLs for Oracle SOA Suite domain of type soa are:

+
    http://${LOADBALANCER_HOSTNAME}:${LOADBALANCER_NON_SSLPORT}/weblogic/ready
+    http://${LOADBALANCER_HOSTNAME}:${LOADBALANCER_NON_SSLPORT}/console
+    http://${LOADBALANCER_HOSTNAME}:${LOADBALANCER_NON_SSLPORT}/em
+    http://${LOADBALANCER_HOSTNAME}:${LOADBALANCER_NON_SSLPORT}/soa-infra
+    http://${LOADBALANCER_HOSTNAME}:${LOADBALANCER_NON_SSLPORT}/soa/composer
+    http://${LOADBALANCER_HOSTNAME}:${LOADBALANCER_NON_SSLPORT}/integration/worklistapp
+
For SSL configuration
+

After setting up the Traefik (ingress-based) load balancer, verify that the domain applications are accessible through the SSL load balancer port 30443 for HTTPS access. The sample URLs for Oracle SOA Suite domain of type soa are:

+
    https://${LOADBALANCER_HOSTNAME}:${LOADBALANCER_SSLPORT}/weblogic/ready
+    https://${LOADBALANCER_HOSTNAME}:${LOADBALANCER_SSLPORT}/console
+    https://${LOADBALANCER_HOSTNAME}:${LOADBALANCER_SSLPORT}/em
+    https://${LOADBALANCER_HOSTNAME}:${LOADBALANCER_SSLPORT}/soa-infra
+    https://${LOADBALANCER_HOSTNAME}:${LOADBALANCER_SSLPORT}/soa/composer
+    https://${LOADBALANCER_HOSTNAME}:${LOADBALANCER_SSLPORT}/integration/worklistapp
+
For E2ESSL configuration
+

After setting up the Traefik (ingress-based) load balancer, verify that the domain applications are accessible through the SSL load balancer port 30443 for HTTPS access.

+
    +
  • +

    To access the application URLs from the browser, update /etc/hosts on the browser host (in Windows, C:\Windows\System32\Drivers\etc\hosts) with the entries below

    +
    X.X.X.X  admin.org
    +X.X.X.X  soa.org
    +X.X.X.X  osb.org
    +
    +

    Note: The value of X.X.X.X is the host IP address on which this ingress is deployed.

    +
    +
    +

    Note: If you are behind a corporate proxy, make sure to update the browser proxy settings appropriately to access the host names added to the /etc/hosts file.

    +
    +
  • +
+

The sample URLs for Oracle SOA Suite domain of type soa are:

+
  https://admin.org:${LOADBALANCER_SSLPORT}/weblogic/ready
+  https://admin.org:${LOADBALANCER_SSLPORT}/console
+  https://admin.org:${LOADBALANCER_SSLPORT}/em
+  https://soa.org:${LOADBALANCER_SSLPORT}/soa-infra
+  https://soa.org:${LOADBALANCER_SSLPORT}/soa/composer
+  https://soa.org:${LOADBALANCER_SSLPORT}/integration/worklistapp
+

Uninstall the Traefik ingress

+

Uninstall and delete the ingress deployment:

+
$ helm delete soa-traefik-ingress  -n soans
+

Uninstall Traefik

+
$ helm delete traefik -n traefik
+
+ + + + + + + + + + + + + + + + + + + diff --git a/docs/23.4.2/soa-domains/adminguide/configuring-custom-ssl-certificates/index.html b/docs/23.4.2/soa-domains/adminguide/configuring-custom-ssl-certificates/index.html new file mode 100644 index 000000000..8937972ee --- /dev/null +++ b/docs/23.4.2/soa-domains/adminguide/configuring-custom-ssl-certificates/index.html @@ -0,0 +1,5773 @@ + + + + + + + + + + + + Configure SSL certificates :: Oracle Fusion Middleware on Kubernetes + + + + + + + + + + + + + + + + + + + + + + + + + +
Configure SSL certificates

Secure Socket Layer (SSL) provides a secured communication for data sent over unsecured networks. In an SSL termination scenario, you can configure SSL between the client browser and the load balancer in your Oracle SOA Suite instance to ensure that applications are accessed securely. In an SSL end-to-end scenario, an Oracle SOA Suite domain is configured to use a self-signed SSL certificate that was generated during domain creation. Clients will typically receive a message indicating that the signing CA for the certificate is unknown and not trusted.

+

This section provides details on how to create and configure custom (CA-issued) SSL certificates for Oracle SOA Suite domains in both SSL end-to-end and SSL termination scenarios.

+ +

Create custom SSL certificates in an SSL end-to-end scenario

+

These steps describe how to replace the identity and trust keystores of an Oracle SOA Suite domain with a custom identity and a custom trust keystore, and register them with digital certificates procured from a third-party certificate authority.

+

In this documentation, the registered domain is mydomain.com and the CA-signed certificates are obtained for mydomain.com.

+

Create a custom identity and custom trust keystore and generate a certificate signing request (CSR)

+

To create a custom identity and custom trust keystore and generate a CSR:

+
    +
  1. +

    Log in to the Enterprise Manager (EM) Console and access the Keystores page by opening WebLogic Domain > Security > Keystore.

    +
  2. +
  3. +

    Under the system stripe, click Create Keystore to create a new keystore.

    +
  4. +
  5. +

    Provide the following details for custom identity:

    +

    • Keystore Name: custIdentity
    • Protection: Select the Password option.
    • Keystore Password: Enter the password.
    • Confirm Password: Confirm the password.

    +
  6. +
  7. +

    Click Create Keystore to create another new keystore.

    +
  8. +
  9. +

    Provide the following details for custom trust:

    +
      +
    • Keystore Name: custTrust
    • +
    • Protection: Select the Password option.
    • +
    • Keystore Password: Enter the password.
    • +
    • Confirm Password: Confirm the password.
    • +
    +

    Custom Identity and Trust Keystores

    +
  10. +
  11. +

    Click Manage on the custIdentity keystore name and provide the password that you specified previously.

    +
  12. +
  13. +

    Click Generate Keypair to create a new key pair, and provide the following details for custIdentity with alias as custIdentity and password:

    +
      +
    • Alias Name: custIdentity
    • +
    • Common Name: Common name, for example, soak8s.mydomain.com (Registered domain name)
    • +
    • Organizational Unit: Name of the organizational unit
    • +
    • Organization: Organization name
    • +
    • Enter City, State, and Country names
    • +
    • Key Type: RSA
    • +
    • Key Size: 2048
    • +
    • Password: Enter the password
    • +
    +
  14. +
  15. +

    Click OK to generate the keypair.

    +
  16. +
  17. +

    Select the newly created keypair and click Generate CSR.

    +
  18. +
  19. +

    Export the created CSR, share it with a Certificate Authority (such as DigiCert), and get the root, intermediate, and signed certificates. The certificate is generated for the domain name you used in the Common Name field.

    +
  20. +
+

It is not mandatory to create identity and trust keystore under the system stripe that comes with default provisioning. You can create a new custom stripe and create identity and trust keystores under it.

+

Share the CSR with CA to get CA-signed certificates

+
    +
  1. +

    Select the new keypair under the custIdentity and click Generate CSR.

    +
  2. +
  3. +

    Export the created CSR and share it with the Certificate Authority and get root, intermediate, and signed certificates. The certificate is generated for the domain name you used in the Common Name field.

    +
  4. +
  5. +

    Download the certificates shared in the zip file from the CA. +The zip file contains one of the following:

    +
      +
    • the three certificates individually - root, intermediate, and signed certificates
    • +
    • root and intermediate certificates in one chain and signed certificate separately
    • +
    +
  6. +
  7. +

    Double-click the certificate chain for root and intermediate certificates. You can see the full chain when you click on the certification path.

    +
  8. +
  9. +

    Extract the root and intermediate certificates individually by going to the certification path, select the certificate to be extracted (root or intermediate) and click View Certificate.

    +
  10. +
  11. +

    On the View Certificates pop-up, select the Details tab and click Copy to File.

    +
  12. +
  13. +

    In the Certificate Export wizard, click Next, then select Base 64 encoded X.509 (CER), and then click Next. Export the certificate.

    +
  14. +
  15. +

    Name the exported certificate as root and intermediate certificates respectively.

    +
  16. +
+

Import CA certificates

+

Certificate Authority (CA) certificates must be imported in the following order: first the signed server certificate, then the intermediate certificate, and then the root certificate.

+

To import CA certificates:

+
    +
  1. +

    Use WLST commands to import the certificate chain in the identity keystore (custIdentity):

    +

    a. Combine the three certificates into a single text file called chain.pem in the following order: signed server certificate, followed by intermediate certificate, followed by root certificate:

    +
    -----BEGIN CERTIFICATE-----
    +<signed server certificate>
    +-----END CERTIFICATE-----
    +-----BEGIN CERTIFICATE-----
    +<intermediate certificate>
    +-----END CERTIFICATE-----
    +-----BEGIN CERTIFICATE-----
    +<root certificate>
    +-----END CERTIFICATE-----
    +

    b. Place the chain.pem in /tmp from where you will be executing the kubectl commands (for example, on the master node).

    +

    c. Enter the following command to change the file ownership to 1000:1000 user/group:

    +
    $ sudo chown 1000:1000 /tmp/chain.pem
    +

    d. Copy /tmp/chain.pem into the Administration Server pod (for example, soainfra-adminserver):

    +
    $ kubectl cp /tmp/chain.pem soans/soainfra-adminserver:/tmp/chain.pem
    +

    e. Exec into the Administration Server pod to perform all operations:

    +
    $ kubectl exec -it soainfra-adminserver -n soans -- bash
    +

    f. Start WLST and access the Oracle Platform Security Services (OPSS) key store service:

    +
    $ cd /u01/oracle/oracle_common/common/bin/
    +$ ./wlst.sh
    +:
    +:
    +wls:/offline> connect("weblogic","Welcome1","t3://soainfra-adminserver:7001")
    +:
    +:
    +wls:/soainfra/serverConfig/> svc = getOpssService(name='KeyStoreService')
    +

    g. Use the WLST importKeyStoreCertificate command to import chain.pem:

    +
    svc.importKeyStoreCertificate(appStripe='stripe', name='keystore', password='password', alias='alias', keypassword='keypassword', type='entrytype',filepath='absolute_file_path')
    +

    For example:

    +
    wls:/soainfra/serverConfig/> svc.importKeyStoreCertificate(appStripe='system', name='custIdentity', password='welcome1', alias='custIdentity', keypassword='welcome1', type='CertificateChain', filepath='/tmp/chain.pem')
    +

    h. Exit WLST:

    +
    exit()
    +
  2. +
  3. +

    Use Oracle Enterprise Manager to import the certificate chain into the trust keystore (custTrust), as follows (a WLST alternative is sketched after this list):

    +

    a. Log in to the Enterprise Manager Console and access the Keystores page by opening WebLogic domain > Security > Keystore.

    +

    b. Select the trust keystore (custTrust) and click Manage.

    +

    c. Click Import Certificate and import the certificates in this order:

    +
      +
    • +

      the signed server certificate as a trusted certificate (alias mySignedCert)

      +
    • +
    • +

      the intermediate certificate from CA as a trusted certificate (alias myInterCA)

      +
    • +
    • +

      the root certificate from CA as a trusted certificate (alias myRootCA)

      +
    • +
    +
  4. +
+
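If you prefer WLST over the Enterprise Manager console, the same certificates can be imported into the custTrust keystore with the OPSS keystore service from the WLST session shown above. This is a sketch: the alias names, passwords, and file paths are illustrative, the individual certificate files must already be copied into the Administration Server pod, and the certificates are imported in the same order as in the console steps.

    wls:/soainfra/serverConfig/> svc.importKeyStoreCertificate(appStripe='system', name='custTrust', password='<password>', alias='mySignedCert', keypassword='<password>', type='TrustedCertificate', filepath='/tmp/server.cer')
    wls:/soainfra/serverConfig/> svc.importKeyStoreCertificate(appStripe='system', name='custTrust', password='<password>', alias='myInterCA', keypassword='<password>', type='TrustedCertificate', filepath='/tmp/intermediate.cer')
    wls:/soainfra/serverConfig/> svc.importKeyStoreCertificate(appStripe='system', name='custTrust', password='<password>', alias='myRootCA', keypassword='<password>', type='TrustedCertificate', filepath='/tmp/root.cer')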

Synchronize the local keystore with the security store

+

Synchronize keystores to synchronize information between the domain home and the Oracle Platform Security Services (OPSS) store in the database.

+

To synchronize keystores:

+
    +
  1. Exec into the Administration server pod (for example, soainfra-adminserver): +
    $ kubectl exec -it soainfra-adminserver -n soans -- bash
    +
  2. +
  3. Start WLST and access the Oracle Platform Security Services (OPSS) keystore service: +
    $ cd /u01/oracle/oracle_common/common/bin/
    +$ ./wlst.sh
    +:
    +:
    +wls:/offline> connect("weblogic","Welcome1","t3://soainfra-adminserver:7001")
    +:
    +:
    +wls:/soainfra/serverConfig/> svc = getOpssService(name='KeyStoreService')
    +
  4. +
  5. Enter the following commands to synchronize the custom identity and custom trust keystores: +
    +

    Note: This step is necessary only if you are using the system stripe. You do not need to synchronize the keystores if you are using a custom stripe.

    +
    +
    wls:/soainfra/serverConfig/> svc.listKeyStoreAliases(appStripe="system", name="custIdentity", password="****", type="*")
    +wls:/soainfra/serverConfig/> syncKeyStores(appStripe='system',keystoreFormat='KSS')
    +wls:/soainfra/serverConfig/> svc.listKeyStoreAliases(appStripe="system", name="custTrust", password="****", type="*")
    +wls:/soainfra/serverConfig/> syncKeyStores(appStripe='system',keystoreFormat='KSS')
    +
  6. +
+

Update the WebLogic keystores with custom identity and trust

+

To update the WebLogic keystores with custom identity and custom trust:

+
    +
  1. +

    In the WebLogic Server Administration Console, open Servers > AdminServer > Configuration > Keystores tab.

    +
  2. +
  3. +

    Change the Keystores to Custom Identity and Custom Trust and Save.

    +
  4. +
  5. +

    Provide the values for Custom Identity:

    +
      +
    • Custom Identity Keystore: kss://system/custIdentity
    • +
    • Custom Identity KeyStore Type: KSS
    • +
    • Custom Identity PassPhrase: enter the password given when creating the custIdentity keystore.
    • +
    • Confirm Custom Identity PassPhrase: reenter the password.
    • +
    +
  6. +
  7. +

    Provide the values for Custom Trust:

    +
      +
    • Custom Trust Keystore: kss://system/custTrust
    • +
    • Custom Trust KeyStore Type: KSS
    • +
    • Custom Trust PassPhrase: enter the password given when creating the custTrust keystore.
    • +
    • Confirm Custom Trust PassPhrase: reenter the password.
    • +
    +
  8. +
  9. +

    Click Save and then Activate changes.

    +
  10. +
  11. +

    Open the SSL tab and provide the following details:

    +
      +
    • Private Key Alias: custIdentity (this is the alias given while creating the key pair in the custIdentity keystore.)
    • +
    • Private Key PassPhrase: enter the password given when creating the key pair under the custIdentity keystore.
    • +
    • Confirm Private Key PassPhrase: reenter the password.
    • +
    +
  12. +
  13. +

    In the Advanced section, change Hostname Verification to None. Click Save and Activate changes.

    +
  14. +
  15. +

    Repeat steps 1 to 7 for all Managed Servers.

    +
  16. +
  17. +

    Restart the domain.

    +
  18. +
  19. +

    Once the servers are up and running, check that the SSL URLs present the updated certificates (a quick verification sketch follows this list).

    +
  20. +
+
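As referenced in the last step, a quick way to check which certificate an SSL endpoint now presents is openssl s_client. This is a minimal sketch; <host> and <ssl-port> are placeholders for wherever the server's SSL port is reachable in your environment (for example, a NodePort on the master node):

    $ echo | openssl s_client -connect <host>:<ssl-port> -showcerts 2>/dev/null | openssl x509 -noout -subject -issuer -dates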

For more details, refer to:

+
    +
  1. Administering Oracle SOA Cloud Service
  2. +
  3. Administering Oracle Fusion Middleware
  4. +
+

Create custom SSL certificates in an SSL termination at a load balancer

+

This section provides references to configure a custom SSL certificate at a load balancer.

+

There are multiple CA vendors in the marketplace today, each offering different levels of service at varying price points. Research and choose a CA vendor that meets your service-level and budget requirements.

+

For a CA vendor to issue you a CA-issued SSL certificate, you must provide the following information:

+
    +
  • Your custom domain name.
  • +
  • Public information associated with the domain confirming you as the owner.
  • +
  • Email address associated with the custom domain for verification.
  • +
+

Create a Certificate Signing Request (CSR) for your load balancer and submit the CSR to the CA vendor. After receiving the CA-issued certificate, refer to Administering Oracle SOA Cloud Service to import the CA-issued SSL certificate to the load balancer. If you are using openssl to create the certificates, you can refer to Manually Generate a Certificate Signing Request (CSR) Using OpenSSL to submit the CSR to the CA vendor.
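If you are using OpenSSL, a minimal sketch for generating a private key and a CSR for the load balancer is shown below. The file names and subject fields are examples only; replace them with values appropriate for your custom domain:

    $ openssl genrsa -out loadbalancer.key 2048
    $ openssl req -new -key loadbalancer.key -out loadbalancer.csr \
        -subj "/C=US/ST=MyState/L=MyCity/O=MyOrg/CN=mydomain.example.com"
    $ openssl req -in loadbalancer.csr -noout -text

Submit the resulting loadbalancer.csr to your CA vendor.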

+ + + + + + + + + + + + + + + + + + + diff --git a/docs/23.4.2/soa-domains/adminguide/deploying-composites/deploy-artifacts/index.html b/docs/23.4.2/soa-domains/adminguide/deploying-composites/deploy-artifacts/index.html new file mode 100644 index 000000000..7f35a264f --- /dev/null +++ b/docs/23.4.2/soa-domains/adminguide/deploying-composites/deploy-artifacts/index.html @@ -0,0 +1,5956 @@ + + + + + + + + + + + + Deploy using composites in a persistent volume or image :: Oracle Fusion Middleware on Kubernetes + + + + + + + + + + + + + + + + + + + + + + + + + +
+
+
+ +
+
+ + + + +
+
+ +
+
+ + +
+
+ +
+ +
+ +
+ +

+ + Deploy using composites in a persistent volume or image +

+ + + + + + +

Learn how to deploy Oracle SOA Suite and Oracle Service Bus composite applications artifacts in a Kubernetes persistent volume or in an image to an Oracle SOA Suite environment deployed using a WebLogic Kubernetes Operator.

+

The deployment methods described in Deploy using JDeveloper and Deploy using Maven and Ant are manual processes. If you have the deployment artifacts (archives) already built, then you can package them either into a Kubernetes persistent volume or in an image and use this automated process to deploy the artifacts to an Oracle SOA Suite domain.

+

Prepare to use the deploy artifacts script

+

The sample scripts for deploying artifacts are available at ${WORKDIR}/create-soa-domain/domain-home-on-pv/

+

You must edit deploy-artifacts-inputs.yaml (or a copy of it) to provide the details of your domain and artifacts. +Refer to the configuration parameters below to understand the information that you must provide in this file.

+

Configuration parameters

+

The following parameters can be provided in the inputs file.

+ + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + +
ParameterDefinitionDefault
adminPortPort number of the Administration Server inside the Kubernetes cluster.7001
adminServerNameName of the Administration Server.AdminServer
domainUIDUnique ID that is used to identify the domain. This ID cannot contain any characters that are not valid in a Kubernetes service name.soainfra
domainTypeType of the domain. Mandatory input for Oracle SOA Suite domains. You must provide one of the supported domain type values: soa (deploys artifacts into an Oracle SOA Suite domain), osb (deploys artifacts into an Oracle Service Bus domain), or soaosb (deploys artifacts into both Oracle SOA Suite and Oracle Service Bus domains).soa
soaClusterNameName of the SOA WebLogic Server cluster instance in the domain. By default, the cluster name is soa_cluster. This configuration parameter is applicable only for soa and soaosb domain types.soa_cluster
imageSOA Suite Docker image. The artifacts deployment process requires Oracle SOA Suite 12.2.1.4. Refer to Obtain the Oracle SOA Suite Docker image for details on how to obtain or create the image.soasuite:12.2.1.4
imagePullPolicyOracle SOA Suite Docker image pull policy. Valid values are IfNotPresent, Always, Never.IfNotPresent
imagePullSecretNameName of the Kubernetes secret to access the Docker Store to pull the Oracle SOA Suite Docker image. The presence of the secret will be validated when this parameter is specified.
weblogicCredentialsSecretNameName of the Kubernetes secret for the Administration Server’s user name and password. If not specified, then the value is derived from the domainUID as <domainUID>-weblogic-credentials.soainfra-domain-credentials
namespaceKubernetes namespace in which the domain was created.soans
artifactsSourceTypeThe deploy artifacts source type. Set to PersistentVolume for deploy artifacts available in a persistent volume and Image for deploy artifacts available as an image.Image
persistentVolumeClaimNameName of the persistent volume claim created that hosts the deployment artifacts. If not specified, the value is derived from the domainUID as <domainUID>-deploy-artifacts-pvc.soainfra-deploy-artifacts-pvc
artifactsImageDeploy artifacts image. Required if artifactsSourceType is Image.artifacts:12.2.1.4
artifactsImagePullPolicyDeploy artifacts image pull policy. Valid values are IfNotPresent, Always, Never.IfNotPresent
artifactsImagePullSecretNameName of the Kubernetes secret to access the deploy artifacts image. The presence of the secret will be validated when this parameter is specified.
deployScriptFilesDirDirectory on the host machine to locate the required files to deploy artifacts to the Oracle SOA Suite domain, including the script that is specified in the deployScriptName parameter. By default, this directory is set to the relative path deploy.deploy
deployScriptsMountPathMount path where the deploy artifacts scripts are located inside a pod. The deploy-artifacts.sh script creates a Kubernetes job to run the script (specified by the deployScriptName parameter) in a Kubernetes pod to deploy the artifacts. Files in the deployScriptFilesDir directory are mounted to this location in the pod, so that the Kubernetes pod can use the scripts and supporting files to deploy artifacts./u01/weblogic
deployScriptNameScript that the deploy artifacts script uses to deploy artifacts to the Oracle SOA Suite domain. For Oracle SOA Suite, the script placed in the soa directory is used. For Oracle Service Bus, the script placed in the osb directory is used. The deploy-artifacts.sh script creates a Kubernetes job to run this script to deploy artifacts. The script is located in the in-pod directory that is specified by the deployScriptsMountPath parameter.deploy.sh
soaArtifactsArchivePathDirectory inside container where Oracle SOA Suite archives are placed./u01/sarchives
osbArtifactsArchivePathDirectory inside container where Oracle Service Bus archives are placed./u01/sbarchives
+
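For reference, a minimal deploy-artifacts-inputs.yaml for an image-based deployment to a SOA domain might look like the following sketch. All parameter names come from the table above; the values shown are the defaults and are illustrative only:

    adminPort: 7001
    adminServerName: AdminServer
    domainUID: soainfra
    domainType: soa
    soaClusterName: soa_cluster
    image: soasuite:12.2.1.4
    namespace: soans
    artifactsSourceType: Image
    artifactsImage: artifacts:12.2.1.4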

The sample demonstrates how to deploy Oracle SOA Suite composites or Oracle Service Bus applications to an Oracle SOA Suite domain home.

+

Run the deploy artifacts script

+

Run the deploy artifacts script, specifying your inputs file and an output directory to store the +generated artifacts:

+
$ ./deploy-artifacts.sh \
+  -i deploy-artifacts-inputs.yaml \
+  -o <path to output-directory>
+

The script performs the following steps:

+
    +
  • Creates a directory for the generated Kubernetes YAML files for the artifacts deployment process if it does not +already exist. The path name is <path to output-directory>/weblogic-domains/<domainUID>/<YYYYMMDD-hhmmss>. +If the directory already exists, its contents must be removed before running this script.
  • +
  • Creates a Kubernetes job that starts a utility Oracle SOA Suite container and runs +scripts to deploy artifacts provided either in an image or in a persistent volume.
  • +
+
Deploy artifacts from an image
+
    +
  1. +

    Create an image with artifacts

    +

    a. A sample Dockerfile to create the artifacts in an image is available at $WORKDIR/create-soa-domain/domain-home-on-pv/deploy-docker-file. This expects the Oracle SOA Suite related archives to be available in the soa directory and Oracle Service Bus archives to be available in the osb directory.

    +

    b. Create the soa directory and copy the Oracle SOA Suite archives to be deployed to the directory:

    +
    $ cd $WORKDIR/create-soa-domain/domain-home-on-pv/deploy-docker-file
    +$ mkdir soa
    +$ cp /path/sca_sampleBPEL.jar soa
    +

    c. Create the osb directory and copy the Oracle Service Bus archives to be deployed to the directory:

    +
    $ cd $WORKDIR/create-soa-domain/domain-home-on-pv/deploy-docker-file
    +$ mkdir osb
    +$ cp /path/simple_sbconfig.jar osb
    +

    d. Create the image using build.sh. This script creates the image with default tag 12.2.1.4 (artifacts:12.2.1.4):

    +
    $ cd $WORKDIR/create-soa-domain/domain-home-on-pv/deploy-docker-file
    +$ ./build.sh  -h
    +  Usage: build.sh -t [tag]
    +  Builds a Docker Image with Oracle SOA/OSB artifacts
    +  Parameters:
    +     -h: view usage
    +     -t: tag for image, default is 12.2.1.4
    +
    +
    +
    + + + + + Click here to see sample output of script with tag 12.2.1.4-v1 + + +
    + +
    +
  2. +
  3. +

    Update the artifactsImage parameter in deploy-artifacts-inputs.yaml with the new image details, and invoke deploy-artifacts.sh to deploy the artifacts.

    + +
    +
    + + + + + Click here to see sample output of deployment for domainType of soaosb + + +
    + +
    +
    +

    Note: When you run the script for domainType soaosb, a deployment pod is created with two containers, one for Oracle SOA Suite artifacts deployment and another for Oracle Service Bus artifacts deployment. When the deployment completes in one container while the other container is still running, the pod status moves from Ready to NotReady. Once both deployments complete successfully, the status of the pod moves to Completed.

    +
    +
  4. +
+
Deploy artifacts from a persistent volume
+
    +
  1. +

    Copy the Oracle SOA Suite artifacts to the soa directory and the Oracle Service Bus artifacts to the osb directory at the shared location. +For example, with location /share, Oracle SOA Suite artifacts are in /share/soa and Oracle Service Bus artifacts are in /share/osb.

    +
    $ ls /share/soa
    +sca_sampleBPEL.jar
    +$
    +$ ls /share/osb/
    +simple_sbconfig.jar
    +$
    +
  2. +
  3. +

    Create a PersistentVolume with the sample provided (artifacts-pv.yaml):

    +
    apiVersion: v1
    +kind: PersistentVolume
    +metadata:
    +  name: soainfra-deploy-artifacts-pv
    +spec:
    +  storageClassName: deploy-storage-class
    +  capacity:
    +    storage: 10Gi
    +  accessModes:
    +    - ReadOnlyMany
    +  persistentVolumeReclaimPolicy: Retain
    +  hostPath:
    +    path: "/share"
    +
    $ kubectl apply -f artifacts-pv.yaml
    +
  4. +
  5. +

    Create a PersistentVolumeClaim with the sample provided (artifacts-pvc.yaml):

    +
    apiVersion: v1
    +kind: PersistentVolumeClaim
    +metadata:
    +  name: soainfra-deploy-artifacts-pvc
    +  namespace: soans
    +spec:
    +  storageClassName: deploy-storage-class
    +  accessModes:
    +    - ReadOnlyMany
    +  resources:
    +    requests:
    +      storage: 10Gi
    +
    $ kubectl apply -f artifacts-pvc.yaml
    +
  6. +
  7. +

    Update the artifactsSourceType to PersistentVolume and provide the name for persistentVolumeClaimName in deploy-artifacts-inputs.yaml.

    +
  8. +
  9. +

    Invoke deploy-artifacts.sh to deploy the artifacts present in the persistent volume referenced by persistentVolumeClaimName.

    + +
    +
    + + + + + Click here to see sample output of deployment for domainType of soaosb + + +
    + +
    +
    +

    Note: When you run the script for domainType of soaosb, a deployment pod is created with two containers, one for Oracle SOA Suite artifacts deployment and one for Oracle Service Bus artifacts deployment. When the deployment completes in one container while the other container is still running, the pod status moves from Ready to NotReady. Once both deployments complete successfully, the status of the pod moves to Completed (a kubectl command to watch these transitions is shown after this list).

    +
    +
  10. +
+
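To observe the container status transitions described in the note above, you can watch the deployment pod while the job runs. A minimal sketch, assuming the soans namespace (the pod name is derived from the generated job name):

    $ kubectl get pods -n soans -w | grep deploy-artifacts-job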

Verify the deployment logs

+

To confirm the deployment of artifacts was successful, verify the output using the kubectl logs command:

+
+

Note: Replace <YYYYMMDD-hhmmss>, <domainUID> and <namespace> with values for your environment.

+
+

For Oracle SOA Suite artifacts:

+
$ kubectl logs job.batch/<domainUID>-deploy-artifacts-job-<YYYYMMDD-hhmmss> -n <namespace>  soa-deploy-artifacts-job
+

For Oracle Service Bus artifacts:

+
$ kubectl logs job.batch/<domainUID>-deploy-artifacts-job-<YYYYMMDD-hhmmss> -n <namespace>  osb-deploy-artifacts-job
+
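For example, with domainUID soainfra, namespace soans, and a job created at 20240115-093000 (a hypothetical timestamp), the commands would be:

    $ kubectl logs job.batch/soainfra-deploy-artifacts-job-20240115-093000 -n soans soa-deploy-artifacts-job
    $ kubectl logs job.batch/soainfra-deploy-artifacts-job-20240115-093000 -n soans osb-deploy-artifacts-job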
+ + + + + + + + + + + + + + + + + + + diff --git a/docs/23.4.2/soa-domains/adminguide/deploying-composites/deploy-using-maven-ant/index.html b/docs/23.4.2/soa-domains/adminguide/deploying-composites/deploy-using-maven-ant/index.html new file mode 100644 index 000000000..967b93893 --- /dev/null +++ b/docs/23.4.2/soa-domains/adminguide/deploying-composites/deploy-using-maven-ant/index.html @@ -0,0 +1,5751 @@ + + + + + + + + + + + + Deploy using Maven and Ant :: Oracle Fusion Middleware on Kubernetes + + + + + + + + + + + + + + + + + + + + + + + + + +
+
+
+ +
+
+ + + + + + + +
+
+ +
+ +
+ +
+ +

+ + Deploy using Maven and Ant +

+ + + + + + +

Learn how to deploy Oracle SOA Suite and Oracle Service Bus composite applications using the Maven and Ant based approach in an Oracle SOA Suite domain in the WebLogic Kubernetes Operator environment.

+

Before deploying composite applications, we need to create a Kubernetes pod in the same cluster where the Oracle SOA Suite domain is running, so that composite applications can be deployed using the internal Kubernetes Service for the Administration Server URL.

+

Place the SOA/Oracle Service Bus composite project at a shared location (for example, /share/soa-deploy) that is mounted at /composites inside the container. +Make sure the oracle user (uid: 1000, gid: 0) has permission on the /share/soa-deploy directory, so that it is accessible and writable inside the container.

+
$ sudo chown -R 1000:0 /share/soa-deploy
+

Follow the steps in this section to create a container and then use it to deploy Oracle SOA Suite and Oracle Service Bus composite applications using Maven or Ant.

+

Create a composite deployment container

+

Before creating a Kubernetes pod, make sure that the Oracle SOA Suite Docker image is available on a node, or you can create an image pull secret so that the pod can pull the Docker image on the host where it gets created.

+
    +
  1. +

    Create an image pull secret so that the Kubernetes pod can pull the image soasuite:12.2.1.4:

    +
    $ kubectl create secret docker-registry image-secret -n soans --docker-server=your-registry.com --docker-username=xxxxxx --docker-password=xxxxxxx  --docker-email=my@company.com
    +
  2. +
  3. +

    Create a PersistentVolume and PersistentVolumeClaim (soadeploy-pv.yaml and soadeploy-pvc.yaml) with sample composites for build and deploy placed at /share/soa-deploy.

    +

    a) Create a PersistentVolume with the sample provided (soadeploy-pv.yaml), which uses NFS (you can use hostPath or any other supported PV type):

    +
    apiVersion: v1
    +kind: PersistentVolume
    +metadata:
    +  name: soadeploy-pv
    +spec:
    +  storageClassName: soadeploy-storage-class
    +  capacity:
    +   storage: 10Gi
    +  accessModes:
    +    - ReadWriteMany
    +  # Valid values are Retain, Delete or Recycle
    +  persistentVolumeReclaimPolicy: Retain
    +  # hostPath:
    +  nfs:
    +    server: X.X.X.X
    +    path: "/share/soa-deploy"
    +

    b) Apply the YAML:

    +
    $ kubectl apply -f soadeploy-pv.yaml
    +

    c) Create a PersistentVolumeClaim (soadeploy-pvc.yaml):

    +
    kind: PersistentVolumeClaim
    +apiVersion: v1
    +metadata:
    +  name: soadeploy-pvc
    +  namespace: soans
    +spec:
    +  storageClassName: soadeploy-storage-class
    +  accessModes:
    +    - ReadWriteMany
    +  resources:
    +    requests:
    +      storage: 10Gi      
    +

    d) Apply the YAML:

    +
    $ kubectl apply -f soadeploy-pvc.yaml
    +
  4. +
  5. +

    Create a composite deploy pod using soadeploy.yaml to mount the composites inside the pod at /composites:

    +
    apiVersion: v1
    +kind: Pod
    +metadata:
    +  labels:
    +    run: soadeploy
    +  name: soadeploy
    +  namespace: soans
    +spec:
    +  imagePullSecrets:
    +  - name: image-secret
    +  containers:
    +  - image: soasuite:12.2.1.4
    +    name: soadeploy
    +    env:
    +    - name: M2_HOME
    +      value: /u01/oracle/oracle_common/modules/org.apache.maven_3.2.5
    +    command: ["/bin/bash", "-c", "echo 'export PATH=$PATH:$M2_HOME/bin' >> $HOME/.bashrc; sleep infinity"]
    +    imagePullPolicy: IfNotPresent
    +    volumeMounts:
    +    - name: mycomposite
    +      mountPath: /composites
    +  volumes:
    +  - name: mycomposite
    +    persistentVolumeClaim:
    +       claimName: soadeploy-pvc
    +
  6. +
  7. +

    Create the pod:

    +
    $ kubectl apply -f soadeploy.yaml
    +
  8. +
  9. +

    Once the Kubernetes pod is deployed, exec into the pod to perform Maven/Ant based build and deploy:

    +
    $ kubectl exec -it -n soans soadeploy -- bash
    +
  10. +
+

Maven based build and deploy

+
+

Note: Make sure to execute these commands inside the soadeploy pod.

+
+

Set up proxy details for Maven to pull dependencies from the internet.

+

If your environment is not running behind a proxy, then skip this step. Otherwise, replace REPLACE-WITH-PROXY-HOST, REPLACE-WITH-PROXY-PORT and the value for nonProxyHosts attribute per your environment and create the settings.xml:

+
$ mkdir $HOME/.m2
+$ cat <<EOF > $HOME/.m2/settings.xml
+<settings>
+<proxies>
+<proxy>
+<active>true</active>
+<protocol>http</protocol>
+<host>REPLACE-WITH-PROXY-HOST</host>
+<port>REPLACE-WITH-PROXY-PORT</port>
+<nonProxyHosts>soainfra-cluster-soa-cluster|soainfra-adminserver</nonProxyHosts>
+</proxy>
+</proxies>
+</settings>
+EOF
+

For Oracle SOA Suite composite applications

+
    +
  1. +

    Set up the environment for Maven:

    +
    #Perform Maven Sync
    +$ cd /u01/oracle/oracle_common/plugins/maven/com/oracle/maven/oracle-maven-sync/12.2.1/
    +$ mvn install:install-file \
    +    -DpomFile=oracle-maven-sync-12.2.1.pom \
    +    -Dfile=oracle-maven-sync-12.2.1.jar
    +
    +#install Maven plugin
    +$ mvn help:describe \
    +    -Dplugin=com.oracle.maven:oracle-maven-sync \
    +    -Ddetail
    +
    +#push libraries into internal repository
    +$ mvn com.oracle.maven:oracle-maven-sync:push \
    +    -DoracleHome=/u01/oracle \
    +    -DtestingOnly=false
    +
    +$ mvn archetype:crawl \
    +    -Dcatalog=$HOME/.m2/archetype-catalog.xml \
    +    -DarchetypeArtifactId=oracle-soa-application \
    +    -DarchetypeVersion=12.2.1-4-0
    +
  2. +
  3. +

    Build the SOA Archive (SAR) for your sample deployment available at /composites/mavenproject/my-soa-app:

    +
    $ cd /composites/mavenproject/my-soa-app
    +$ mvn package
    +

    The SAR will be generated at /composites/mavenproject/my-soa-app/my-project/target/sca_my-project.jar.

    +
  4. +
  5. +

    Deploy into the Oracle SOA Suite instance. For example, if the instance URL is http://soainfra-cluster-soa-cluster:8001 with credentials username: weblogic and password: Welcome1, enter the following commands:

    +
    $ cd /composites/mavenproject/my-soa-app
    +$ mvn pre-integration-test \
    +    -DoracleServerUrl=http://soainfra-cluster-soa-cluster:8001 \
    +    -DsarLocation=/composites/mavenproject/my-soa-app/my-project/target/sca_my-project.jar \
    +    -Doverwrite=true \
    +    -DforceDefault=true \
    +    -Dcomposite.partition=default \
    +    -Duser=weblogic  -Dpassword=Welcome1
    +
  6. +
+

For Oracle Service Bus composite applications

+
    +
  1. +

    Set up the environment for Maven:

    +
    #Perform Maven Sync
    +$ cd /u01/oracle/oracle_common/plugins/maven/com/oracle/maven/oracle-maven-sync/12.2.1/
    +$ mvn install:install-file \
    +    -DpomFile=oracle-maven-sync-12.2.1.pom \
    +    -Dfile=oracle-maven-sync-12.2.1.jar
    +
    +#push libraries into internal repository
    +$ mvn com.oracle.maven:oracle-maven-sync:push \
    +    -DoracleHome=$ORACLE_HOME
    +$ mvn archetype:crawl \
    +    -Dcatalog=$HOME/.m2/archetype-catalog.xml
    +
    +#Verify the mvn setup
    +$ mvn help:describe \
    +    -DgroupId=com.oracle.servicebus.plugin \
    +    -DartifactId=oracle-servicebus-plugin \
    +    -Dversion=12.2.1-4-0
    +
  2. +
  3. +

    Build the Oracle Service Bus Archive (sbconfig.sbar)

    +

    Build sbconfig.sbar for your sample deployment, available at /composites/mavenproject/HelloWorldSB:

    +
    $ cd /composites/mavenproject/HelloWorldSB
    +$ mvn com.oracle.servicebus.plugin:oracle-servicebus-plugin:package
    +

    The Oracle Service Bus Archive (SBAR) will be generated at /composites/mavenproject/HelloWorldSB/.data/maven/sbconfig.sbar.

    +
  4. +
  5. +

    Deploy the generated sbconfig.sbar into the Oracle Service Bus instance. For example, if the Administration Server URL is http://soainfra-adminserver:7001 with credentials username: weblogic and password: Welcome1, enter the following commands:

    +
    $ cd /composites/mavenproject/HelloWorldSB
    +$ mvn pre-integration-test   \
    +    -DoracleServerUrl=t3://soainfra-adminserver:7001 \
    +    -DoracleUsername=weblogic -DoraclePassword=Welcome1
    +
  6. +
+

Ant based build and deploy

+
+

Note: Make sure to execute these commands inside the soadeploy pod.

+
+

For Oracle SOA Suite composite applications

+
    +
  1. +

    Build an Oracle SOA Suite composite application using Ant. For example, if the composite application to be deployed is available at /composites/antproject/Project, enter the following commands:

    +
    $ cd /u01/oracle/soa/bin
    +$ ant -f ant-sca-package.xml \
    +      -DcompositeDir=/composites/antproject/Project \
    +      -DcompositeName=Project \
    +      -Drevision=0.1
    +

    The SOA Archive is generated at /composites/antproject/Project/deploy/sca_Project_rev0.1.jar, which will be used for deploying.

    +
  2. +
  3. +

    Deploy into the Oracle SOA Suite instance using Ant:

    +
    $ cd /u01/oracle/soa/bin
    +$ ant -f ant-sca-deploy.xml \
    +      -DserverURL=http://soainfra-cluster-soa-cluster:8001  \
    +      -DsarLocation=/composites/antproject/Project/deploy/sca_Project_rev0.1.jar \
    +      -Doverwrite=true \
    +      -Duser=weblogic -Dpassword=Welcome1
    +
  4. +
+

For Oracle Service Bus composite applications

+

See Developing Services Using Oracle Service Bus to deploy Oracle Service Bus composite applications using Ant.

+ + + + + + + + + + + + + + + + + + + diff --git a/docs/23.4.2/soa-domains/adminguide/deploying-composites/index.html b/docs/23.4.2/soa-domains/adminguide/deploying-composites/index.html new file mode 100644 index 000000000..58885aa35 --- /dev/null +++ b/docs/23.4.2/soa-domains/adminguide/deploying-composites/index.html @@ -0,0 +1,5608 @@ + + + + + + + + + + + + Deploy composite applications :: Oracle Fusion Middleware on Kubernetes + + + + + + + + + + + + + + + + + + + + + + + + + +
+
+
+ +
+
+ + + + +
+
+ +
+
+ + +
+
+ +
+ +
+ +
+ +

+ + Deploy composite applications +

+ + + + + + + +

Learn how to deploy the composite applications for Oracle SOA Suite and Oracle Service Bus domains.

+ + + + + + + +
    + + + + + + + + + + + + + + + + + +

    +Deploy using JDeveloper +

    + + + + + +

    Deploy Oracle SOA Suite and Oracle Service Bus composite applications from Oracle JDeveloper to Oracle SOA Suite in the WebLogic Kubernetes Operator environment.

    + + + + + + + + + + + + +

    +Deploy using Maven and Ant +

    + + + + + +

    Deploy Oracle SOA Suite and Oracle Service Bus composite applications using the Maven and Ant based approach in an Oracle SOA Suite deployment.

    + + + + + + + + + + + + +

    +Deploy using composites in a persistent volume or image +

    + + + + + +

    Deploy Oracle SOA Suite and Oracle Service Bus composite applications artifacts in a persistent volume or in an image.

    + + + + + + + + +
+ + + + + + + + + + + + + + + + + + diff --git a/docs/23.4.2/soa-domains/adminguide/deploying-composites/index.xml b/docs/23.4.2/soa-domains/adminguide/deploying-composites/index.xml new file mode 100644 index 000000000..7afa41c02 --- /dev/null +++ b/docs/23.4.2/soa-domains/adminguide/deploying-composites/index.xml @@ -0,0 +1,46 @@ + + + + Deploy composite applications on Oracle Fusion Middleware on Kubernetes + /fmw-kubernetes/23.4.2/soa-domains/adminguide/deploying-composites/ + Recent content in Deploy composite applications on Oracle Fusion Middleware on Kubernetes + Hugo -- gohugo.io + en-us + Thu, 18 Apr 2019 06:46:23 -0500 + + + + + + Deploy using JDeveloper + /fmw-kubernetes/23.4.2/soa-domains/adminguide/deploying-composites/supportjdev/ + Fri, 22 Feb 2019 15:44:42 -0500 + + /fmw-kubernetes/23.4.2/soa-domains/adminguide/deploying-composites/supportjdev/ + Learn how to deploy Oracle SOA Suite and Oracle Service Bus composite applications from Oracle JDeveloper (running outside the Kubernetes network) to an Oracle SOA Suite instance in the WebLogic Kubernetes Operator environment. +Use JDeveloper for development and test environments only. For a production environment, you should deploy using Application Control and WLST methods. + Deploy Oracle SOA Suite and Oracle Service Bus composite applications to Oracle SOA Suite from JDeveloper To deploy Oracle SOA Suite and Oracle Service Bus composite applications from Oracle JDeveloper, the Administration Server must be configured to expose a T3 channel. + + + + Deploy using Maven and Ant + /fmw-kubernetes/23.4.2/soa-domains/adminguide/deploying-composites/deploy-using-maven-ant/ + Fri, 22 Feb 2019 15:44:42 -0500 + + /fmw-kubernetes/23.4.2/soa-domains/adminguide/deploying-composites/deploy-using-maven-ant/ + Learn how to deploy Oracle SOA Suite and Oracle Service Bus composite applications using the Maven and Ant based approach in an Oracle SOA Suite in WebLogic Kubernetes Operator environment. +Before deploying composite applications, we need to create a Kubernetes pod in the same cluster where the Oracle SOA Suite domain is running, so that composite applications can be deployed using the internal Kubernetes Service for the Administration Server URL. + + + + Deploy using composites in a persistent volume or image + /fmw-kubernetes/23.4.2/soa-domains/adminguide/deploying-composites/deploy-artifacts/ + Tue, 19 Oct 2021 12:04:42 -0500 + + /fmw-kubernetes/23.4.2/soa-domains/adminguide/deploying-composites/deploy-artifacts/ + Learn how to deploy Oracle SOA Suite and Oracle Service Bus composite applications artifacts in a Kubernetes persistent volume or in an image to an Oracle SOA Suite environment deployed using a WebLogic Kubernetes Operator. +The deployment methods described in Deploy using JDeveloper and Deploy using Maven and Ant are manual processes. If you have the deployment artifacts (archives) already built, then you can package them either into a Kubernetes persistent volume or in an image and use this automated process to deploy the artifacts to an Oracle SOA Suite domain. 
+ + + + \ No newline at end of file diff --git a/docs/23.4.2/soa-domains/adminguide/deploying-composites/supportjdev/index.html b/docs/23.4.2/soa-domains/adminguide/deploying-composites/supportjdev/index.html new file mode 100644 index 000000000..178652b02 --- /dev/null +++ b/docs/23.4.2/soa-domains/adminguide/deploying-composites/supportjdev/index.html @@ -0,0 +1,5686 @@ + + + + + + + + + + + + Deploy using JDeveloper :: Oracle Fusion Middleware on Kubernetes + + + + + + + + + + + + + + + + + + + + + + + + + +
+
+
+ +
+
+ + + + + + + +
+
+ +
+ +
+ +
+ +

+ + Deploy using JDeveloper +

+ + + + + + +

Learn how to deploy Oracle SOA Suite and Oracle Service Bus composite applications from Oracle JDeveloper (running outside the Kubernetes network) to an Oracle SOA Suite instance in the WebLogic Kubernetes Operator environment.

+ +

Use JDeveloper for development and test environments only. For a production environment, you should deploy using Application Control and WLST methods.

+
+ +

Deploy Oracle SOA Suite and Oracle Service Bus composite applications to Oracle SOA Suite from JDeveloper

+

To deploy Oracle SOA Suite and Oracle Service Bus composite applications from Oracle JDeveloper, the Administration Server must be configured to expose a T3 channel. The WebLogic Kubernetes Operator provides an option to expose a T3 channel for the Administration Server using the exposeAdminT3Channel setting during domain creation; the matching T3 service can then be used to connect. By default, when exposeAdminT3Channel is set, the WebLogic Kubernetes Operator environment exposes the NodePort for the T3 channel of the NetworkAccessPoint at 30012 (use t3ChannelPort to configure the port to a different value).

+

If you miss enabling exposeAdminT3Channel during domain creation, follow Expose a T3/T3S Channel for the Administration Server to expose a T3 channel manually.

+

SOA Composite Deployment Model

+

Prerequisites

+
    +
  1. +

    Get the Kubernetes cluster master address and verify the T3 port that will be used for creating application server connections. Use the following command to get the T3 port:

    +
    $ kubectl get service <domainUID>-<AdministrationServerName>-external -n <namespace> -o jsonpath='{.spec.ports[0].nodePort}'
    +

    For example:

    +
    $ kubectl get service soainfra-adminserver-external -n  soans -o jsonpath='{.spec.ports[0].nodePort}'
    +
  2. +
  3. +

    Oracle SOA Suite in the WebLogic Kubernetes Operator environment is deployed in a Reference Configuration domain. If a SOA project is developed in Classic mode, JDeveloper displays a Mismatch notification in the Deploy Composite Wizard. By default, JDeveloper is in Classic mode. To develop SOA projects in Reference Configuration mode, you must manually enable this feature in JDeveloper: +a. From the File menu, select Tools, then Preferences. +b. Select Reference Configuration Settings. +c. Select Enable Reference Configuration settings in adapters.

    +

    Enable Reference Configuration

    +
  4. +
  5. +

    JDeveloper needs to access the Servers during deployment. In the WebLogic Kubernetes Operator environment, Administration and Managed Servers are pods and cannot be accessed directly by JDeveloper. As a workaround, you must configure the reachability of the Managed Servers:

    + +

    The Managed Server T3 port is not exposed by default, and opening it poses a security risk because authentication over this channel is based on a user ID and password. Doing this on production instances is not recommended.

    +
    + +
      +
    • +

      Decide on an external IP address to be used to configure access to the Managed Servers. Master or worker node IP address can be used to configure Managed Server reachability. In these steps, the Kubernetes cluster master IP is used for demonstration.

      +
    • +
    • +

      Get the pod names of the Administration Server and Managed Servers (that is, <domainUID>-<server name>), which will be used to map in /etc/hosts.

      +
    • +
    • +

      Update /etc/hosts (or in Windows, C:\Windows\System32\Drivers\etc\hosts) on the host where JDeveloper is running with the entries below, where

      +
      <Master IP> <Administration Server pod name>
      +<Master IP> <Managed Server1 pod name>
      +<Master IP> <Managed Server2 pod name>
      +

      Sample /etc/hosts entries look as follows, where X.X.X.X is the master node IP address:

      +
      X.X.X.X soainfra-adminserver
      +X.X.X.X soainfra-soa-server1  
      +X.X.X.X soainfra-soa-server2
      +
    • +
    • +

      Get the Kubernetes service name of the Oracle SOA Suite cluster to access externally with the master IP (or external IP):

      +
      $ kubectl get service <domainUID>-cluster-<soa-cluster> -n <namespace>
      +

      For example:

      +
      $ kubectl get service soainfra-cluster-soa-cluster -n soans
      +
    • +
    • +

      Create a Kubernetes service to expose the Oracle SOA Suite cluster service (<domainUID>-cluster-<soa-cluster>) externally with same port as the Managed Server:

      +
      $ kubectl expose service  <domainUID>-cluster-<soa-cluster> --name <domainUID>-<soa-cluster>-ext --external-ip=<Master IP> -n <namespace>
      +

      For example:

      +
      $ kubectl expose service  soainfra-cluster-soa-cluster --name soainfra-cluster-soa-cluster-ext --external-ip=X.X.X.X -n soans
      +
      +

      In a production environment, exposing the SOA cluster service with an external IP address is not recommended, as it can cause message drops on the SOA Managed Servers.

      +
      + +
    • +
    +
  6. +
+

Create an application server connection in JDeveloper

+
    +
  1. +

    Create a new application server connection (for example wls-k8s-op-connection) in JDeveloper: +Create Application Server Connection

    +
  2. +
  3. +

    In the configuration page, provide the WebLogic Hostname as the Kubernetes Master Address.

    +
  4. +
  5. +

    Update the Port as the T3 port (default is 30012) obtained in Prerequisites.

    +
  6. +
  7. +

    Enter the WebLogic Domain value (domainUID).

    +
  8. +
  9. +

    Test the connection to verify it is successful. +Create Application Server Connection

    +
  10. +
+

Deploy SOA composite applications using JDeveloper

+
    +
  1. +

    In JDeveloper, right-click the SOA project you want to deploy and select Deploy to display the deployment wizard. +Deploy Project

    +
  2. +
  3. +

    In the Deployment Action page, select Deploy to Application Server and click Next. +Deployment Action

    +
  4. +
  5. +

    In the Deployment Configuration page, select the appropriate options and click Next. +Deployment Configuration

    +
  6. +
  7. +

    In the Select server page, select the application server connection (wls-k8s-op-connection) that was created earlier and click Next. +Application Servers

    +
  8. +
  9. +

    If the Prerequisites were configured correctly, the lookup discovers the Managed Servers for deploying the composite. +Look Up Server

    +
  10. +
  11. +

    Using the application server connection, the Managed Servers (Oracle SOA Suite cluster) are listed on the SOA Servers page. Select the Oracle SOA Suite cluster and click Next. +Target Server

    +
  12. +
  13. +

    On the Summary page, click Finish to start deploying the composites to the Oracle SOA Suite cluster. +Deploy Summary

    +

    Deploying Progress

    +
  14. +
  15. +

    Verify logs on JDeveloper to confirm successful deployment. +Deploying Status

    +
  16. +
  17. +

    Enter the soa-infra URLs in a browser to confirm the composites are deployed on both servers of the Oracle SOA Suite cluster. +SOA URL 1

    +

    SOA URL 2

    +
  18. +
+

Deploy Oracle Service Bus composite applications using JDeveloper

+
    +
  1. +

    In JDeveloper, right-click the Oracle Service Bus project you want to deploy and select Deploy to display the deployment wizard. +Deploy Project

    +
  2. +
  3. +

    In the Deployment Action page, select Deploy to Application Server and click Next. +Deployment Action

    +
  4. +
  5. +

    In the Select Server page, select the application server connection (wls-k8s-op-connection) that was created earlier and click Next. +Application Servers

    +
  6. +
  7. +

    On the Summary page, click Finish to start deploying the composites to the Oracle Service Bus cluster. +Deploy Summary

    +

    Deploying Progress

    +
  8. +
  9. +

    In JDeveloper, verify logs to confirm successful deployment. +Deploying Status

    +
  10. +
  11. +

    In the Oracle Service Bus Console, click Launch Test Console to verify that the Oracle Service Bus composite application is deployed successfully. +Service Bus console

    +
  12. +
+ + + + + + + + + + + + + + + + + + + diff --git a/docs/23.4.2/soa-domains/adminguide/enable-additional-url-access/index.html b/docs/23.4.2/soa-domains/adminguide/enable-additional-url-access/index.html new file mode 100644 index 000000000..3195f1ba0 --- /dev/null +++ b/docs/23.4.2/soa-domains/adminguide/enable-additional-url-access/index.html @@ -0,0 +1,5601 @@ + + + + + + + + + + + + Enable additional URL access :: Oracle Fusion Middleware on Kubernetes + + + + + + + + + + + + + + + + + + + + + + + + + +
+
+
+ +
+
+ + + + +
+
+ +
+
+ + +
+
+ +
+ +
+ +
+ +

+ + Enable additional URL access +

+ + + + + + +

This section provides information about how to extend an existing ingress (Non-SSL and SSL termination) to enable additional application URL access for Oracle SOA Suite domains.

+

The ingress per domain created in the steps in Set up a load balancer exposes the application paths defined in template YAML files present at ${WORKDIR}/charts/ingress-per-domain/templates/.

+

To extend an existing ingress with additional application URL access:

+
    +
  1. +

    Update the template YAML file at ${WORKDIR}/charts/ingress-per-domain/templates/ to define additional path rules.

    +

    For example, to extend an existing NGINX-based ingress with additional paths /path1 and /path2 of an Oracle SOA Suite cluster, update nginx-ingress-nonssl.yaml, nginx-ingress-ssl.yaml, or nginx-ingress-e2essl.yaml accordingly with additional paths:

    +
    # Copyright (c) 2020, 2022, Oracle and/or its affiliates.
    +# Licensed under the Universal Permissive License v 1.0 as shown at https://oss.oracle.com/licenses/upl.
    +{{- if eq .Values.type "NGINX" }}
    +{{- if (eq .Values.sslType "NONSSL") }}
    +---
    +apiVersion: networking.k8s.io/v1
    +kind: Ingress
    +.
    +.
    +spec:
    +  rules:
    +  - host: '{{ .Values.nginx.hostname }}'
    +    http:
    +      paths:
    +      # Add new paths -- start
    +      - path: /path1
    +        backend:
    +          service:
    +            name: '{{ .Values.wlsDomain.domainUID }}-cluster-{{ .Values.wlsDomain.soaClusterName | lower | replace "_" "-" }}'
    +            port:
    +              number: {{ .Values.wlsDomain.soaManagedServerPort  }}
    +      - path: /path2
    +        backend:
    +          service:
    +            name: '{{ .Values.wlsDomain.domainUID }}-cluster-{{ .Values.wlsDomain.soaClusterName | lower | replace "_" "-" }}'
    +            port:
    +              number: {{ .Values.wlsDomain.soaManagedServerPort  }}
    +      # Add new paths -- end
    +      - path: /console
    +        backend:
    +.
    +.
    +{{- end }}
    +
  2. +
  3. +

    Get the Helm release name for the ingress installed in your domain namespace:

    +
    $ helm ls -n <domain_namespace>
    +

    For example, in the soans namespace:

    +
    $ helm ls -n soans
    +

    Sample output, showing the Helm release name for a NGINX-based ingress as soa-nginx-ingress:

    +
    NAME                  NAMESPACE       REVISION        UPDATED                               STATUS        CHART                 APP VERSION
    +soa-nginx-ingress     soans            1        2021-02-17 13:42:03.252742314 +0000 UTC  deployed  ingress-per-domain-0.1.0     1.0
    +$
    +
  4. +
  5. +

    To extend the existing ingress per domain with additional paths defined in the template YAML, use the helm upgrade command:

    +
    $ cd ${WORKDIR}
    +$ helm upgrade <helm_release_for_ingress> \
    +    charts/ingress-per-domain \
    +    --namespace <domain_namespace> \
    +    --reuse-values
    +
    +

    Note: helm_release_for_ingress is the ingress name used in the corresponding helm install command for the ingress installation.

    +
    +

    Sample command for a NGINX-based ingress soa-nginx-ingress in the soans namespace:

    +
    $ cd ${WORKDIR}
    +$ helm upgrade soa-nginx-ingress \
    +    charts/ingress-per-domain \
    +    --namespace soans \
    +    --reuse-values
    +

    This will upgrade the existing ingress to pick up the additional paths updated in the template YAML.

    +
  6. +
  7. +

    Verify that additional paths are updated into the existing ingress.

    +

    a. Get the existing ingress deployed in the domain namespace:

    +
    $ kubectl get ingress -n <domain_namespace>
    +

    For example, in the soans namespace:

    +
    $ kubectl get ingress -n soans
    +

    Sample output, showing the existing ingress as soainfra-nginx:

    +
    NAME               CLASS    HOSTS         ADDRESS        PORTS     AGE
    +soainfra-nginx   <none>   domain1.org  10.109.211.160   80, 443    xxd
    +

    b. Describe the ingress object and verify that new paths are available and pointing to desired backends.

    +

    Sample command and output, showing path and backend details for /path1 and /path2:

    +
    $ kubectl describe ingress soainfra-nginx -n soans|grep path
    +                                     /path1                     soainfra-cluster-soa-cluster:8001 (172.17.0.19:8001,172.17.0.20:8001)
    +                                     /path2                     soainfra-cluster-soa-cluster:8001 (172.17.0.19:8001,172.17.0.20:8001)
    +
  8. +
+ + + + + + + + + + + + + + + + + + + diff --git a/docs/23.4.2/soa-domains/adminguide/enablingt3/index.html b/docs/23.4.2/soa-domains/adminguide/enablingt3/index.html new file mode 100644 index 000000000..8b7ed686e --- /dev/null +++ b/docs/23.4.2/soa-domains/adminguide/enablingt3/index.html @@ -0,0 +1,5916 @@ + + + + + + + + + + + + Expose the T3/T3S protocol :: Oracle Fusion Middleware on Kubernetes + + + + + + + + + + + + + + + + + + + + + + + + + +
+
+
+ +
+
+ + + + + + + +
+
+ +
+ +
+ +
+ +

+ + Expose the T3/T3S protocol +

+ + + + + + + +

Oracle strongly recommends that you do not expose non-HTTPS traffic (T3/T3s/LDAP/IIOP/IIOPs) outside of the external firewall. You can control this access using a combination of network channels and firewalls.

+
+ +

You can create T3/T3S channels and the corresponding Kubernetes service to expose the T3/T3S protocol for the Administration Server and Managed Servers in an Oracle SOA Suite domain.

+

The WebLogic Kubernetes Operator provides an option to expose a T3 channel for the Administration Server using the exposeAdminT3Channel setting during domain creation; the matching T3 service can then be used to connect. By default, when exposeAdminT3Channel is set, the WebLogic Kubernetes Operator environment exposes the NodePort for the T3 channel of the NetworkAccessPoint at 30012 (use t3ChannelPort to configure the port to a different value).

+
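For reference, enabling the channel at domain creation time amounts to setting two values in the domain creation inputs file (a minimal sketch; the exact file depends on how you create the domain, for example create-domain-inputs.yaml):

    exposeAdminT3Channel: true
    t3ChannelPort: 30012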

If you miss enabling exposeAdminT3Channel during domain creation, follow these steps to create a T3 channel for the Administration Server manually.

+

Exposing SOA Managed Server T3 Ports

+

Expose a T3/T3S Channel for the Administration Server

+

To create a custom T3/T3S channel for the Administration Server that has a listen port listen_port and a paired public port public_port:

+
    +
  1. +

    Create t3_admin_config.py with the following content:

    +
    admin_pod_name = sys.argv[1]
    +admin_port = sys.argv[2]
    +user_name = sys.argv[3]
    +password = sys.argv[4]
    +listen_port = sys.argv[5]
    +public_port = sys.argv[6]
    +public_address = sys.argv[7]
    +AdminServerName = sys.argv[8]
    +channelType = sys.argv[9]
    +print('custom admin_pod_name : [%s]' % admin_pod_name);
    +print('custom admin_port : [%s]' % admin_port);
    +print('custom user_name : [%s]' % user_name);
    +print('custom password : ********');
    +print('public address : [%s]' % public_address);
    +print('channel listen port : [%s]' % listen_port);
    +print('channel public listen port : [%s]' % public_port);
    +connect(user_name, password, 't3://' + admin_pod_name + ':' + admin_port)
    +edit()
    +startEdit()
    +cd('/')
    +cd('Servers/%s/' % AdminServerName )
    +if channelType == 't3':
    +   create('T3Channel_AS','NetworkAccessPoint')
    +   cd('NetworkAccessPoints/T3Channel_AS')
    +   set('Protocol','t3')
    +   set('ListenPort',int(listen_port))
    +   set('PublicPort',int(public_port))
    +   set('PublicAddress', public_address)
    +   print('Channel T3Channel_AS added')
    +elif channelType == 't3s':	  
    +   create('T3SChannel_AS','NetworkAccessPoint')
    +   cd('NetworkAccessPoints/T3SChannel_AS')
    +   set('Protocol','t3s')
    +   set('ListenPort',int(listen_port))
    +   set('PublicPort',int(public_port))
    +   set('PublicAddress', public_address)
    +   set('HttpEnabledForThisProtocol', true)
    +   set('OutboundEnabled', false)
    +   set('Enabled', true)
    +   set('TwoWaySSLEnabled', true)
    +   set('ClientCertificateEnforced', false)
    +else:
    +   print('channelType [%s] not supported',channelType)  
    +activate()
    +disconnect()
    +
  2. +
  3. +

    Copy t3_admin_config.py into the domain home (for example, /u01/oracle/user_projects/domains/soainfra) of the Administration Server pod (for example, soainfra-adminserver in soans namespace).

    +
     $ kubectl cp t3_admin_config.py soans/soainfra-adminserver:/u01/oracle/user_projects/domains/soainfra
    +
    +
  4. +
  5. +

    Run wlst.sh t3_admin_config.py by using kubectl exec into the Administration Server pod, with the following parameters:

    +
      +
    • admin_pod_name: soainfra-adminserver # Administration Server pod
    • +
    • admin_port: 7001
    • +
    • user_name: weblogic
    • +
    • password: Welcome1 # weblogic password
    • +
    • listen_port: 30014 # New port for T3 Administration Server
    • +
    • public_port: 30014 # Kubernetes NodePort which will be used to expose T3 port externally
    • +
    • public_address:
    • +
    • AdminServerName: AdminServer # Give administration Server name
    • +
    • channelType: t3 # t3 or t3s protocol channel
    • +
    +
    $ kubectl exec -it <Administration Server pod> -n <namespace> -- /u01/oracle/oracle_common/common/bin/wlst.sh  <domain_home>/t3_admin_config.py <Administration Server pod>  <Administration Server port>  weblogic <password for weblogic> <t3 port on Administration Server> <t3 nodeport> <master_ip> <AdminServerName> <channelType t3 or t3s>
    +

    For example:

    +
    $ kubectl exec -it soainfra-adminserver -n soans -- /u01/oracle/oracle_common/common/bin/wlst.sh /u01/oracle/user_projects/domains/soainfra/t3_admin_config.py soainfra-adminserver  7001 weblogic Welcome1 30014 30014 xxx.xxx.xxx.xxx AdminServer t3
    +
  6. +
  7. +

    Create t3_admin_svc.yaml with the following contents to expose T3 at NodePort 30014 for domainName and domainUID as soainfra and domain deployed in soans namespace:

    +
    +

    Note: For T3S, replace NodePort 30014 with the appropriate value used with public_port while creating the T3S channel using wlst.sh in the previous step.

    +
    +
    apiVersion: v1
    +kind: Service
    +metadata:
    +   name: soainfra-adminserver-t3-external
    +   namespace: soans
    +   labels:
    +     weblogic.serverName: AdminServer
    +     weblogic.domainName: soainfra
    +     weblogic.domainUID: soainfra
    +spec:
    +  type: NodePort
    +  selector:
    +    weblogic.domainName: soainfra
    +    weblogic.domainUID: soainfra
    +    weblogic.serverName: AdminServer
    +  ports:
    +  - name: t3adminport
    +    protocol: TCP
    +    port: 30014
    +    targetPort: 30014
    +    nodePort: 30014
    +
  8. +
  9. +

    Create the NodePort service for port 30014:

    +
    $ kubectl create -f t3_admin_svc.yaml
    +
  10. +
  11. +

    Verify that you can access T3 for the Administration Server with the following URL:

    +
    t3://<master_ip>:30014
    +
  12. +
  13. +

    Similarly, you can access T3S as follows:

    +

    a. First, get the certificates from the Administration Server to be used for a secured (T3S) connection from the client. You can export the certificate from the Administration Server with WLST commands. For example, to export the default demoidentity:

    +
    +

    Note: If you are using a custom SSL certificate, replace the steps accordingly.

    +
    +
    $ kubectl exec -it soainfra-adminserver -n soans -- bash
    +$ /u01/oracle/oracle_common/common/bin/wlst.sh
    +$ connect('weblogic','Welcome1','t3://soainfra-adminserver:7001')
    +$ svc = getOpssService(name='KeyStoreService')
    +$ svc.exportKeyStoreCertificate(appStripe='system', name='demoidentity', password='DemoIdentityKeyStorePassPhrase', alias='DemoIdentity', type='Certificate', filepath='/tmp/cert.txt/')
    +

    These steps download the certificate at /tmp/cert.txt.

    +

    b. Use the same certificates from the client side and connect using t3s. For example:

    +
    $ export JAVA_HOME=/u01/jdk
    +$ keytool -import -v -trustcacerts -alias soadomain -file cert.txt -keystore $JAVA_HOME/jre/lib/security/cacerts -keypass changeit -storepass changeit
    +$ export WLST_PROPERTIES="-Dweblogic.security.SSL.ignoreHostnameVerification=true"
    +$ cd $ORACLE_HOME/oracle_common/common/bin
    +$ ./wlst.sh
    +  Initializing WebLogic Scripting Tool (WLST) ...
    +  Welcome to WebLogic Server Administration Scripting Shell
    +  Type help() for help on available commands
    +$ wls:/offline> connect('weblogic','Welcome1','t3s://<Master IP address>:30014')
    +
  14. +
+

Expose T3/T3S for Managed Servers

+

To create a custom T3/T3S channel for all Managed Servers, with a listen port listen_port and a paired public port public_port:

+
    +
  1. +

    Create t3_ms_config.py with the following content:

    +
    admin_pod_name = sys.argv[1]
    +admin_port = sys.argv[2]
    +user_name = sys.argv[3]
    +password = sys.argv[4]
    +listen_port = sys.argv[5]
    +public_port = sys.argv[6]
    +public_address = sys.argv[7]
    +managedNameBase = sys.argv[8]
    +ms_count = sys.argv[9]
    +channelType = sys.argv[10]
    +print('custom host : [%s]' % admin_pod_name);
    +print('custom port : [%s]' % admin_port);
    +print('custom user_name : [%s]' % user_name);
    +print('custom password : ********');
    +print('public address : [%s]' % public_address);
    +print('channel listen port : [%s]' % listen_port);
    +print('channel public listen port : [%s]' % public_port);
    +
    +connect(user_name, password, 't3://' + admin_pod_name + ':' + admin_port)
    +
    +edit()
    +startEdit()
    +for index in range(0, int(ms_count)):
    +  cd('/')
    +  msIndex = index+1
    +  cd('/')
    +  name = '%s%s' % (managedNameBase, msIndex)
    +  cd('Servers/%s/' % name )
    +  if channelType == 't3':
    +    create('T3Channel_MS','NetworkAccessPoint')
    +    cd('NetworkAccessPoints/T3Channel_MS')
    +    set('Protocol','t3')
    +    set('ListenPort',int(listen_port))
    +    set('PublicPort',int(public_port))
    +    set('PublicAddress', public_address)
    +    print('Channel T3Channel_MS added ...for ' + name)
    +  elif channelType == 't3s':	  
    +    create('T3SChannel_MS','NetworkAccessPoint')
    +    cd('NetworkAccessPoints/T3SChannel_MS')
    +    set('Protocol','t3s')
    +    set('ListenPort',int(listen_port))
    +    set('PublicPort',int(public_port))
    +    set('PublicAddress', public_address)
    +    set('HttpEnabledForThisProtocol', true)
    +    set('OutboundEnabled', false)
    +    set('Enabled', true)
    +    set('TwoWaySSLEnabled', true)
    +    set('ClientCertificateEnforced', false)
    +    print('Channel T3SChannel_MS added ...for ' + name)
    +  else:
    +    print('Protocol [%s] not supported' % channelType)  	
    +activate()
    +disconnect()
    +
  2. +
  3. +

    Copy t3_ms_config.py into the domain home (for example, /u01/oracle/user_projects/domains/soainfra) of the Administration Server pod (for example, soainfra-adminserver in soans namespace).

    +
    $ kubectl cp t3_ms_config.py soans/soainfra-adminserver:/u01/oracle/user_projects/domains/soainfra
    +
  4. +
  5. +

    Run wlst.sh t3_ms_config.py by using kubectl exec into the Administration Server pod, with the following parameters:

    +
      +
    • admin_pod_name: soainfra-adminserver # Administration Server pod
    • +
    • admin_port: 7001
    • +
    • user_name: weblogic
    • +
    • password: Welcome1 # weblogic password
    • +
    • listen_port: 30016 # New port for T3 Managed Servers
    • +
    • public_port: 30016 # Kubernetes NodePort which will be used to expose T3 port externally
    • +
    • public_address:
    • +
    • managedNameBase: soa_server # Give Managed Server base name. For osb_cluster this will be osb_server
    • +
    • ms_count: 5 # Number of configured Managed Servers
    • +
    • channelType: t3 # channelType is t3 or t3s
    • +
    +
    $ kubectl exec -it <Administration Server pod> -n <namespace> -- /u01/oracle/oracle_common/common/bin/wlst.sh  <domain_home>/t3_ms_config.py <Administration Server pod>  <Administration Server port>  weblogic <password for weblogic> <t3 port on Managed Server> <t3 nodeport> <master_ip> <managedNameBase> <ms_count> <channelType t3 or t3s>
    +

    For example:

    +
    $ kubectl exec -it soainfra-adminserver -n soans -- /u01/oracle/oracle_common/common/bin/wlst.sh /u01/oracle/user_projects/domains/soainfra/t3_ms_config.py soainfra-adminserver  7001 weblogic Welcome1 30016 30016 xxx.xxx.xxx.xxx soa_server 5 t3
    +
  6. +
  7. +

    Create t3_ms_svc.yaml with the following content to expose T3 at Managed Server port 30016, with domainName and domainUID soainfra and clusterName soa_cluster for the SOA cluster. Similarly, you can create a Kubernetes service with clusterName osb_cluster for an Oracle Service Bus cluster:

    +
    +

    Note: For T3S, replace NodePort 30016 with the appropriate value used with public_port while creating the T3S channel using wlst.sh in the previous step.

    +
    +
    apiVersion: v1
    +kind: Service
    +metadata:
    +   name: soainfra-soa-cluster-t3-external
    +   namespace: soans
    +   labels:
    +     weblogic.clusterName: soa_cluster
    +     weblogic.domainName: soainfra
    +     weblogic.domainUID: soainfra
    +spec:
    +  type: NodePort
    +  selector:
    +    weblogic.domainName: soainfra
    +    weblogic.domainUID: soainfra
    +    weblogic.clusterName: soa_cluster
    +  ports:
    +  - name: t3soaport
    +    protocol: TCP
    +    port: 30016
    +    targetPort: 30016
    +    nodePort: 30016
    +
  8. +
  9. +

    Create the NodePort service for port 30016:

    +
    $ kubectl create -f t3_ms_svc.yaml
    +
  10. +
  11. +

    Verify that you can access T3 for the Managed Server with the following URL:

    +
    t3://<master_ip>:30016
    +
  12. +
  13. +

    Similarly, you can access T3S as follows:

    +

    a. First, get the certificate from the Administration Server to be used for the secured (t3s) connection from the client. You can export the certificate from the Administration Server with WLST commands. Sample commands to export the default demoidentity:

    +
    +

    Note: If you are using a custom SSL certificate, adjust these steps accordingly.

    +
    +
    $ kubectl exec -it soainfra-adminserver -n soans -- bash
    +$ /u01/oracle/oracle_common/common/bin/wlst.sh
    +$ connect('weblogic','Welcome1','t3://soainfra-adminserver:7001')
    +$ svc = getOpssService(name='KeyStoreService')
    +$ svc.exportKeyStoreCertificate(appStripe='system', name='demoidentity', password='DemoIdentityKeyStorePassPhrase', alias='DemoIdentity', type='Certificate', filepath='/tmp/cert.txt')
    +

    The above steps download the certificate at /tmp/cert.txt.

    +

    b. Import the exported certificate into the client's trust store and connect using t3s. For example:

    +
    $ export JAVA_HOME=/u01/jdk
    +$ keytool -import -v -trustcacerts -alias soadomain -file cert.txt -keystore $JAVA_HOME/jre/lib/security/cacerts -keypass changeit -storepass changeit
    +$ export WLST_PROPERTIES="-Dweblogic.security.SSL.ignoreHostnameVerification=true"
    +$ cd $ORACLE_HOME/oracle_common/common/bin
    +$ ./wlst.sh
    +  Initializing WebLogic Scripting Tool (WLST) ...
    +  Welcome to WebLogic Server Administration Scripting Shell
    +  Type help() for help on available commands
    +$ wls:/offline> connect('weblogic','Welcome1','t3s://<Master IP address>:30016')
    +
  14. +
+
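As a quick sanity check before moving on, you can confirm that the Kubernetes service created above exists and has endpoints, and then try a T3 connection from a WLST client. This is a minimal sketch that assumes the service name, namespace, and port used in the examples above (soainfra-soa-cluster-t3-external, soans, 30016); adjust the values for your environment.

    $ kubectl get svc soainfra-soa-cluster-t3-external -n soans
    $ kubectl get endpoints soainfra-soa-cluster-t3-external -n soans

    ### From a client machine with WLST available
    $ $ORACLE_HOME/oracle_common/common/bin/wlst.sh
    wls:/offline> connect('weblogic','<password>','t3://<master_ip>:30016')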

Remove T3/T3S configuration

+

For Administration Server

+
    +
  1. +

    Create t3_admin_delete.py with the following content:

    +
    admin_pod_name = sys.argv[1]
    +admin_port = sys.argv[2]
    +user_name = sys.argv[3]
    +password = sys.argv[4]
    +AdminServerName = sys.argv[5]
    +channelType = sys.argv[6]
    +print('custom admin_pod_name : [%s]' % admin_pod_name);
    +print('custom admin_port : [%s]' % admin_port);
    +print('custom user_name : [%s]' % user_name);
    +print('custom password : ********');
    +connect(user_name, password, 't3://' + admin_pod_name + ':' + admin_port)
    +edit()
    +startEdit()
    +cd('/')
    +cd('Servers/%s/' % AdminServerName )
    +if channelType == 't3':
    +   delete('T3Channel_AS','NetworkAccessPoint')
    +elif channelType == 't3s':
    +   delete('T3SChannel_AS','NetworkAccessPoint')
    +else:
    +   print('channelType [%s] not supported',channelType)
    +activate()
    +disconnect()
    +
  2. +
  3. +

    Copy t3_admin_delete.py into the domain home (for example, /u01/oracle/user_projects/domains/soainfra) of the Administration Server pod (for example, soainfra-adminserver in soans namespace).

    +
    $ kubectl cp t3_admin_delete.py soans/soainfra-adminserver:/u01/oracle/user_projects/domains/soainfra
    +
  4. +
  5. +

    Run wlst.sh t3_admin_delete.py by exec'ing into the Administration Server pod, passing the following parameters:

    +
      +
    • admin_pod_name: soainfra-adminserver # Administration Server pod
    • admin_port: 7001
    • user_name: weblogic
    • password: Welcome1 # weblogic password
    • AdminServerName: AdminServer # Administration Server name
    • channelType: t3 # t3 or t3s
    +
    $ kubectl exec -it <Administration Server pod> -n <namespace> -- /u01/oracle/oracle_common/common/bin/wlst.sh  <domain_home>/t3_admin_delete.py <Administration Server pod>  <Administration Server port>  weblogic <password for weblogic> <AdminServerName> <protocol t3 or t3s>
    +

    For example:

    +
    $ kubectl exec -it soainfra-adminserver -n soans -- /u01/oracle/oracle_common/common/bin/wlst.sh /u01/oracle/user_projects/domains/soainfra/t3_admin_delete.py soainfra-adminserver 7001 weblogic Welcome1 AdminServer t3
    +
  6. +
  7. +

    Delete the NodePort service for port 30014:

    +
    $ kubectl delete -f t3_admin_svc.yaml
    +
  8. +
+
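Optionally, you can confirm that the channel was removed. The following WLST snippet is a minimal sketch; it assumes the Administration Server name used above (AdminServer) and lists the remaining network access points, where T3Channel_AS (or T3SChannel_AS) should no longer appear.

    wls:/offline> connect('weblogic','<password>','t3://soainfra-adminserver:7001')
    wls:/soainfra/serverConfig/> cd('Servers/AdminServer/NetworkAccessPoints')
    wls:/soainfra/serverConfig/Servers/AdminServer/NetworkAccessPoints> ls()
    wls:/soainfra/serverConfig/Servers/AdminServer/NetworkAccessPoints> disconnect()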

For Managed Servers

+

These steps delete, from all Managed Servers, the custom T3/T3S channel created in Expose T3/T3S for Managed Servers.

+
    +
  1. +

    Create t3_ms_delete.py with the following content:

    +
    admin_pod_name = sys.argv[1]
    +admin_port = sys.argv[2]
    +user_name = sys.argv[3]
    +password = sys.argv[4]
    +managedNameBase = sys.argv[5]
    +ms_count = sys.argv[6]
    +channelType = sys.argv[7]
    +print('custom host : [%s]' % admin_pod_name);
    +print('custom port : [%s]' % admin_port);
    +print('custom user_name : [%s]' % user_name);
    +print('custom password : ********');
    +connect(user_name, password, 't3://' + admin_pod_name + ':' + admin_port)
    +edit()
    +startEdit()
    +for index in range(0, int(ms_count)):
    +  cd('/')
    +  msIndex = index+1
    +  cd('/')
    +  name = '%s%s' % (managedNameBase, msIndex)
    +  cd('Servers/%s/' % name )
    +  if channelType == 't3':
    +    delete('T3Channel_MS','NetworkAccessPoint')
    +  elif channelType == 't3s':
    +    delete('T3SChannel_MS','NetworkAccessPoint')
    +  else:
    +    print('Protocol [%s] not supported' % channelType)
    +activate()
    +disconnect()
    +
  2. +
  3. +

    Copy t3_ms_delete.py into the domain home (for example, /u01/oracle/user_projects/domains/soainfra) of the Administration Server pod (for example, soainfra-adminserver in soans namespace).

    +
    $ kubectl cp t3_ms_delete.py soans/soainfra-adminserver:/u01/oracle/user_projects/domains/soainfra
    +
  4. +
  5. +

    Run wlst.sh t3_ms_delete.py by exec'ing into the Administration Server pod, passing the following parameters:

    +
      +
    • admin_pod_name: soainfra-adminserver # Administration Server pod
    • admin_port: 7001
    • user_name: weblogic
    • password: Welcome1 # weblogic password
    • managedNameBase: soa_server # Managed Server base name. For osb_cluster, this is osb_server
    • ms_count: 5 # Number of configured Managed Servers
    • channelType: t3 # t3 or t3s
    +
    $ kubectl exec -it <Administration Server pod> -n <namespace> -- /u01/oracle/oracle_common/common/bin/wlst.sh <domain_home>/t3_ms_delete.py <Administration Server pod> <Administration Server port> weblogic <password for weblogic> <managedNameBase> <ms_count> <channelType t3 or t3s>
    +

    For example:

    +
    $ kubectl exec -it soainfra-adminserver -n soans -- /u01/oracle/oracle_common/common/bin/wlst.sh /u01/oracle/user_projects/domains/soainfra/t3_ms_delete.py soainfra-adminserver 7001 weblogic Welcome1 soa_server 5 t3
    +
  6. +
  7. +

    Delete the NodePort service for port 30016 (or the NodePort used while creating the Kubernetes service):

    +
    $ kubectl delete -f t3_ms_svc.yaml
    +
  8. +
diff --git a/docs/23.4.2/soa-domains/adminguide/index.html b/docs/23.4.2/soa-domains/adminguide/index.html
new file mode 100644
index 000000000..4faefc1dc
--- /dev/null
+++ b/docs/23.4.2/soa-domains/adminguide/index.html
@@ -0,0 +1,5705 @@

Administration Guide

Administer Oracle SOA Suite domains in Kubernetes.

    • Set up a load balancer: Configure different load balancers for Oracle SOA Suite domains.
    • Enable additional URL access: Extend an existing ingress to enable additional application URL access for Oracle SOA Suite domains.
    • Configure SSL certificates: Create and configure custom SSL certificates for Oracle SOA Suite domains.
    • Monitor a domain and publish logs: Monitor an Oracle SOA Suite domain and publish the WebLogic Server logs to Elasticsearch.
    • Expose the T3/T3S protocol: Create a T3/T3S channel and the corresponding Kubernetes service to expose the T3/T3S protocol for the Administration Server and Managed Servers in an Oracle SOA Suite domain.
    • Deploy composite applications: Deploy composite applications for Oracle SOA Suite and Oracle Service Bus domains.
    • Persist adapter customizations: Persist the customizations done for Oracle SOA Suite adapters.
    • Perform WLST operations: Perform WLST administration operations using a helper pod running in the same Kubernetes cluster as the Oracle SOA Suite domain.
diff --git a/docs/23.4.2/soa-domains/adminguide/index.xml b/docs/23.4.2/soa-domains/adminguide/index.xml
new file mode 100644
index 000000000..dcb41286d
--- /dev/null
+++ b/docs/23.4.2/soa-domains/adminguide/index.xml
@@ -0,0 +1,80 @@
(Hugo-generated RSS index for the Administration Guide section; the entries repeat the page summaries shown elsewhere in this guide.)

diff --git a/docs/23.4.2/soa-domains/adminguide/monitoring-soa-domains/index.html b/docs/23.4.2/soa-domains/adminguide/monitoring-soa-domains/index.html
new file mode 100644
index 000000000..50395808f
--- /dev/null
+++ b/docs/23.4.2/soa-domains/adminguide/monitoring-soa-domains/index.html
@@ -0,0 +1,5620 @@
Monitor a domain and publish logs

After the Oracle SOA Suite domain is set up, you can:

    • Monitor the Oracle SOA Suite instance using Prometheus and Grafana
    • Publish WebLogic Server logs into Elasticsearch
    • Publish SOA server diagnostics logs into Elasticsearch

Monitor the Oracle SOA Suite instance using Prometheus and Grafana

+

Using the WebLogic Monitoring Exporter, you can scrape runtime information from a running Oracle SOA Suite instance and monitor it using Prometheus and Grafana.

+

Set up monitoring

+

Follow these steps to set up monitoring for an Oracle SOA Suite instance. For more details on WebLogic Monitoring Exporter, see here.

+

Publish WebLogic Server logs into Elasticsearch

+

WebLogic Server logs can be published to Elasticsearch using Fluentd. See Fluentd configuration steps.

+

Publish SOA server diagnostics logs into Elasticsearch

+

This section shows you how to publish diagnostics logs to Elasticsearch and view them in Kibana. For publishing operator logs, see this sample.

+

Prerequisites

+

If you have not already set up Elasticsearch and Kibana for logs collection, refer to this document and complete the setup.

+

Publish to Elasticsearch

+

Diagnostics and other logs can be pushed to the Elasticsearch server using a logstash pod. The logstash pod must have access to the shared domain home or the log location. For an Oracle SOA Suite domain, the domain home persistent volume can be mounted in the logstash pod. To create the logstash pod, follow these steps:

+
    +
  1. +

    Get the persistent volume claim details for the domain home of the Oracle SOA Suite domain. The following command lists the persistent volume claims in the soans namespace. In the example below, the persistent volume claim is soainfra-domain-pvc:

    +
    $ kubectl get pvc -n soans   
    +

    Sample output:

    +
    NAME                  STATUS   VOLUME               CAPACITY   ACCESS MODES   STORAGECLASS                    AGE
    +soainfra-domain-pvc   Bound    soainfra-domain-pv   10Gi       RWX            soainfra-domain-storage-class   xxd
    +
  2. +
  3. +

    Create the logstash configuration file (logstash.conf). Below is a sample logstash configuration to push the diagnostic logs of all servers available at DOMAIN_HOME/servers/<server_name>/logs/<server_name>-diagnostic.log:

    +
    input {                                                                                                                
    +  file {                                                                                                               
    +    path => "/u01/oracle/user_projects/domains/soainfra/servers/**/logs/*-diagnostic.log"                                          
    +    start_position => beginning                                                                                        
    +  }                                                                                                                    
    +}                                                                                                                         
    +filter {                                                                                                               
    +  grok {                                                                                                               
    +    match => [ "message", "<%{DATA:log_timestamp}> <%{WORD:log_level}> <%{WORD:thread}> <%{HOSTNAME:hostname}> <%{HOSTNAME:servername}> <%{DATA:timer}> <<%{DATA:kernel}>> <> <%{DATA:uuid}> <%{NUMBER:timestamp}> <%{DATA:misc}> <%{DATA:log_number}> <%{DATA:log_message}>" ]                                                                                        
    +  }                                                                                                                    
    +}                                                                                                                         
    +output {                                                                                                               
    +  elasticsearch {                                                                                                      
    +    hosts => ["elasticsearch.default.svc.cluster.local:9200"]                                                          
    +  }                                                                                                                    
    +}
    +
  4. +
  5. +

    Copy the logstash.conf into /u01/oracle/user_projects/domains so that it can be used for logstash deployment, using the Administration Server pod (for example soainfra-adminserver pod in namespace soans):

    +
    $ kubectl cp logstash.conf  soans/soainfra-adminserver:/u01/oracle/user_projects/domains --namespace soans
    +
  6. +
  7. +

    Create a deployment YAML (logstash.yaml) for the logstash pod using the domain home persistence volume claim. Make sure to point the logstash configuration file to the correct location (for example, copy logstash.conf to /u01/oracle/user_projects/domains/logstash.conf) and also the correct domain home persistence volume claim. Below is a sample logstash deployment YAML:

    +
    apiVersion: apps/v1
    +kind: Deployment
    +metadata:
    +  name: logstash-soa
    +  namespace: soans
    +spec:
    +  selector:
    +    matchLabels:
    +      app: logstash-soa
    +  template: # create pods using pod definition in this template
    +    metadata:
    +      labels:
    +        app: logstash-soa
    +    spec:
    +      volumes:
    +      - name: soainfra-domain-storage-volume
    +        persistentVolumeClaim:
    +          claimName: soainfra-domain-pvc
    +      - name: shared-logs
    +        emptyDir: {}
    +      containers:
    +      - name: logstash
    +        image: logstash:6.6.0
    +        command: ["/bin/sh"]
    +        args: ["/usr/share/logstash/bin/logstash", "-f", "/u01/oracle/user_projects/domains/logstash.conf"]
    +        imagePullPolicy: IfNotPresent
    +        volumeMounts:
    +        - mountPath: /u01/oracle/user_projects
    +          name: soainfra-domain-storage-volume
    +        - name: shared-logs
    +          mountPath: /shared-logs
    +        ports:
    +        - containerPort: 5044
    +          name: logstash
    +
  8. +
  9. +

    Deploy logstash to start publishing logs to Elasticsearch:

    +
    $ kubectl create -f  logstash.yaml
    +
  10. +
  11. +

    Now, you can view the diagnostics logs using Kibana with index pattern “logstash-*”.

    +
  12. +
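To confirm that the logstash pod is running and shipping logs, you can check its status and recent output. This is a minimal sketch assuming the deployment name, label, and namespace used above (logstash-soa, app: logstash-soa, soans).

    $ kubectl get pods -n soans -l app=logstash-soa
    $ kubectl logs -n soans deployment/logstash-soa --tail=20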
diff --git a/docs/23.4.2/soa-domains/adminguide/performing-wlst-operations/index.html b/docs/23.4.2/soa-domains/adminguide/performing-wlst-operations/index.html
new file mode 100644
index 000000000..77a1f27dc
--- /dev/null
+++ b/docs/23.4.2/soa-domains/adminguide/performing-wlst-operations/index.html
@@ -0,0 +1,5720 @@

Perform WLST operations

You can use the WebLogic Scripting Tool (WLST) to manage a domain running in a Kubernetes cluster. Some of the many ways to do this are provided here.

+

If the Administration Server was configured to expose a T3 channel using exposeAdminT3Channel when creating the domain, refer to Use WLST.

+

If you do not want to expose additional ports and instead want to perform WLST administration operations using the existing Kubernetes services created by the WebLogic Server Kubernetes operator, then follow this documentation. Here, you create and use a helper pod in the same Kubernetes cluster as the Oracle SOA Suite domain to perform WLST operations.

+
+

Note: To avoid any misconfigurations, Oracle recommends that you do not use the Administration Server pod directly for WLST operations.

+
+
    +
  1. Create a Kubernetes helper pod
  2. Perform WLST operations
  3. Sample WLST operations
+

Create a Kubernetes helper pod

+

Before creating a Kubernetes helper pod, make sure that the Oracle SOA Suite Docker image is available on the node, or create an image pull secret so that the pod can pull the Docker image on the host where it is created.

+
    +
  1. +

    Create an image pull secret so that the helper pod can pull the soasuite:12.2.1.4 image.

    +

    Note: Skip this step if you are not using an image pull secret.

    +
    $ kubectl create secret docker-registry <secret-name> --namespace soans \
    +  --docker-server=<docker-registry-name> \
    +  --docker-username=<docker-user> \
    +  --docker-password=<docker-password> \
    +  --docker-email=<email-id>
    +

    For example:

    +
    $ kubectl create secret docker-registry image-secret --namespace soans \
    +   --docker-server=your-registry.com \
    +   --docker-username=xxxxxx \
    +   --docker-password=xxxxxxx  \
    +   --docker-email=my@company.com
    +
  2. +
  3. +

    Create a helper pod.

    +
    $ kubectl run helper \
    +  --image <image_name> \
    +  --namespace <domain_namespace> \
    +  --overrides='{ "apiVersion": "v1", "spec": { "imagePullSecrets": [{"name": "<secret-name>"}] } }' \
    +  -- sleep infinity
    +

    For example:

    +
    $ kubectl run helper \
    +  --image soasuite:12.2.1.4 \
    +  --namespace soans \
    +  --overrides='{ "apiVersion": "v1", "spec": { "imagePullSecrets": [{"name": "image-secret"}] } }' \
    +  -- sleep infinity
    +
    +

    Note: If you are not using the image pull secret, remove --overrides='{ "apiVersion": "v1", "spec": { "imagePullSecrets": [{"name": "<secret-name>"}] } }' .

    +
    +
  4. +
+
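Before proceeding, you can verify that the helper pod reached the Running state (a quick check, assuming the pod name and namespace used in the example):

    $ kubectl get pod helper -n soans -o wide

The STATUS column should show Running before you exec into the pod.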

Perform WLST operations

+

Once the Kubernetes helper pod is deployed, you can exec into the pod, connect to servers using t3 or t3s and perform WLST operations. By default, t3s is not enabled for the Administration Server or Managed Servers. If you enabled SSL with sslEnabled when creating the domain, then you can use t3s to perform WLST operations.

+
Interactive mode
+
    +
  1. +

    Start a bash shell in the helper pod:

    +
    $ kubectl exec -it helper -n <domain_namespace> -- /bin/bash
    +

    For example:

    +
    $ kubectl exec -it helper -n soans -- /bin/bash
    +

    This opens a bash shell in the running helper pod:

    +
    [oracle@helper oracle]$
    +
    +
    +
  2. +
  3. +

    Invoke WLST:

    +
    [oracle@helper oracle]$ cd $ORACLE_HOME/oracle_common/common/bin
    +[oracle@helper bin]$ ./wlst.sh
    +

    The output will look similar to the following:

    +
    [oracle@helper bin]$ ./wlst.sh
    +
    +Initializing WebLogic Scripting Tool (WLST) ...
    +
    +Jython scans all the jar files it can find at first startup. Depending on the system, this process may take a few minutes to complete, and WLST may not return a prompt right away.
    +
    +Welcome to WebLogic Server Administration Scripting Shell
    +
    +Type help() for help on available commands
    +
    +wls:/offline>
    +
    +
    +
  4. +
  5. +

    Connect using t3:

    +

    a. To connect to the Administration Server or Managed Servers using t3, you can use the Kubernetes services created by the WebLogic Server Kubernetes operator:

    +
    wls:/offline> connect('weblogic','<password>','t3://<domainUID>-<WebLogic Server Name>:<Server Port>')
    +

    For example, if the domainUID is soainfra, Administration Server name is AdminServer, and Administration Server port is 7001, then you can connect to the Administration Server using t3:

    +
    wls:/offline> connect('weblogic','<password>','t3://soainfra-adminserver:7001')
    +

    The output will look similar to the following:

    +
    wls:/offline> connect('weblogic','<password>','t3://soainfra-adminserver:7001')
    +Connecting to t3://soainfra-adminserver:7001 with userid weblogic ...
    +Successfully connected to Admin Server "AdminServer" that belongs to domain "soainfra".
    +
    +Warning: An insecure protocol was used to connect to the server.
    +To ensure on-the-wire security, the SSL port or Admin port should be used instead.
    +
    +wls:/soainfra/serverConfig/>
    +

    b. To connect a WebLogic Server cluster (SOA or Oracle Service Bus) using t3, you can use the Kubernetes services created by the WebLogic Server Kubernetes operator:

    +
    wls:/offline> connect('weblogic','<password>','t3://<domainUID>-cluster-<Cluster name>:<Managed Server Port>')
    +

    For example, if the domainUID is soainfra, SOA cluster name is soa-cluster, and SOA Managed Server port is 8001, then you can connect to SOA Cluster using t3:

    +
    wls:/offline> connect('weblogic','<password>','t3://soainfra-cluster-soa-cluster:8001')
    +

    The output will look similar to the following:

    +
    wls:/offline> connect('weblogic','<password>','t3://soainfra-cluster-soa-cluster:8001')
    +Connecting to t3://soainfra-cluster-soa-cluster:8001 with userid weblogic ...
    +Successfully connected to Managed Server "soa_server1" that belongs to domain "soainfra".
    +
    +Warning: An insecure protocol was used to connect to the server.
    +To ensure on-the-wire security, the SSL port or Admin port should be used instead.
    +
    +wls:/soainfra/serverConfig/>
    +
  6. +
  7. +

    Connect using t3s.

    +

    If you enabled SSL with sslEnabled when creating the domain, then you can use t3s to perform WLST operations:

    +

    a. Obtain the certificate from the Administration Server to be used for a secured (t3s) connection from the client by exporting the certificate from the Administration Server using WLST commands. Sample commands to export the default demoidentity:

    +
    [oracle@helper oracle]$ cd $ORACLE_HOME/oracle_common/common/bin
    +[oracle@helper bin]$ ./wlst.sh
    +.
    +.
    +wls:/offline> connect('weblogic','<password>','t3://soainfra-adminserver:7001')
    +.
    +.
    +wls:/soainfra/serverConfig/> svc = getOpssService(name='KeyStoreService')
    +wls:/soainfra/serverConfig/> svc.exportKeyStoreCertificate(appStripe='system', name='demoidentity', password='DemoIdentityKeyStorePassPhrase', alias='DemoIdentity', type='Certificate', filepath='/tmp/cert.txt')
    +

    These commands download the certificate for the default demoidentity certificate at /tmp/cert.txt.

    +

    b. Import the certificate to the Java trust store:

    +
    [oracle@helper oracle]$ export JAVA_HOME=/u01/jdk
    +[oracle@helper oracle]$ keytool -import -v -trustcacerts -alias soadomain -file /tmp/cert.txt -keystore $JAVA_HOME/jre/lib/security/cacerts -keypass changeit -storepass changeit
    +

    c. Connect to WLST and set the required environment variable before connecting using t3s:

    +
    [oracle@helper oracle]$ export WLST_PROPERTIES="-Dweblogic.security.SSL.ignoreHostnameVerification=true"
    +[oracle@helper oracle]$ cd $ORACLE_HOME/oracle_common/common/bin
    +[oracle@helper bin]$ ./wlst.sh
    +

    d. Access t3s for the Administration Server.

    +

    For example, if the domainUID is soainfra, Administration Server name is AdminServer, and Administration Server SSL port is 7002, connect to the Administration Server as follows:

    +
    wls:/offline> connect('weblogic','<password>','t3s://soainfra-adminserver:7002')
    +

    e. Access t3s for the SOA cluster.

    +

    For example, if the domainUID is soainfra, SOA cluster name is soa-cluster, and SOA Managed Server SSL port is 8002, connect to the SOA cluster as follows:

    +
    wls:/offline> connect('weblogic','<password>','t3s://soainfra-cluster-soa-cluster:8002')
    +
  8. +
+
Script mode
+

In script mode, scripts contain WLST commands in a text file with a .py file extension (for example, mywlst.py). Before invoking WLST using the script file, you must copy the .py file into the helper pod. A minimal example script is shown after these steps.

To use WLST in script mode, copy the .py file into the helper pod and run it:

+
    +
  1. +

    Create a .py file containing all the WLST commands.

    +
  2. +
  3. +

    Copy the .py file into the helper pod:

    +
    $ kubectl cp <filename>.py <domain namespace>/helper:<directory>
    +

    For example:

    +
    $ kubectl cp mywlst.py soans/helper:/u01/oracle
    +
  4. +
  5. +

    Run wlst.sh on the .py file by exec into the helper pod:

    +
    $ kubectl exec -it helper -n <domain_namespace> -- /bin/bash
    +[oracle@helper oracle]$ cd $ORACLE_HOME/oracle_common/common/bin
    +[oracle@helper oracle]$ ./wlst.sh <directory>/<filename>.py
    +
  6. +
+

Note: Refer to Interactive mode for details on how to connect using t3 or t3s.

+
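For reference, below is a minimal example of such a script (a hypothetical mywlst.py); it connects over t3, prints the state of each configured server, and disconnects. The user name, password, URL, and port are placeholders for your domain values.

    connect('weblogic','<password>','t3://soainfra-adminserver:7001')
    servers = cmo.getServers()
    domainRuntime()
    for server in servers:
        name = server.getName()
        state = cmo.lookupServerLifeCycleRuntime(name).getState()
        print('%s is %s' % (name, state))
    disconnect()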

Sample WLST operations

+

For a full list of WLST operations, refer to WebLogic Server WLST Online and Offline Command Reference.

+
Display servers
+
$ kubectl exec -it helper -n soans -- /bin/bash
+[oracle@helper oracle]$ cd $ORACLE_HOME/oracle_common/common/bin
+[oracle@helper bin]$ ./wlst.sh
+
+Initializing WebLogic Scripting Tool (WLST) ...
+
+Jython scans all the jar files it can find at first startup. Depending on the system, this process may take a few minutes to complete, and WLST may not return a prompt right away.
+
+Welcome to WebLogic Server Administration Scripting Shell
+
+Type help() for help on available commands
+
+wls:/offline> connect('weblogic','Welcome1','t3://soainfra-adminserver:7001')
+Connecting to t3://soainfra-adminserver:7001 with userid weblogic ...
+Successfully connected to Admin Server "AdminServer" that belongs to domain "soainfra".
+
+Warning: An insecure protocol was used to connect to the server.
+To ensure on-the-wire security, the SSL port or Admin port should be used instead.
+
+wls:/soainfra/serverConfig/>  cd('/Servers')
+wls:/soainfra/serverConfig/Servers> ls()
+dr--   AdminServer
+dr--   osb_server1
+dr--   osb_server2
+dr--   osb_server3
+dr--   osb_server4
+dr--   osb_server5
+dr--   soa_server1
+dr--   soa_server2
+dr--   soa_server3
+dr--   soa_server4
+dr--   soa_server5
+
+wls:/soainfra/serverConfig/Servers>
+
diff --git a/docs/23.4.2/soa-domains/adminguide/persisting-soa-adapters-customizations/index.html b/docs/23.4.2/soa-domains/adminguide/persisting-soa-adapters-customizations/index.html
new file mode 100644
index 000000000..da2a62077
--- /dev/null
+++ b/docs/23.4.2/soa-domains/adminguide/persisting-soa-adapters-customizations/index.html
@@ -0,0 +1,5591 @@

Persist adapter customizations

Any customization made to a file on a server pod lasts only for the lifetime of that pod. The changes are not persisted when the pod goes down or is restarted.

+

For example, the following configuration updates DbAdapter.rar to create a new connection instance, with the corresponding data source CoffeeShop created in the Administration Console using JNDI name jdbc/CoffeeShopDS.

+

File location: /u01/oracle/soa/soa/connectors/DbAdapter.rar

+
<connection-instance>
+  <jndi-name>eis/DB/CoffeeShop</jndi-name>
+  <connection-properties>
+    <properties>
+      <property>
+        <name>XADataSourceName</name>
+        <value>jdbc/CoffeeShopDS</value>
+      </property>
+      <property>
+        <name>DataSourceName</name>
+	    <value></value>
+      </property>
+      <property>
+        <name>PlatformClassName</name>
+	    <value>org.eclipse.persistence.platform.database.Oracle10Platform</value>
+      </property>
+    </properties>
+   </connection-properties>
+</connection-instance>
+

If you need to persist the customizations for any of the adapter files under the SOA Oracle Home in the server pod, use one of the following methods.

+

Method 1: Customize the Adapter file using the WebLogic Administration Console:

+
    +
  1. +

    Log in to the WebLogic Administration Console, and go to Deployments > ABC.rar > Configuration > Outbound Connection Pools.

    +
  2. +
  3. +

    Click New to create a new connection, then provide a new connection name, and click Finish.

    +
  4. +
  5. +

    Go back to the new connection, update the properties as required, and save.

    +
  6. +
  7. +

    Under Deployments, select ABC.rar, then Update.

    +

    This step asks for the Plan.xml location. By default, this location is in ${ORACLE_HOME}/soa/soa, which is not on the persistent volume (PV). Therefore, provide a location on the domain's PV, such as ${DOMAIN_HOME}/soainfra/servers. The Plan.xml is then persisted under this location for each Managed Server.

    +
  8. +
+

Method 2: Customize the Adapter file on the Worker Node:

+
    +
  1. +

    Copy ABC.rar from the server pod to a PV path:

    +
    $ kubectl cp <namespace>/<SOA Managed Server pod name>:<full path of .rar file>  <destination path inside PV>
    +

    For example:

    +
    $ kubectl cp soans/soainfra-soa-server1:/u01/oracle/soa/soa/connectors/ABC.rar ${DockerVolume}/domains/soainfra/servers/ABC.rar
    +

    or do a normal file copy between these locations after entering the Managed Server pod (using kubectl exec).

    +
  2. +
  3. +

    Extract the contents of ABC.rar.

    +
  4. +
  5. +

    Update the new connection details in the weblogic-ra.xml file under META_INF.

    +
  6. +
  7. +

    In the WebLogic Administration Console, under Deployments, select ABC.rar, then Update.

    +
  8. +
  9. +

    Select the ABC.rar path as the new location, which is ${DOMAIN_HOME}/user_projects/domains/soainfra/servers/ABC.rar and click Update.

    +
  10. +
  11. +

    Verify that the plan.xml or updated .rar is persisted in the PV.

    +
  12. +
diff --git a/docs/23.4.2/soa-domains/appendix/docker-k8s-hardening/index.html b/docs/23.4.2/soa-domains/appendix/docker-k8s-hardening/index.html
new file mode 100644
index 000000000..6ff1cd30c
--- /dev/null
+++ b/docs/23.4.2/soa-domains/appendix/docker-k8s-hardening/index.html
@@ -0,0 +1,5550 @@

Security hardening

Securing a Kubernetes cluster involves hardening on multiple fronts: securing the API servers, etcd, nodes, container images, container run-time, and the cluster network. Apply the principles of defense in depth and least privilege, and minimize the attack surface. Use security tools such as Kube-Bench to verify the cluster's security posture. Since Kubernetes is evolving rapidly, refer to Kubernetes Security Overview for the latest information on securing a Kubernetes cluster. Also ensure that the deployed Docker containers follow the Docker Security guidance.

+

This section provides references on how to securely configure Docker and Kubernetes.

+

References

+
  1. Docker hardening
  2. Kubernetes hardening
  3. Security best practices for Oracle WebLogic Server Running in Docker and Kubernetes
diff --git a/docs/23.4.2/soa-domains/appendix/index.html b/docs/23.4.2/soa-domains/appendix/index.html
new file mode 100644
index 000000000..03b430ca3
--- /dev/null
+++ b/docs/23.4.2/soa-domains/appendix/index.html
@@ -0,0 +1,5598 @@

Appendix

This section provides information on miscellaneous tasks related to Oracle SOA Suite domains deployment on Kubernetes.

    • Domain resource sizing: Describes the resource sizing information for Oracle SOA Suite domains set up on a Kubernetes cluster.
    • Quick start deployment on-premise: Describes how to quickly get an Oracle SOA Suite domain instance running (using the defaults, nothing special) for development and test purposes.
    • Security hardening: Review resources for Docker and Kubernetes cluster hardening.
diff --git a/docs/23.4.2/soa-domains/appendix/index.xml b/docs/23.4.2/soa-domains/appendix/index.xml
new file mode 100644
index 000000000..1793ce1f6
--- /dev/null
+++ b/docs/23.4.2/soa-domains/appendix/index.xml
@@ -0,0 +1,43 @@
(Hugo-generated RSS index for the Appendix section; the entries repeat the page summaries shown above.)

diff --git a/docs/23.4.2/soa-domains/appendix/quickstart-deployment-on-prem/index.html b/docs/23.4.2/soa-domains/appendix/quickstart-deployment-on-prem/index.html
new file mode 100644
index 000000000..605221e88
--- /dev/null
+++ b/docs/23.4.2/soa-domains/appendix/quickstart-deployment-on-prem/index.html
@@ -0,0 +1,6220 @@

Quick start deployment on-premise

Use this Quick Start to create an Oracle SOA Suite domain deployment in a Kubernetes cluster (on-premise environments) with the WebLogic Kubernetes Operator. Note that this walkthrough is for demonstration purposes only, not for use in production. These instructions assume that you are already familiar with Kubernetes. If you need more detailed instructions, refer to the Install Guide.

+

Hardware requirements

+

The Linux kernels supported for deploying and running Oracle SOA Suite domains with the operator are Oracle Linux 7 (UL6+) and Red Hat Enterprise Linux 7 (UL3+, only with standalone Kubernetes). Refer to the prerequisites for more details.

+

For this exercise, the minimum hardware requirements to create a single-node Kubernetes cluster and then deploy the soaosb (Oracle SOA Suite, Oracle Service Bus, and Enterprise Scheduler (ESS)) domain type with one Managed Server for Oracle SOA Suite and one for the Oracle Service Bus cluster, along with Oracle Database running as a container are:

    Hardware        Size
    RAM             32GB
    Disk Space      250GB+
    CPU core(s)     6

See here for resource sizing information for Oracle SOA Suite domains set up on a Kubernetes cluster.

+

Set up Oracle SOA Suite in an on-premise environment

+

Use the steps in this topic to create a single-instance on-premise Kubernetes cluster and then create an Oracle SOA Suite soaosb domain type, which deploys a domain with Oracle SOA Suite, Oracle Service Bus, and Oracle Enterprise Scheduler (ESS).

+ +

1. Prepare a virtual machine for the Kubernetes cluster

+

For illustration purposes, these instructions are for Oracle Linux 7u6+. If you are using a different flavor of Linux, you will need to adjust the steps accordingly.

+ +

These steps must be run with the root user, unless specified otherwise. Any time you see YOUR_USERID in a command, replace it with your actual user ID.

+
+ +

1.1 Prerequisites

+
    +
  1. +

    Choose the directories where your Docker and Kubernetes files will be stored. The Docker directory should be on a disk with a lot of free space (more than 100GB) because it will be used for the Docker file system, which contains all of your images and containers. The Kubernetes directory is used for the /var/lib/kubelet file system and persistent volume storage.

    +
    $ export docker_dir=/u01/docker
    +$ export kubelet_dir=/u01/kubelet
    +$ mkdir -p $docker_dir $kubelet_dir
    +$ ln -s $kubelet_dir /var/lib/kubelet
    +
  2. +
  3. +

    Verify that IPv4 forwarding is enabled on your host.

    +

    Note: Replace eth0 with the ethernet interface name of your compute resource if it is different.

    +
    $ /sbin/sysctl -a 2>&1|grep -s 'net.ipv4.conf.docker0.forwarding'
    +$ /sbin/sysctl -a 2>&1|grep -s 'net.ipv4.conf.eth0.forwarding'
    +$ /sbin/sysctl -a 2>&1|grep -s 'net.ipv4.conf.lo.forwarding'
    +$ /sbin/sysctl -a 2>&1|grep -s 'net.ipv4.ip_nonlocal_bind'
    +

    Verify that all values are set to 1. For example:

    +
    $ net.ipv4.conf.docker0.forwarding = 1
    +$ net.ipv4.conf.eth0.forwarding = 1
    +$ net.ipv4.conf.lo.forwarding = 1
    +$ net.ipv4.ip_nonlocal_bind = 1
    +

    If any value is not 1, set all values to 1 immediately:

    +
    $ /sbin/sysctl net.ipv4.conf.docker0.forwarding=1
    +$ /sbin/sysctl net.ipv4.conf.eth0.forwarding=1
    +$ /sbin/sysctl net.ipv4.conf.lo.forwarding=1
    +$ /sbin/sysctl net.ipv4.ip_nonlocal_bind=1
    +
  4. +
  5. +

    To preserve the settings permanently, update the above values to 1 in configuration files under /usr/lib/sysctl.d/, /run/sysctl.d/, and /etc/sysctl.d/ (a sample file is shown after this list).

    +
  6. +
  7. +

    Verify the iptables rule for forwarding.

    +

    Kubernetes uses iptables to handle many networking and port forwarding rules. A standard Docker installation may create a firewall rule that prevents forwarding.

    +

    Verify if the iptables rule to accept forwarding traffic is set:

    +
    $ /sbin/iptables -L -n | awk '/Chain FORWARD / {print $4}' | tr -d ")"
    +

    If the output is “DROP”, then run the following command:

    +
    $ /sbin/iptables -P FORWARD ACCEPT
    +

    Verify if the iptables rule is properly set to “ACCEPT”:

    +
    $ /sbin/iptables -L -n | awk '/Chain FORWARD / {print $4}' | tr -d ")"
    +
  8. +
  9. +

    Disable and stop firewalld:

    +
    $ systemctl disable firewalld
    +$ systemctl stop firewalld
    +
  10. +
+
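For example, one way to persist the IPv4 forwarding settings checked above is to drop a file into /etc/sysctl.d/ and reload sysctl. This is a sketch; the file name (98-ip-forward.conf) is arbitrary, and the interface name should match your host.

    $ cat <<EOF > /etc/sysctl.d/98-ip-forward.conf
    net.ipv4.conf.docker0.forwarding = 1
    net.ipv4.conf.eth0.forwarding = 1
    net.ipv4.conf.lo.forwarding = 1
    net.ipv4.ip_nonlocal_bind = 1
    EOF
    $ /sbin/sysctl --system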

1.2 Install and configure Docker

+
+

Note: If you have already installed Docker version 19.03.1+ and configured the Docker daemon root on a disk with sufficient space, along with proxy settings, continue to Install and configure Kubernetes.

+
+
    +
  1. +

    Make sure that you have the right operating system version:

    +
    $ uname -a
    +$ more /etc/oracle-release
    +

    Example output:

    +
    Linux xxxxxxx 4.1.12-124.27.1.el7uek.x86_64 #2 SMP Mon May 13 08:56:17 PDT 2019 x86_64 x86_64 x86_64 GNU/Linux
    +Oracle Linux Server release 7.6
    +
  2. +
  3. +

    Install the latest docker-engine and start the Docker service:

    +
    $ docker_version="19.03.1.ol"
    +$ yum-config-manager --enable ol7_addons
    +$ yum install docker-engine-$docker_version
    +
    +$ systemctl enable docker
    +$ systemctl start docker
    +
  4. +
  5. +

    Add your user ID to the Docker group to allow you to run Docker commands without root access:

    +
    $ /sbin/usermod -a -G docker <YOUR_USERID>
    +
  6. +
  7. +

    Check that your Docker version is at least 19.03.1:

    +
    $ docker version
    +

    Example output:

    +
    Client: Docker Engine - Community
    + Version:           19.03.1-ol
    + API version:       1.40
    + Go version:        go1.12.5
    + Git commit:        ead9442
    + Built:             Wed Sep 11 06:40:28 2019
    + OS/Arch:           linux/amd64
    + Experimental:      false
    +
    +Server: Docker Engine - Community
    + Engine:
    + Version:          19.03.1-ol
    + API version:      1.40 (minimum version 1.12)
    + Go version:       go1.12.5
    + Git commit:       ead9442
    + Built:            Wed Sep 11 06:38:43 2019
    + OS/Arch:          linux/amd64
    + Experimental:     false
    + Default Registry: docker.io
    +containerd:
    + Version:          v1.2.0-rc.0-108-gc444666
    + GitCommit:        c4446665cb9c30056f4998ed953e6d4ff22c7c39
    +runc:
    + Version:          1.0.0-rc5+dev
    + GitCommit:        4bb1fe4ace1a32d3676bb98f5d3b6a4e32bf6c58
    +docker-init:
    + Version:          0.18.0
    + GitCommit:        fec3683
    +
  8. +
  9. +

    Update the Docker engine configuration:

    +
    $ mkdir -p /etc/docker
    +
    +$ cat <<EOF > /etc/docker/daemon.json
    +{
    +   "group": "docker",
    +   "data-root": "/u01/docker"
    +}
    +EOF
    +
  10. +
  11. +

    Configure proxy settings if you are behind an HTTP proxy:

    +
    ### Create the drop-in file /etc/systemd/system/docker.service.d/http-proxy.conf that contains proxy details:
    +$ cat <<EOF > /etc/systemd/system/docker.service.d/http-proxy.conf
    +[Service]
    +Environment="HTTP_PROXY=http://REPLACE-WITH-YOUR-COMPANY-PROXY-HOST:PORT"
    +Environment="HTTPS_PROXY=http://REPLACE-WITH-YOUR-COMPANY-PROXY-HOST:PORT"
    +Environment="NO_PROXY=localhost,127.0.0.0/8,ADD-YOUR-INTERNAL-NO-PROXY-LIST,/var/run/docker.sock"
    +EOF
    +
    +

    Note: On some hosts /etc/systemd/system/docker.service.d may not be available. Create this directory if it is not available.

    +
    +
  12. +
  13. +

    Restart the Docker daemon to load the latest changes:

    +
    $ systemctl daemon-reload
    +$ systemctl restart docker
    +
  14. +
  15. +

    Verify that the proxy is configured with Docker:

    +
    $ docker info|grep -i proxy
    +

    Example output:

    +
    HTTP Proxy: http://REPLACE-WITH-YOUR-COMPANY-PROXY-HOST:PORT
    +HTTPS Proxy: http://REPLACE-WITH-YOUR-COMPANY-PROXY-HOST:PORT
    +No Proxy: localhost,127.0.0.0/8,ADD-YOUR-INTERNAL-NO-PROXY-LIST,/var/run/docker.sock
    +
  16. +
  17. +

    Verify Docker installation:

    +
    $ docker run hello-world
    +

    Example output:

    +
    Hello from Docker!
    +This message shows that your installation appears to be working correctly.
    +To generate this message, Docker took the following steps:
    +1. The Docker client contacted the Docker daemon.
    +2. The Docker daemon pulled the "hello-world" image from the Docker Hub.
    +   (amd64)
    +3. The Docker daemon created a new container from that image which runs the
    +   executable that produces the output you are currently reading.
    +4. The Docker daemon streamed that output to the Docker client, which sent it to your terminal.
    +To try something more ambitious, you can run an Ubuntu container with:
    + $ docker run -it ubuntu bash
    +Share images, automate workflows, and more with a free Docker ID:
    + https://hub.docker.com/
    +For more examples and ideas, visit:
    + https://docs.docker.com/get-started/
    +
  18. +
+
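You can also confirm that Docker is using the data root configured in daemon.json (a quick check, assuming the /u01/docker path used above):

    $ docker info | grep -i 'Docker Root Dir'

The output should show Docker Root Dir: /u01/docker.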

1.3 Install and configure Kubernetes

+
    +
  1. +

    Add the external Kubernetes repository:

    +
    $ cat <<EOF | sudo tee /etc/yum.repos.d/kubernetes.repo
    +[kubernetes]
    +name=Kubernetes
    +baseurl=https://packages.cloud.google.com/yum/repos/kubernetes-el7-\$basearch
    +enabled=1
    +gpgcheck=1
    +repo_gpgcheck=1
    +gpgkey=https://packages.cloud.google.com/yum/doc/yum-key.gpg https://packages.cloud.google.com/yum/doc/rpm-package-key.gpg
    +exclude=kubelet kubeadm kubectl
    +EOF
    +
  2. +
  3. +

    Set SELinux in permissive mode (effectively disabling it):

    +
    $ export PATH=/sbin:$PATH
    +$ setenforce 0
    +$ sed -i 's/^SELINUX=enforcing$/SELINUX=permissive/' /etc/selinux/config
    +
  4. +
  5. +

    Export proxy and install kubeadm, kubelet, and kubectl:

    +
    ### Get the nslookup IP address of the master node to use with apiserver-advertise-address during setting up Kubernetes master
    +### as the host may have different internal ip (hostname -i) and nslookup $HOSTNAME
    +$ ip_addr=`nslookup $(hostname -f) | grep -m2 Address | tail -n1| awk -F: '{print $2}'| tr -d " "`
    +$ echo $ip_addr
    +
    +### Set the proxies
    +$ export NO_PROXY=localhost,127.0.0.0/8,ADD-YOUR-INTERNAL-NO-PROXY-LIST,/var/run/docker.sock,$ip_addr,.svc
    +$ export no_proxy=localhost,127.0.0.0/8,ADD-YOUR-INTERNAL-NO-PROXY-LIST,/var/run/docker.sock,$ip_addr,.svc
    +$ export http_proxy=http://REPLACE-WITH-YOUR-COMPANY-PROXY-HOST:PORT
    +$ export https_proxy=http://REPLACE-WITH-YOUR-COMPANY-PROXY-HOST:PORT
    +$ export HTTPS_PROXY=http://REPLACE-WITH-YOUR-COMPANY-PROXY-HOST:PORT
    +$ export HTTP_PROXY=http://REPLACE-WITH-YOUR-COMPANY-PROXY-HOST:PORT
    +
    +### Install Kubernetes
    +$ VERSION=1.23.6-0
    +$ yum install -y kubelet-$VERSION kubeadm-$VERSION kubectl-$VERSION --disableexcludes=kubernetes
    +
    +### Enable kubelet service so that it automatically restarts on reboot
    +$ systemctl enable --now kubelet
    +
  6. +
  7. +

    Ensure net.bridge.bridge-nf-call-iptables is set to 1 in your sysctl to avoid traffic routing issues:

    +
    $ cat <<EOF >  /etc/sysctl.d/k8s.conf
    +net.bridge.bridge-nf-call-ip6tables = 1
    +net.bridge.bridge-nf-call-iptables = 1
    +EOF
    +$ sysctl --system
    +
  8. +
  9. +

    Disable swap check:

    +
    $ sed -i 's/KUBELET_EXTRA_ARGS=/KUBELET_EXTRA_ARGS="--fail-swap-on=false"/' /etc/sysconfig/kubelet
    +$ cat /etc/sysconfig/kubelet
    +### Reload and restart kubelet
    +$ systemctl daemon-reload
    +$ systemctl restart kubelet
    +
  10. +
  11. +

    From Kubernetes v1.22 onward, kubeadm defaults the cgroup driver to systemd. If Docker is using the cgroupfs cgroup driver, set --cgroup-driver=cgroupfs for kubelet.

    +
    $ sed -i 's/^KUBELET_EXTRA_ARGS=.*/KUBELET_EXTRA_ARGS="--fail-swap-on=false --cgroup-driver=cgroupfs"/' /etc/sysconfig/kubelet
    +$ cat /etc/sysconfig/kubelet
    +### Reload and restart kubelet
    +$ systemctl daemon-reload
    +$ systemctl restart kubelet
    +
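
    To check which cgroup driver your Docker daemon currently uses (the cgroupfs value below is only an example):

    $ docker info 2>/dev/null | grep -i "cgroup driver"
     Cgroup Driver: cgroupfs
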
  12. +
+

1.4 Set up Helm

+
    +
  1. +

    Install Helm v3.10.2+.

    +

    a. Download Helm from https://github.com/helm/helm/releases.

    +

    For example, to download Helm v3.10.2:

    +
    $ wget https://get.helm.sh/helm-v3.10.2-linux-amd64.tar.gz
    +

    b. Unpack tar.gz:

    +
    $ tar -zxvf helm-v3.10.2-linux-amd64.tar.gz
    +

    c. Find the Helm binary in the unpacked directory, and move it to its desired destination:

    +
    $ mv linux-amd64/helm /usr/bin/helm
    +
  2. +
  3. +

    Run helm version to verify its installation:

    +
    $ helm version
    +  version.BuildInfo{Version:"v3.10.2", GitCommit:"50f003e5ee8704ec937a756c646870227d7c8b58", GitTreeState:"clean", GoVersion:"go1.18.8"}
    +
  4. +
+

2. Set up a single instance Kubernetes cluster

+
+

Notes:

+
    +
  • These steps must be run with the root user, unless specified otherwise!
  • +
  • If you choose to use a different CIDR block (that is, other than 10.244.0.0/16 for the --pod-network-cidr= in the kubeadm init command), then also update NO_PROXY and no_proxy with the appropriate value. +
      +
    • Also make sure to update kube-flannel.yml with the new value before deploying.
    • +
    +
  • +
  • Replace the following with appropriate values: +
      +
    • ADD-YOUR-INTERNAL-NO-PROXY-LIST
    • +
    • REPLACE-WITH-YOUR-COMPANY-PROXY-HOST:PORT
    • +
    +
  • +
+
+

2.1 Set up the master node

+
    +
  1. +

    Create a shell script that sets up the necessary environment variables. You can append this to the user’s .bashrc so that it will run at login. You must also configure your proxy settings here if you are behind an HTTP proxy:

    +
    ## grab my IP address to pass into  kubeadm init, and to add to no_proxy vars
    +ip_addr=`nslookup $(hostname -f) | grep -m2 Address | tail -n1| awk -F: '{print $2}'| tr -d " "`
    +export pod_network_cidr="10.244.0.0/16"
    +export service_cidr="10.96.0.0/12"
    +export PATH=$PATH:/sbin:/usr/sbin
    +
    +### Set the proxies
    +export NO_PROXY=localhost,.svc,127.0.0.0/8,ADD-YOUR-INTERNAL-NO-PROXY-LIST,/var/run/docker.sock,$ip_addr,$pod_network_cidr,$service_cidr
    +export no_proxy=localhost,.svc,127.0.0.0/8,ADD-YOUR-INTERNAL-NO-PROXY-LIST,/var/run/docker.sock,$ip_addr,$pod_network_cidr,$service_cidr
    +export http_proxy=http://REPLACE-WITH-YOUR-COMPANY-PROXY-HOST:PORT
    +export https_proxy=http://REPLACE-WITH-YOUR-COMPANY-PROXY-HOST:PORT
    +export HTTPS_PROXY=http://REPLACE-WITH-YOUR-COMPANY-PROXY-HOST:PORT
    +export HTTP_PROXY=http://REPLACE-WITH-YOUR-COMPANY-PROXY-HOST:PORT
    +
  2. +
  3. +

    Source the script to set up your environment variables:

    +
    $ . ~/.bashrc
    +
  4. +
  5. +

    To implement command completion, add the following to the script:

    +
    $ [ -f /usr/share/bash-completion/bash_completion ] && . /usr/share/bash-completion/bash_completion
    +$ source <(kubectl completion bash)
    +
  6. +
  7. +

    Run kubeadm init to create the master node:

    +
    $ kubeadm init \
    +  --pod-network-cidr=$pod_network_cidr \
    +  --apiserver-advertise-address=$ip_addr \
    +  --ignore-preflight-errors=Swap  > /tmp/kubeadm-init.out 2>&1
    +
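
    Because the command output is redirected to /tmp/kubeadm-init.out, inspect that file to confirm the initialization succeeded. If you later need the join command for additional nodes, it can be regenerated (run as root):

    $ tail -n 20 /tmp/kubeadm-init.out
    $ kubeadm token create --print-join-command
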
  8. +
  9. +

    Log in to a terminal as YOUR_USERID:YOUR_GROUP. Then set up ~/.bashrc for YOUR_USERID:YOUR_GROUP in the same way as steps 1 to 3.

    +
    +

    Note that from now on, kubectl commands are executed as YOUR_USERID:YOUR_GROUP and not as root.

    +
    +
  10. +
  11. +

    Set up YOUR_USERID:YOUR_GROUP to access the Kubernetes cluster:

    +
    $ mkdir -p $HOME/.kube
    +$ sudo cp -i /etc/kubernetes/admin.conf $HOME/.kube/config
    +$ sudo chown $(id -u):$(id -g) $HOME/.kube/config
    +
  12. +
  13. +

    Verify that YOUR_USERID:YOUR_GROUP is set up to access the Kubernetes cluster using the kubectl command:

    +
    $ kubectl get nodes
    +
    +

    Note: At this step, the node is not in Ready state because the pod network add-on has not yet been installed. After the next step, the node will show a status of Ready.

    +
    +
  14. +
  15. +

    Install a pod network add-on (flannel) so that your pods can communicate with each other.

    +
    +

    Note: If you are using a different CIDR block than 10.244.0.0/16, then download and update kube-flannel.yml with the correct CIDR address before deploying into the cluster:

    +
    +
    $ wget https://raw.githubusercontent.com/flannel-io/flannel/v0.17.0/Documentation/kube-flannel.yml
    +$ ### Update the CIDR address if you are using a CIDR block other than the default 10.244.0.0/16
    +$ kubectl apply -f kube-flannel.yml
    +
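
    For example, assuming a hypothetical new CIDR block of 10.10.0.0/16, the default value in kube-flannel.yml could be replaced with:

    $ sed -i 's|10.244.0.0/16|10.10.0.0/16|g' kube-flannel.yml
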
  16. +
  17. +

    Verify that the master node is in Ready status:

    +
    $ kubectl get nodes
    +

    Sample output:

    +
    NAME                 STATUS   ROLES                  AGE   VERSION
    +mymasternode      Ready    control-plane,master   12h   v1.23.6
    +

    or:

    +
    $ kubectl get pods -n kube-system
    +

    Sample output:

    +
    NAME                                    READY       STATUS      RESTARTS    AGE
    +pod/coredns-86c58d9df4-58p9f                1/1         Running         0       3m59s
    +pod/coredns-86c58d9df4-mzrr5                1/1         Running         0       3m59s
    +pod/etcd-mymasternode                       1/1         Running         0       3m4s
    +pod/kube-apiserver-node                     1/1         Running         0       3m21s
    +pod/kube-controller-manager-mymasternode    1/1         Running         0       3m25s
    +pod/kube-flannel-ds-6npx4                   1/1         Running         0       49s
    +pod/kube-proxy-4vsgm                        1/1         Running         0       3m59s
    +pod/kube-scheduler-mymasternode             1/1         Running         0       2m58s
    +
  18. +
  19. +

    To allow pods to be scheduled on the master node, remove the master taint from the node:

    +
    $ kubectl taint nodes --all node-role.kubernetes.io/master-
    +
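
    You can confirm that the taint was removed (replace mymasternode with your master node name):

    $ kubectl describe node mymasternode | grep -i taints
    Taints:             <none>
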
  20. +
+

Congratulations! Your Kubernetes cluster environment is ready to deploy your Oracle SOA Suite domain.

+

Refer to the official documentation to set up a Kubernetes cluster.

+

3. Get scripts and images

+

3.1 Set up the source code repository to deploy Oracle SOA Suite domains

+
    +
  1. +

    Create a working directory to set up the source code:

    +
    $ mkdir $HOME/soa_23.4.2
    +$ cd $HOME/soa_23.4.2
    +
  2. +
  3. +

    Download the WebLogic Kubernetes Operator source code and Oracle SOA Suite Kubernetes deployment scripts from the SOA repository. Required artifacts are available at OracleSOASuite/kubernetes.

    +
    $ git clone https://github.com/oracle/fmw-kubernetes.git
    +$ export WORKDIR=$HOME/soa_23.4.2/fmw-kubernetes/OracleSOASuite/kubernetes
    +
  4. +
+

3.2 Get required Docker images and add them to your local registry

+
    +
  1. +

    Pull the WebLogic Kubernetes Operator image:

    +
    $ docker pull ghcr.io/oracle/weblogic-kubernetes-operator:4.1.4
    +
  2. +
  3. +

    Obtain the Oracle Database image and Oracle SOA Suite Docker image from the Oracle Container Registry:

    +

    a. For first-time users: to pull an image from the Oracle Container Registry, navigate to https://container-registry.oracle.com and log in using the Oracle Single Sign-On (SSO) authentication service. If you do not already have SSO credentials, you can create an Oracle Account at https://profile.oracle.com/myprofile/account/create-account.jspx.

    +

    Use the web interface to accept the Oracle Standard Terms and Restrictions for the Oracle software images that you intend to deploy. Your acceptance of these terms is stored in a database that links the software images to your Oracle Single Sign-On login credentials.

    +

    To obtain the image, log in to the Oracle Container Registry:

    +
    $ docker login container-registry.oracle.com
    +

    b. Find and then pull the Oracle Database image for 12.2.0.1:

    +
    $ docker pull container-registry.oracle.com/database/enterprise:12.2.0.1-slim
    +

    c. Find and then pull the prebuilt Oracle SOA Suite 12.2.1.4 image:

    +
    $ docker pull container-registry.oracle.com/middleware/soasuite:12.2.1.4
    +
    +

    Note: This image does not contain any Oracle SOA Suite product patches and can only be used for test and development purposes.

    +
    +
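
    Optionally, confirm that the images are now available in your local Docker repository (an example check):

    $ docker images | grep -E "weblogic-kubernetes-operator|database/enterprise|soasuite"
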
  4. +
+

4. Install the WebLogic Kubernetes Operator

+

4.1 Prepare for the WebLogic Kubernetes Operator

+
    +
  1. +

    Create a namespace opns for the operator:

    +
    $ kubectl create namespace opns
    +
  2. +
  3. +

    Create a service account op-sa for the operator in the operator’s namespace:

    +
    $ kubectl create serviceaccount -n opns op-sa
    +
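
    Optionally, verify that the namespace and service account were created:

    $ kubectl get namespace opns
    $ kubectl get serviceaccount op-sa -n opns
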
  4. +
+

4.2 Install the WebLogic Kubernetes Operator

+

Use Helm to install and start the operator from the directory you just cloned:

+
$ cd ${WORKDIR}
+$ helm install weblogic-kubernetes-operator charts/weblogic-operator \
+  --namespace opns \
+  --set image=ghcr.io/oracle/weblogic-kubernetes-operator:4.1.4 \
+  --set serviceAccount=op-sa \
+  --wait
+

4.3 Verify the WebLogic Kubernetes Operator

+
    +
  1. +

    Verify that the operator’s pod is running by listing the pods in the operator’s namespace. You should see one for the operator:

    +
    $ kubectl get pods -n opns
    +
  2. +
  3. +

    Verify that the operator is up and running by viewing the operator pod’s logs:

    +
    $ kubectl logs -n opns -c weblogic-operator deployments/weblogic-operator
    +
  4. +
+

The WebLogic Kubernetes Operator v4.1.4 has been installed. Continue with the load balancer and Oracle SOA Suite domain setup.

+

5. Install the Traefik (ingress-based) load balancer

+

The WebLogic Kubernetes Operator supports these load balancers: Traefik, NGINX, and Apache. Samples are provided in the documentation.

+

This Quick Start demonstrates how to install the Traefik ingress controller to provide load balancing for an Oracle SOA Suite domain.

+
    +
  1. +

    Create a namespace for Traefik:

    +
    $ kubectl create namespace traefik
    +
  2. +
  3. +

    Set up the Helm repository for third-party services:

    +
    $ helm repo add traefik https://helm.traefik.io/traefik --force-update
    +
  4. +
  5. +

    Install the Traefik operator in the traefik namespace with the provided sample values:

    +
    $ cd ${WORKDIR}
    +$ helm install traefik traefik/traefik \
    + --namespace traefik \
    + --values charts/traefik/values.yaml \
    + --set "kubernetes.namespaces={traefik}" \
    + --set "service.type=NodePort" \
    + --wait
    +
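
    Verify that the Traefik pod is running and note the node port assigned to the web entry point (with the sample values.yaml used in this guide, it is exposed on node port 30305):

    $ kubectl get pods -n traefik
    $ kubectl get svc -n traefik
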
  6. +
+

6. Create and configure an Oracle SOA Suite domain

+

6.1 Prepare for an Oracle SOA Suite domain

+
    +
  1. +

    Create a namespace that can host Oracle SOA Suite domains. Label the namespace with weblogic-operator=enabled so that the operator manages domains in this namespace.

    +
    $ kubectl create namespace soans
    +$ kubectl label namespace soans weblogic-operator=enabled
    +
  2. +
  3. +

    Create Kubernetes secrets.

    +

    a. Create a Kubernetes secret for the domain in the same Kubernetes namespace as the domain. In this example, the username is weblogic, the password is Welcome1, and the namespace is soans:

    +
    $ cd ${WORKDIR}/create-weblogic-domain-credentials
    +$ ./create-weblogic-credentials.sh \
    +     -u weblogic \
    +     -p Welcome1 \
    +     -n soans    \
    +     -d soainfra \
    +     -s soainfra-domain-credentials
    +

    b. Create a Kubernetes secret for the RCU in the same Kubernetes namespace as the domain:

    +
      +
    • Schema user : SOA1
    • +
    • Schema password : Oradoc_db1
    • +
    • DB sys user password : Oradoc_db1
    • +
    • Domain name : soainfra
    • +
    • Domain Namespace : soans
    • +
    • Secret name : soainfra-rcu-credentials
    • +
    +
    $ cd ${WORKDIR}/create-rcu-credentials
    +$ ./create-rcu-credentials.sh \
    +      -u SOA1 \
    +      -p Oradoc_db1 \
    +      -a sys \
    +      -q Oradoc_db1 \
    +      -d soainfra \
    +      -n soans \
    +      -s soainfra-rcu-credentials
    +
  4. +
  5. +

    Create the Kubernetes persistent volume and persistent volume claim.

    +

    a. Create the Oracle SOA Suite domain home directory. First, determine whether a user with uid:gid 1000:0 already exists on your host system:

    +
    $ sudo getent passwd 1000
    +

    If this command returns a username (which is the first field), you can skip the following useradd command. If not, create the oracle user with useradd:

    +
    $ sudo useradd -u 1000 -g 0 oracle
    +

    Create the directory that will be used for the Oracle SOA Suite domain home:

    +
    $ sudo mkdir /scratch/k8s_dir
    +$ sudo chown -R 1000:0 /scratch/k8s_dir
    +

    b. The create-pv-pvc-inputs.yaml file has the following values by default:

    +
      +
    • baseName: domain
    • +
    • domainUID: soainfra
    • +
    • namespace: soans
    • +
    • weblogicDomainStoragePath: /scratch/k8s_dir
    • +
    +

    Review the file and update it if any changes are required.

    +
    $ cd ${WORKDIR}/create-weblogic-domain-pv-pvc
    +$ vim create-pv-pvc-inputs.yaml
    +

    c. Run the create-pv-pvc.sh script to create the PV and PVC configuration files:

    +
    $ cd ${WORKDIR}/create-weblogic-domain-pv-pvc
    +$ ./create-pv-pvc.sh -i create-pv-pvc-inputs.yaml -o output
    +

    d. Create the PV and PVC using the configuration files created in the previous step:

    +
    $ kubectl create -f  output/pv-pvcs/soainfra-domain-pv.yaml
    +$ kubectl create -f  output/pv-pvcs/soainfra-domain-pvc.yaml
    +
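
    With the default inputs shown above, the generated resources are named soainfra-domain-pv and soainfra-domain-pvc; verify that they are created and bound:

    $ kubectl get pv soainfra-domain-pv
    $ kubectl get pvc soainfra-domain-pvc -n soans
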
  6. +
  7. +

    Install and configure the database for the Oracle SOA Suite domain.

    +

    This step is required only when a standalone database is not already set up and you want to use the database in a container.

    + +

    The Oracle Database Docker images are supported only for non-production use. For more details, see My Oracle Support note: Oracle Support for Database Running on Docker (Doc ID 2216342.1). For production, it is suggested to use a standalone database. This example provides steps to create the database in a container.

    +
    + +

    a. Create a secret with your desired password (for example, Oradoc_db1):

    +
    $ kubectl create secret generic oracle-db-secret --from-literal='password=Oradoc_db1'
    +

    b. Create a database in a container:

    +
    $ cd ${WORKDIR}/create-oracle-db-service
    +$ ./start-db-service.sh -a oracle-db-secret -i  container-registry.oracle.com/database/enterprise:12.2.0.1-slim -p none
    +

    Once the database is successfully created, you can use the database connection string oracle-db.default.svc.cluster.local:1521/devpdb.k8s as the rcuDatabaseURL parameter in the create-domain-inputs.yaml file.

    +

    c. Create Oracle SOA Suite schemas for the domain type (for example, soaosb).

    +

    Create a secret that contains the database’s SYSDBA username and password.

    +
    $ kubectl -n default create secret generic oracle-rcu-secret \
    +   --from-literal='sys_username=sys' \
    +   --from-literal='sys_password=Oradoc_db1' \
    +   --from-literal='password=Oradoc_db1'
    +

    To install the Oracle SOA Suite schemas, run the create-rcu-schema.sh script with the following inputs:

    +
      +
    • -s <RCU PREFIX>
    • +
    • -t <SOA domain type>
    • +
    • -d <Oracle Database URL>
    • +
    • -i <SOASuite image>
    • +
    • -n <Namespace>
    • +
    • -c <Name of credentials secret containing SYSDBA username and password and RCU schema owner password>
    • +
    • -r <Comma-separated variables>
    • +
    • -l <Timeout limit in seconds (optional; default: 300)>
    • +
    +

    For example:

    +
    $ cd ${WORKDIR}/create-rcu-schema
    +$ ./create-rcu-schema.sh \
    +-s SOA1 \
    +-t soaosb \
    +-d oracle-db.default.svc.cluster.local:1521/devpdb.k8s \
    +-i container-registry.oracle.com/middleware/soasuite:12.2.1.4 \
    +-n default \
    +-c oracle-rcu-secret \
    +-r SOA_PROFILE_TYPE=SMALL,HEALTHCARE_INTEGRATION=NO
    +
  8. +
+

Now the environment is ready to start the Oracle SOA Suite domain creation.

+

6.2 Create an Oracle SOA Suite domain

+
    +
  1. +

    The sample scripts for Oracle SOA Suite domain deployment are available at ${WORKDIR}/create-soa-domain/domain-home-on-pv. You must edit create-domain-inputs.yaml (or a copy of it) to provide the details for your domain.

    +

    Update create-domain-inputs.yaml with the following values for domain creation:

    +
      +
    • domainType: soaosb
    • +
    • initialManagedServerReplicas: 1
    • +
    +
    $ cd ${WORKDIR}/create-soa-domain/domain-home-on-pv/
    +
    +$ cp create-domain-inputs.yaml create-domain-inputs.yaml.orig
    +
    +$ sed -i -e "s:domainType\: soa:domainType\: soaosb:g" create-domain-inputs.yaml
    +$ sed -i -e "s:initialManagedServerReplicas\: 2:initialManagedServerReplicas\: 1:g" create-domain-inputs.yaml
    +$ sed -i -e "s:image\: soasuite\:12.2.1.4:image\: container-registry.oracle.com/middleware/soasuite\:12.2.1.4:g" create-domain-inputs.yaml
    +
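
    Optionally, confirm the edited values before proceeding:

    $ grep -E "domainType|initialManagedServerReplicas|image:" create-domain-inputs.yaml
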
  2. +
  3. +

    Run the create-domain.sh script to create a domain:

    +
    $ cd ${WORKDIR}/create-soa-domain/domain-home-on-pv/
    +$ ./create-domain.sh -i create-domain-inputs.yaml -o output
    +
  4. +
  5. +

    Create a Kubernetes domain object:

    +

    Once the create-domain.sh script completes successfully, it generates output/weblogic-domains/soainfra/domain.yaml, which you can use to create the Kubernetes domain resource and start the domain and servers:

    +
    $ cd ${WORKDIR}/create-soa-domain/domain-home-on-pv
    +$ kubectl create -f output/weblogic-domains/soainfra/domain.yaml
    +
  6. +
  7. +

    Verify that the Kubernetes domain object named soainfra is created:

    +
    $ kubectl get domain -n soans
    +NAME       AGE
    +soainfra   3m18s
    +
  8. +
  9. +

    Once you create the domain, the introspector pod is created. It inspects the domain home and then starts the soainfra-adminserver pod. Once the soainfra-adminserver pod starts successfully, the Managed Server pods are started in parallel. Watch the soans namespace for the status of domain creation:

    +
    $ kubectl get pods -n soans -w
    +
  10. +
  11. +

    Verify that the Oracle SOA Suite domain server pods and services are created and in Ready state:

    +
    $ kubectl get all -n soans
    +
  12. +
+

6.3 Configure Traefik to access Oracle SOA Suite domain services

+
    +
  1. +

    Configure Traefik to manage ingresses created in the Oracle SOA Suite domain namespace (soans):

    +
    $ helm upgrade traefik traefik/traefik \
    +  --reuse-values \
    +  --namespace traefik \
    +  --set "kubernetes.namespaces={traefik,soans}" \
    +  --wait
    +
  2. +
  3. +

    Create an ingress for the domain in the domain namespace by using the sample Helm chart:

    +
    $ cd ${WORKDIR}
    +$ export LOADBALANCER_HOSTNAME=$(hostname -f)
    +$ helm install soa-traefik-ingress charts/ingress-per-domain \
    +--namespace soans \
    +--values charts/ingress-per-domain/values.yaml \
    +--set "traefik.hostname=${LOADBALANCER_HOSTNAME}" \
    +--set domainType=soaosb
    +
  4. +
  5. +

    Verify the details of the ingress created for the domain:

    +
    $ kubectl describe ingress soainfra-traefik -n soans
    +
  6. +
+

6.4 Verify that you can access the Oracle SOA Suite domain URL

+
    +
  1. +

    Get the LOADBALANCER_HOSTNAME for your environment:

    +
    $ export LOADBALANCER_HOSTNAME=$(hostname -f)
    +
  2. +
  3. +

    Verify that the following URLs are available for Oracle SOA Suite domains of domain type soaosb:

    +

    Credentials:

    +

    username: weblogic
    password: Welcome1

    +
    http://${LOADBALANCER_HOSTNAME}:30305/console
    +http://${LOADBALANCER_HOSTNAME}:30305/em
    +http://${LOADBALANCER_HOSTNAME}:30305/servicebus
    +http://${LOADBALANCER_HOSTNAME}:30305/soa-infra
    +http://${LOADBALANCER_HOSTNAME}:30305/soa/composer
    +http://${LOADBALANCER_HOSTNAME}:30305/integration/worklistapp
    +http://${LOADBALANCER_HOSTNAME}:30305/ess
    +http://${LOADBALANCER_HOSTNAME}:30305/EssHealthCheck
    +
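
    As a quick reachability check from the command line, you can request one of the URLs and inspect the HTTP response code (a sketch; depending on the endpoint, the response may be a redirect to the login page rather than 200):

    $ curl -s -o /dev/null -w "%{http_code}\n" http://${LOADBALANCER_HOSTNAME}:30305/soa-infra
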
  4. +

diff --git a/docs/23.4.2/soa-domains/appendix/soa-cluster-sizing-info/index.html b/docs/23.4.2/soa-domains/appendix/soa-cluster-sizing-info/index.html (new file mode 100644)

Domain resource sizing

Oracle SOA cluster sizing minimum requirements

Oracle SOA                         | Normal Usage                        | Moderate Usage                      | High Usage
Administration Server              | No of CPU core(s): 1, Memory: 4GB   | No of CPU core(s): 1, Memory: 4GB   | No of CPU core(s): 1, Memory: 4GB
Number of Managed Servers          | 2                                   | 2                                   | 4
Configurations per Managed Server  | No of CPU core(s): 2, Memory: 16GB  | No of CPU core(s): 4, Memory: 16GB  | No of CPU core(s): 6, Memory: 16-32GB
PV Storage                         | Minimum 250GB                       | Minimum 250GB                       | Minimum 500GB

diff --git a/docs/23.4.2/soa-domains/cleanup-domain-setup/index.html b/docs/23.4.2/soa-domains/cleanup-domain-setup/index.html (new file mode 100644)

Uninstall

Learn how to clean up the Oracle SOA Suite domain setup.

+

Remove the domain

+
    +
  1. +

    Remove the domain’s ingress (for example, Traefik ingress) using Helm:

    +
    $ helm uninstall soa-domain-ingress -n sample-domain1-ns
    +

    For example:

    +
    $ helm uninstall soainfra-traefik -n soans
    +
  2. +
  3. +

    Remove the domain resources by using the sample delete-weblogic-domain-resources.sh script present at ${WORKDIR}/delete-domain:

    +
    $ cd ${WORKDIR}/delete-domain
    +$ ./delete-weblogic-domain-resources.sh -d sample-domain1
    +

    For example:

    +
    $ cd ${WORKDIR}/delete-domain
    +$ ./delete-weblogic-domain-resources.sh -d soainfra
    +
  4. +
  5. +

    Use kubectl to confirm that the server pods and domain resource are deleted:

    +
    $ kubectl get pods -n sample-domain1-ns
    +$ kubectl get domains -n sample-domain1-ns
    +$ kubectl get clusters -n sample-domain1-ns
    +

    For example:

    +
    $ kubectl get pods -n soans
    +$ kubectl get domains -n soans
    +$ kubectl get clusters -n soans
    +
  6. +
+

Drop the RCU schemas

+

Follow these steps to drop the RCU schemas created for Oracle SOA Suite domains.

+

Remove the domain namespace

+
    +
  1. +

    Configure the installed ingress load balancer (for example, Traefik) to stop managing the ingresses in the domain namespace:

    +
    $ helm upgrade traefik traefik/traefik \
    +    --namespace traefik \
    +    --reuse-values \
    +    --set "kubernetes.namespaces={traefik}" \
    +    --wait
    +
  2. +
  3. +

    Delete the domain namespace:

    +
    $ kubectl delete namespace sample-domain1-ns
    +

    For example:

    +
    $ kubectl delete namespace soans
    +
  4. +
+

Remove the operator

+
    +
  1. +

    Remove the operator:

    +
    $ helm uninstall sample-weblogic-operator -n sample-weblogic-operator-ns
    +

    For example:

    +
    $ helm uninstall weblogic-kubernetes-operator -n opns
    +
  2. +
  3. +

    Remove the operator’s namespace:

    +
    $ kubectl delete namespace sample-weblogic-operator-ns
    +

    For example:

    +
    $ kubectl delete namespace opns
    +
  4. +
+

Remove the load balancer

+
    +
  1. +

    Remove the installed ingress based load balancer (for example, Traefik):

    +
    $ helm uninstall traefik -n traefik
    +
  2. +
  3. +

    Remove the Traefik namespace:

    +
    $ kubectl delete namespace traefik
    +
  4. +
+

Delete the domain home

+

To remove the domain home generated by the create-domain.sh script, manually delete the contents of the storage attached to the domain home persistent volume (PV), using appropriate privileges.

+

For example, for the domain’s persistent volume of type host_path:

+
$ rm -rf /scratch/k8s_dir/SOA/*
+

diff --git a/docs/23.4.2/soa-domains/create-or-update-image/index.html b/docs/23.4.2/soa-domains/create-or-update-image/index.html (new file mode 100644)

Create or update an image

If you have access to My Oracle Support (MOS) and need to build a new image with a patch (bundle or interim), it is recommended to use the WebLogic Image Tool to build an Oracle SOA Suite image for production deployments.

+ +

Create or update an Oracle SOA Suite Docker image using the WebLogic Image Tool

+

Using the WebLogic Image Tool, you can create a new Oracle SOA Suite Docker image (which can also include patches) or update an existing image with one or more patches (bundle patch and interim patches).

+
+

Recommendations:

+
    +
  • Use create for creating a new Oracle SOA Suite Docker image either: +
      +
    • without any patches
    • +
    • or, containing the Oracle SOA Suite binaries, bundle patch and interim patches. This is the recommended approach if you have access to the Oracle SOA Suite patches because it optimizes the size of the image.
    • +
    +
  • +
  • Use update for patching an existing Oracle SOA Suite Docker image with a single interim patch. Note that the patched image size may increase considerably due to additional image layers introduced by the patch application tool.
  • +
+
+

Set up the WebLogic Image Tool

+ +
Prerequisites
+

Verify that your environment meets the following prerequisites:

+
    +
  • Docker client and daemon on the build machine, with minimum Docker version 18.03.1.ce.
  • +
  • Bash version 4.0 or later, to enable the command completion feature.
  • +
  • JAVA_HOME environment variable set to the appropriate JDK location.
  • +
+
Set up the WebLogic Image Tool
+

To set up the WebLogic Image Tool:

+
    +
  1. +

    Create a working directory and change to it. In these steps, this directory is imagetool-setup.

    +
    $ mkdir imagetool-setup
    +$ cd imagetool-setup
    +
  2. +
  3. +

    Download the latest version of the WebLogic Image Tool from the releases page.

    +
  4. +
  5. +

    Unzip the release ZIP file to the imagetool-setup directory.

    +
  6. +
  7. +

    Execute the following commands to set up the WebLogic Image Tool on a Linux environment:

    +
    $ cd imagetool-setup/imagetool/bin
    +$ source setup.sh
    +
  8. +
+
Validate setup
+

To validate the setup of the WebLogic Image Tool:

+
    +
  1. +

    Enter the following command to retrieve the version of the WebLogic Image Tool:

    +
    $ imagetool --version
    +
  2. +
  3. +

    Enter imagetool then press the Tab key to display the available imagetool commands:

    +
    $ imagetool <TAB>
    +cache   create  help    rebase  update
    +
  4. +
+
WebLogic Image Tool build directory
+

The WebLogic Image Tool creates a temporary Docker context directory, prefixed by wlsimgbuilder_temp, every time the tool runs. Under normal circumstances, this context directory will be deleted. However, if the process is aborted or the tool is unable to remove the directory, it is safe for you to delete it manually. By default, the WebLogic Image Tool creates the Docker context directory under the user’s home directory. If you prefer to use a different directory for the temporary context, set the environment variable WLSIMG_BLDDIR:

+
$ export WLSIMG_BLDDIR="/path/to/build/dir"
+
WebLogic Image Tool cache
+

The WebLogic Image Tool maintains a local file cache store. This store is used to look up where the Java, WebLogic Server installers, and WebLogic Server patches reside in the local file system. By default, the cache store is located in the user’s $HOME/cache directory. Under this directory, the lookup information is stored in the .metadata file. All automatically downloaded patches also reside in this directory. You can change the default cache store location by setting the environment variable WLSIMG_CACHEDIR:

+
$ export WLSIMG_CACHEDIR="/path/to/cachedir"
+
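
You can list the current contents of the cache at any time:

$ imagetool cache listItems
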
Set up additional build scripts
+

Creating an Oracle SOA Suite Docker image using the WebLogic Image Tool requires additional container scripts for Oracle SOA Suite domains.

+
    +
  1. +

    Clone the docker-images repository to set up those scripts. In these steps, this directory is DOCKER_REPO:

    +
    $ cd imagetool-setup
    +$ git clone https://github.com/oracle/docker-images.git
    +
  2. +
  3. +

    Copy the additional WebLogic Image Tool build files from the operator source repository to the imagetool-setup location:

    +
    $ mkdir -p imagetool-setup/docker-images/OracleSOASuite/imagetool/12.2.1.4.0
    +$ cd imagetool-setup/docker-images/OracleSOASuite/imagetool/12.2.1.4.0
    +$ cp -rf ${WORKDIR}/imagetool-scripts/* .
    +
  4. +
+
+

Note: If you want to create a new image, continue with the following steps; otherwise, to update an existing image, see Update an image.

+
+

Create an image

+

After setting up the WebLogic Image Tool and required build scripts, follow these steps to use the WebLogic Image Tool to create a new Oracle SOA Suite Docker image.

+
Download the Oracle SOA Suite installation binaries and patches
+

You must download the required Oracle SOA Suite installation binaries and patches as listed below from the Oracle Software Delivery Cloud and save them in a directory of your choice. In these steps, this directory is download location.

+

The installation binaries and patches required for release 23.4.2 are:

+
    +
  • +

    JDK:

    +
      +
    • jdk-8u391-linux-x64.tar.gz
    • +
    +
  • +
  • +

    Fusion Middleware Infrastructure installer:

    +
      +
    • fmw_12.2.1.4.0_infrastructure.jar
    • +
    +
  • +
  • +

    Oracle SOA Suite installers:

    +
      +
    • fmw_12.2.1.4.0_soa.jar
    • +
    • fmw_12.2.1.4.0_osb.jar
    • +
    • fmw_12.2.1.4.0_b2bhealthcare.jar
    • +
    +
  • +
+ +

In this release, configuring Oracle B2B is not supported, but its installer is required for completeness.

+
+ +
    +
  • +

    Fusion Middleware Infrastructure patches:

    +
      +
    • p28186730_1394214_Generic.zip (OPATCH 13.9.4.2.14 FOR EM 13.4, 13.5 AND FMW/WLS 12.2.1.3.0, 12.2.1.4.0 AND 14.1.1.0.0)
    • +
    • p35893811_122140_Generic.zip (WLS PATCH SET UPDATE 12.2.1.4.231010)
    • +
    • p35882299_122140_Generic.zip (FMW Thirdparty Bundle Patch 12.2.1.4.231006)
    • +
    • p33950717_122140_Generic.zip (OPSS BUNDLE PATCH 12.2.1.4.220311)
    • +
    • p35868571_122140_Generic.zip (OWSM BUNDLE PATCH 12.2.1.4.231003)
    • +
    • p35735469_122140_Generic.zip (ADF BUNDLE PATCH 12.2.1.4.230823)
    • +
    • p35778804_122140_Generic.zip (Coherence 12.2.1.4 Cumulative Patch 19 (12.2.1.4.19))
    • +
    • p33093748_122140_Generic.zip (FMW PLATFORM 12.2.1.4.0 SPU FOR APRCPU2021)
    • +
    • p35476067_122140_Linux-x86-64.zip (ADR FOR WEBLOGIC SERVER 12.2.1.4.0 CPU OCT 2023)
    • +
    • p32720458_122140_Generic.zip (JDBC One Off)
    • +
    • p35671137_122140_Generic.zip (RDA release 23.4-20231017 for FMW 12.2.1.4.0)
    • +
    • p35751917_122140_Generic.zip (WebCenter Core Bundle Patch 12.2.1.4.230827 )
    • +
    • p34065178_122140_Generic.zip (OVD One Off)
    • +
    • p34542329_122140_Generic.zip (EM One Off)
    • +
    • p34765492_122140_Generic.zip (EM One Off)
    • +
    • p35474754_122140_Generic.zip (WLS One Off)
    • +
    • p34809489_122140_Generic.zip (JDEV One Off)
    • +
    +
  • +
  • +

    Oracle SOA Suite and Oracle Service Bus patches

    +
      +
    • p35748499_122140_Generic.zip (SOA Bundle Patch 12.2.1.4.230827)
    • +
    • p35815693_122140_Generic.zip (OSB BUNDLE PATCH 12.2.1.4.230915)
    • +
    • p33404495_122140_Generic.zip (SOA One-off)
    • +
    • p32827327_122140_Generic.zip (OSB One-off)
    • +
    • p32808126_122140_Generic.zip (SOA/ESS One-off)
    • +
    • p31713053_122140_Linux-x86-64.zip (One-off patch)
    • +
    +
  • +
+
Update required build files
+

The following files in the code repository location <imagetool-setup-location>/docker-images/OracleSOASuite/imagetool/12.2.1.4.0 are used for creating the image:

+
    +
  • additionalBuildCmds.txt
  • +
  • buildArgs
  • +
+
    +
  1. +

    In the buildArgs file, update all occurrences of %DOCKER_REPO% with the docker-images repository location, which is the complete path of <imagetool-setup-location>/docker-images.

    +

    For example, update:

    +

    %DOCKER_REPO%/OracleSOASuite/imagetool/12.2.1.4.0/

    +

    to:

    +

    <imagetool-setup-location>/docker-images/OracleSOASuite/imagetool/12.2.1.4.0/

    +
  2. +
  3. +

    Similarly, update the placeholders %JDK_VERSION% and %BUILDTAG% with appropriate values.

    +
  4. +
  5. +

    Update the response file <imagetool-setup-location>/docker-images/OracleFMWInfrastructure/dockerfiles/12.2.1.4/install.file to add the parameter INSTALL_TYPE="Fusion Middleware Infrastructure" in the [GENERIC] section.

    +
  6. +
+
Create the image
+
    +
  1. +

    Add a JDK package to the WebLogic Image Tool cache:

    +
    $ imagetool cache addInstaller --type jdk --version 8u391 --path <download location>/jdk-8u391-linux-x64.tar.gz
    +
  2. +
  3. +

    Add the downloaded installation binaries to the WebLogic Image Tool cache:

    +
    $ imagetool cache addInstaller --type fmw --version 12.2.1.4.0 --path <download location>/fmw_12.2.1.4.0_infrastructure.jar
    +
    +$ imagetool cache addInstaller --type soa --version 12.2.1.4.0 --path <download location>/fmw_12.2.1.4.0_soa.jar
    +
    +$ imagetool cache addInstaller --type osb --version 12.2.1.4.0 --path <download location>/fmw_12.2.1.4.0_osb.jar
    +
    +$ imagetool cache addInstaller --type b2b --version 12.2.1.4.0 --path <download location>/fmw_12.2.1.4.0_b2bhealthcare.jar
    +
    +
  4. +
  5. +

    Add the downloaded OPatch patch to the WebLogic Image Tool cache:

    +
    $ imagetool cache addEntry --key 28186730_13.9.4.2.14 --value <download location>/p28186730_1394214_Generic.zip
    +
  6. +
  7. +

    Append the --opatchBugNumber flag and the OPatch patch key to the create command in the buildArgs file:

    +
    --opatchBugNumber 28186730_13.9.4.2.14
    +
  8. +
  9. +

    Add the downloaded product patches to the WebLogic Image Tool cache:

    +
    $ imagetool cache addEntry --key 31713053_12.2.1.4.0 --value <download location>/p31713053_122140_Linux-x86-64.zip
    +
    +$ imagetool cache addEntry --key 32720458_12.2.1.4.0 --value <download location>/p32720458_122140_Generic.zip
    +
    +$ imagetool cache addEntry --key 32808126_12.2.1.4.0 --value <download location>/p32808126_122140_Generic.zip
    +
    +$ imagetool cache addEntry --key 32827327_12.2.1.4.0 --value <download location>/p32827327_122140_Generic.zip
    +
    +$ imagetool cache addEntry --key 33093748_12.2.1.4.0 --value <download location>/p33093748_122140_Generic.zip
    +
    +$ imagetool cache addEntry --key 33404495_12.2.1.4.0 --value <download location>/p33404495_122140_Generic.zip
    +
    +$ imagetool cache addEntry --key 33950717_12.2.1.4.0 --value <download location>/p33950717_122140_Generic.zip
    +
    +$ imagetool cache addEntry --key 34065178_12.2.1.4.0 --value <download location>/p34065178_122140_Generic.zip
    +
    +$ imagetool cache addEntry --key 34542329_12.2.1.4.0 --value <download location>/p34542329_122140_Generic.zip
    +
    +$ imagetool cache addEntry --key 34765492_12.2.1.4.0 --value <download location>/p34765492_122140_Generic.zip
    +
    +$ imagetool cache addEntry --key 34809489_12.2.1.4.0 --value <download location>/p34809489_122140_Generic.zip
    +
    +$ imagetool cache addEntry --key 35474754_12.2.1.4.0 --value <download location>/p35474754_122140_Generic.zip
    +
    +$ imagetool cache addEntry --key 35476067_12.2.1.4.0 --value <download location>/p35476067_122140_Linux-x86-64.zip
    +
    +$ imagetool cache addEntry --key 35671137_12.2.1.4.0 --value <download location>/p35671137_122140_Generic.zip
    +
    +$ imagetool cache addEntry --key 35735469_12.2.1.4.0 --value <download location>/p35735469_122140_Generic.zip
    +
    +$ imagetool cache addEntry --key 35748499_12.2.1.4.0 --value <download location>/p35748499_122140_Generic.zip
    +
    +$ imagetool cache addEntry --key 35751917_12.2.1.4.0 --value <download location>/p35751917_122140_Generic.zip
    +
    +$ imagetool cache addEntry --key 35778804_12.2.1.4.0 --value <download location>/p35778804_122140_Generic.zip
    +
    +$ imagetool cache addEntry --key 35815693_12.2.1.4.0 --value <download location>/p35815693_122140_Generic.zip
    +
    +$ imagetool cache addEntry --key 35868571_12.2.1.4.0 --value <download location>/p35868571_122140_Generic.zip
    +
    +$ imagetool cache addEntry --key 35882299_12.2.1.4.0 --value <download location>/p35882299_122140_Generic.zip
    +
    +$ imagetool cache addEntry --key 35893811_12.2.1.4.0 --value <download location>/p35893811_122140_Generic.zip
    +
    +
  10. +
  11. +

    Append the --patches flag and the product patch keys to the create command in the buildArgs file. The --patches list must be a comma-separated collection of patch --key values used in the imagetool cache addEntry commands above.

    +

    Sample --patches list for the product patches added in to the cache:

    +
    --patches 31713053_12.2.1.4.0,32720458_12.2.1.4.0,32808126_12.2.1.4.0,32827327_12.2.1.4.0,33093748_12.2.1.4.0,33404495_12.2.1.4.0,33950717_12.2.1.4.0,34065178_12.2.1.4.0,34542329_12.2.1.4.0,34765492_12.2.1.4.0,34809489_12.2.1.4.0,35474754_12.2.1.4.0,35476067_12.2.1.4.0,35671137_12.2.1.4.0,35735469_12.2.1.4.0,35748499_12.2.1.4.0,35751917_12.2.1.4.0,35778804_12.2.1.4.0,35815693_12.2.1.4.0,35868571_12.2.1.4.0,35882299_12.2.1.4.0,35893811_12.2.1.4.0
    +

    Example buildArgs file after appending the OPatch patch and product patches:

    +
    create
    +--jdkVersion 8u391
    +--type soa_osb_b2b
    +--version 12.2.1.4.0
    +--tag oracle/soasuite:12.2.1.4.0
    +--pull
    +--fromImage ghcr.io/oracle/oraclelinux:7-slim
    +--chown oracle:root
    +--additionalBuildCommands <imagetool-setup-location>/docker-images/OracleSOASuite/imagetool/12.2.1.4.0/additionalBuildCmds.txt
    +--additionalBuildFiles <imagetool-setup-location>/docker-images/OracleSOASuite/dockerfiles/12.2.1.4/container-scripts
    +--installerResponseFile <imagetool-setup-location>/docker-images/OracleFMWInfrastructure/dockerfiles/12.2.1.4/install.file,<imagetool-setup-location>/docker-images/OracleSOASuite/dockerfiles/12.2.1.4/install/soasuite.response,<imagetool-setup-location>/docker-images/OracleSOASuite/dockerfiles/12.2.1.4/install/osb.response,<imagetool-setup-location>/docker-images/OracleSOASuite/dockerfiles/12.2.1.4/install/b2b.response
    +--patches 31713053_12.2.1.4.0,32720458_12.2.1.4.0,32808126_12.2.1.4.0,32827327_12.2.1.4.0,33093748_12.2.1.4.0,33404495_12.2.1.4.0,33950717_12.2.1.4.0,34065178_12.2.1.4.0,34542329_12.2.1.4.0,34765492_12.2.1.4.0,34809489_12.2.1.4.0,35474754_12.2.1.4.0,35476067_12.2.1.4.0,35671137_12.2.1.4.0,35735469_12.2.1.4.0,35748499_12.2.1.4.0,35751917_12.2.1.4.0,35778804_12.2.1.4.0,35815693_12.2.1.4.0,35868571_12.2.1.4.0,35882299_12.2.1.4.0,35893811_12.2.1.4.0
    +
    +

    Note: In the buildArgs file:

    +
      +
    • --jdkVersion value must match the --version value used in the imagetool cache addInstaller command for --type jdk.
    • +
    • --version value must match the --version value used in the imagetool cache addInstaller command for --type soa.
    • +
    • --pull always pulls the latest base Linux image oraclelinux:7-slim from the Docker registry. This flag can be removed if you want to use the Linux image oraclelinux:7-slim, which is already available on the host where the SOA image is created.
    • +
    +
    +

    Refer to this page for the complete list of options available with the WebLogic Image Tool create command.

    +
  12. +
  13. +

    Create the Oracle SOA Suite image:

    +
    $ imagetool @<absolute path to buildargs file>
    +
    +

    Note: Make sure that the absolute path to the buildargs file is prepended with a @ character, as shown in the example above.

    +
    +

    For example:

    +
    $ imagetool @<imagetool-setup-location>/docker-images/OracleSOASuite/imagetool/12.2.1.4.0/buildArgs
    +
    +
    +
    + + + + + Click here to see the sample Dockerfile generated with the `imagetool` command. + + +
    + +
    +
  14. +
  15. +

    Check the created image using the docker images command:

    +
      $ docker images | grep soasuite
    +
  16. +
+

Update an image

+

After setting up the WebLogic Image Tool and required build scripts, use the WebLogic Image Tool to update an existing Oracle SOA Suite Docker image:

+
    +
  1. +

    Enter the following command to add the OPatch patch to the WebLogic Image Tool cache:

    +
    $ imagetool cache addEntry --key 28186730_13.9.4.2.13 --value <download location>/p28186730_1394213_Generic.zip
    +
  2. +
  3. +

    Execute the imagetool cache addEntry command for each patch to add the required patch(es) to the WebLogic Image Tool cache. For example, to add patch p30761841_122140_Generic.zip:

    +
    $ imagetool cache addEntry --key=30761841_12.2.1.4.0 --value <downloaded-patches-location>/p30761841_122140_Generic.zip
    +
  4. +
  5. +

    Provide the following arguments to the WebLogic Image Tool update command:

    +
      +
    • --fromImage - Identify the image that needs to be updated. In the example below, the image to be updated is soasuite:12.2.1.4.
    • +
    • --patches - Multiple patches can be specified as a comma-separated list.
    • +
    • --tag - Specify the new tag to be applied for the image being built.
    • +
    +

    Refer here for the complete list of options available with the WebLogic Image Tool update command.

    +
    +

    Note: The WebLogic Image Tool cache should have the latest OPatch zip. The WebLogic Image Tool updates OPatch in the image if it is not already up to date.

    +
    +
    Examples
    + +
    +
    + + + + + Click here to see the example 'update' command: + + +
    + +
    + +
    +
    + + + + + Click here to see the example Dockerfile generated by the WebLogic Image Tool with the '--dryRun' option: + + +
    + +
    +
  6. +
  7. +

    Check the built image using the docker images command:

    +
      $ docker images | grep soasuite
    +  soasuite   12.2.1.4-30761841
    +  2ef2a67a685b        About a minute ago   4.84GB
    +  $
    +
  8. +
+

Create an Oracle SOA Suite Docker image using Dockerfile

+

For test and development purposes, you can create an Oracle SOA Suite image using the Dockerfile. Consult the README file for important prerequisite steps, such as building or pulling the Server JRE and Oracle FMW Infrastructure Docker images, and downloading the Oracle SOA Suite installer and bundle patch binaries.

+

A prebuilt Oracle Fusion Middleware Infrastructure image, container-registry.oracle.com/middleware/fmw-infrastructure:12.2.1.4, is available at container-registry.oracle.com. We recommend that you pull and rename this image to build the Oracle SOA Suite image.

+
  $ docker pull container-registry.oracle.com/middleware/fmw-infrastructure:12.2.1.4
+  $ docker tag container-registry.oracle.com/middleware/fmw-infrastructure:12.2.1.4  oracle/fmw-infrastructure:12.2.1.4.0
+

Follow these steps to build an Oracle Fusion Middleware Infrastructure image, and then the Oracle SOA Suite image as a layer on top of that:

+
    +
  1. +

    Make a local clone of the sample repository:

    +
    $ git clone https://github.com/oracle/docker-images
    +
  2. +
  3. +

    Build the oracle/fmw-infrastructure:12.2.1.4 image:

    +
     $ cd docker-images/OracleFMWInfrastructure/dockerfiles
    + $ sh buildDockerImage.sh -v 12.2.1.4 -s
    +

    This will produce an image named oracle/fmw-infrastructure:12.2.1.4.

    +
  4. +
  5. +

    Tag the image as follows:

    +
      $ docker tag oracle/fmw-infrastructure:12.2.1.4  oracle/fmw-infrastructure:12.2.1.4.0
    +
  6. +
  7. +

    Download the Oracle SOA Suite installer from the Oracle Technology Network or e-delivery.

    +
    +

    Note: Copy the installer binaries to the same location as the Dockerfile.

    +
    +
  8. +
  9. +

    To build the Oracle SOA Suite image with patches, you must download and drop the patch zip files (for example, p29928100_122140_Generic.zip) into the patches/ folder under the version that is required. For example, for 12.2.1.4.0 the folder is 12.2.1.4/patches. Similarly, to build the image by including the OPatch patch, download and drop the OPatch patch zip file (for example, p28186730_1394211_Generic.zip) into the opatch_patch/ folder.

    +
  10. +
  11. +

    Create the Oracle SOA Suite image by running the provided script:

    +
    $ cd docker-images/OracleSOASuite/dockerfiles
    +$ ./buildDockerImage.sh -v 12.2.1.4 -s
    +

    The image produced will be named oracle/soasuite:12.2.1.4. The samples and instructions assume the Oracle SOA Suite image is named soasuite:12.2.1.4. You must rename your image to match this name, or update the samples to refer to the image you created.

    +
    $ docker tag oracle/soasuite:12.2.1.4 soasuite:12.2.1.4
    +
  12. +

diff --git a/docs/23.4.2/soa-domains/create-or-update-image/index.xml b/docs/23.4.2/soa-domains/create-or-update-image/index.xml (new file: RSS feed for this section)

diff --git a/docs/23.4.2/soa-domains/edg-guide/index.html b/docs/23.4.2/soa-domains/edg-guide/index.html (new file mode 100644)

Enterprise Deployment Guide

This Enterprise Deployment Guide is a preview release.

+
+ +

This section provides information on the Enterprise Deployment Guide (single data center reference in Maximum Availability Architectures Guide).


  • Topology: Topology for Oracle SOA Suite Enterprise Deployment on Kubernetes.
  • Setup steps: Set up Oracle SOA Suite Enterprise Deployment on Kubernetes.


diff --git a/docs/23.4.2/soa-domains/edg-guide/index.xml b/docs/23.4.2/soa-domains/edg-guide/index.xml (new file: RSS feed for this section)

diff --git a/docs/23.4.2/soa-domains/edg-guide/setup-edg/index.html b/docs/23.4.2/soa-domains/edg-guide/setup-edg/index.html (new file mode 100644)

Setup steps

This section provides recommended steps to set up Oracle SOA Suite enterprise deployment on Kubernetes to eliminate single points of failure and to provide high availability.

+
    +
  1. Set up your Kubernetes cluster
  2. +
  3. Prepare the environment + +
  4. +
  5. Create a database and the appropriate database services
  6. +
  7. Install and configure Oracle HTTP Server in the DMZ
  8. +
  9. Configure a front-end load balancer
  10. +
  11. Create worker nodes
  12. +
  13. Apply operating system changes for Coherence
  14. +
  15. Deploy WebLogic Kubernetes Operator and Oracle SOA Suite
  16. +
  17. Configure redundant persistent volume
  18. +
  19. Configure the required priority for mounts
  20. +
  21. Set front-end addresses
  22. +
  23. Enable FAN for GridLink data sources
  24. +
  25. Configure ASM
  26. +
  27. Configure coredns allocation
  28. +
  29. Adjust server’s pods Liveness Probe
  30. +
+

Set up your Kubernetes cluster

+

Prepare the environment for the Kubernetes control plane (Master nodes)

+
    +
  • +

    Create the L4/TCP listener for the load balancer (LBR).

    +
  • +
  • +

    Create the LBR backend pool with the list of control plane nodes that will be added (do not use IPs, always use hostnames).

    +
    +

    Note: We recommend maintaining the values of the following kube-api backend pool parameters within the prescribed range to minimize downtime while restarting the Kubernetes control plane or performing maintenance operations.

    +
    +
      +
    • Healthcheck interval : Within 1000 milliseconds
    • +
    • Healthcheck timeout : Within 900 milliseconds
    • +
    +
  • +
  • +

    Enable the L4 LBR to route to the backend set/pool.

    +
    +

    Note: It is important that this is an L4/TCP listener, not an HTTP/HTTPS listener.

    +
    +
  • +
  • +

    Make sure that the nodes are in ready state.

    +
  • +
  • +

    Create an ssh key (use a common ssh key to enable access from the node executing the setup to the control plane nodes).

    +
  • +
  • +

    Allow traffic in intermediate firewalls between control plane nodes and the front-end LBR. Refer to the Kubernetes documentation for the required ports.

    +
  • +
+

Set up Master nodes

+

Refer to the README to set up the master nodes.

+
+

Note: It is recommended to set up the control plane (Master) with three nodes. See the Topology for more details.

+
+

Prepare the environment

+

Configure firewalls and network

+
    +
  • Allow traffic from the load balancer (LBR) to the Oracle HTTP Server (OHS) port that will be configured (7777 by default for OHS).
  • +
  • Allow traffic from the OHS to the node port that will be configured in the worker nodes for the Administration Server (30701), SOA cluster (30801), and Service Bus cluster (30901).
  • +
  • Allow traffic from the worker nodes to the control plane front-end kube-api virtual server port and also to the front-end Oracle SOA Suite.
  • +
  • Allow traffic from worker nodes to the database listener and ONS port (1521 and 6200 by default, respectively).
  • +
+

You can use the Enterprise Deployment Guide for Oracle SOA Suite on-premise as a reference.

+

Load Oracle SOA Suite images on all the worker nodes

+

Refer to Obtain the Oracle SOA Suite Docker image to load the images on each worker node and tag appropriately.

+

Enable a shared storage location for the persistent volume

+

A shared storage device must be used from the different worker nodes. This storage hosts the Oracle SOA Suite domain directory. Initially, a single storage location is used to create a persistent volume that will host the Oracle SOA Suite domain. Mount this shared storage (NFS/NAS) from all the worker nodes using the same mount path in all of them.

+

For example, mount NFS1 (10.10.0.21:/k8nfs) in all the worker nodes to a share directory /k8nfs:

+
$ grep "k8nfs nfs"  /etc/fstab
+10.10.0.21:/k8nfs /k8nfs nfs rw,relatime,vers=3,rsize=1048576,wsize=1048576,namlen=255,hard,proto=tcp,timeo=600,retrans=2,sec=sys,mountaddr=10.10.0.21,mountvers=3,mountport=2048,mountproto=udp,local_lock=none,addr=10.10.0.21
+
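
For example, assuming the sample NFS export shown above, the share can be mounted on each worker node with:

$ sudo mkdir -p /k8nfs
$ sudo mount 10.10.0.21:/k8nfs /k8nfs
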

Later, steps are provided to configure a second storage location for high availability.

+

Create a database and the appropriate database services

+

The installation and creation of the RAC database is out of the scope of this document. Once the database is configured, the appropriate services must be created to access the schemas from the middle tier. It is critical that a dedicated service (not the default or administration service) is created for Oracle SOA Suite. Refer to Preparing the Database for an Enterprise Deployment in the Enterprise Deployment Guide for Oracle SOA Suite 12.2.1.4.

+

Install and configure Oracle HTTP Server in the DMZ

+

Follow the steps in the Enterprise Deployment Guide for Oracle SOA Suite to create two Oracle HTTP Server (OHS) instances in separate nodes from the worker nodes. To configure OHS with the back-end Kubernetes Oracle SOA Suite or Service Bus servers, you must use a port in the range of 30000 - 32767. You will use this port in the Oracle SOA Suite configuration scripts later on.

+ +

In this Kubernetes Enterprise Deployment Guide, OHS routes to the node ports configured for each Oracle SOA Suite/Service Bus cluster in the SOA domain; these node ports redirect requests to the corresponding server pods. The OHS configuration must disable DynamicServerList because the node ports are not real WebLogic listeners, and it is the node port configuration that maintains the list of available WebLogic servers. The OHS directive for the soa-infra mount looks like this:

+
<Location /soa-infra>
+  WLSRequest ON
+  DynamicServerList OFF
+  WebLogicCluster workernode1:30801,workernode2:30801,workernode3:30801
+  WLProxySSL OFF
+  WLProxySSLPassThrough OFF
+</Location>
+

Similarly, the other directives for other paths should reflect similar node port addresses.

+
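For instance, a hypothetical directive for the WebLogic Administration Console path, routing to the Administration Server node port (30701) used earlier in this guide, could look like the following (paths and ports are illustrative; follow the Enterprise Deployment Guide for the complete list of mounts):

<Location /console>
  WLSRequest ON
  DynamicServerList OFF
  WebLogicCluster workernode1:30701,workernode2:30701,workernode3:30701
  WLProxySSL OFF
  WLProxySSLPassThrough OFF
</Location>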

Configure a front-end load balancer

+

You can either use BigIp F5 LBR or any standard LBR, such as CISCO. Refer to the Enterprise Deployment Guide for Oracle SOA Suite for the required virtual servers: Preparing the Load Balancer and Firewalls for an Enterprise Deployment. The on-premises Enterprise Deployment Guide provides a detailed list of virtual servers/listeners that can be used for optimum isolation of services and traffic. For Kubernetes, at a minimum you should have a virtual server/listener for Oracle SOA Suite using the OHS listeners as back-end pool.

+
    +
  • +

    Create the load balancer’s L7/http listener.

    +
  • +
  • +

    Create a back-end pool with the list of OHS nodes/ports that will be used by Oracle SOA Suite (do not use IPs, always use hostnames).

    +
  • +
  • +

    Enable the L7/http listener load balancer to route to the OHS back-end set/pool.

    +
  • +
  • +

    Configure the front-end load balancer to route to the OHS pool.

    +
  • +
+

Create worker nodes

+

Refer to Set up Worker nodes for details.

+

Apply operating system changes for Coherence

+

Coherence requires specific settings to create clusters in a Kubernetes environment. Refer to the steps provided in the WebLogic Kubernetes Operator documentation.

+

Deploy WebLogic Kubernetes Operator and Oracle SOA Suite

+

The steps to deploy WebLogic Kubernetes Operator and the Oracle SOA Suite domain are automated with the scripts. Refer to the README for details.

+

After the Oracle SOA Suite domain is created and the servers are started, check the pods and the different services created. Once the Oracle SOA Suite managed servers reach RUNNING state (the pods are ready), check the typical Oracle SOA Suite URLs using the front-end load balancer:

+

SOA pods and services deployed and ready:

+
$ kubectl get all -n soans
+NAME                                                 READY   STATUS      RESTARTS   AGE
+pod/soaedgdomain-adminserver                         1/1     Running     0          47h
+pod/soaedgdomain-create-soa-infra-domain-job-6pq9z   0/1     Completed   0          68d
+pod/soaedgdomain-soa-server1                         1/1     Running     0          2d2h
+pod/soaedgdomain-soa-server2                         1/1     Running     0          2d2h
+
+NAME                                                 TYPE        CLUSTER-IP       EXTERNAL-IP   PORT(S)                          AGE
+service/soaedgdomain-adminserver                     ClusterIP   None             <none>        30012/TCP,7001/TCP               2d4h
+service/soaedgdomain-adminserver-ext                 NodePort    10.104.20.22     <none>        30012:30012/TCP,7001:30701/TCP   31d
+service/soaedgdomain-cluster-osb-cluster             ClusterIP   10.100.97.127    <none>        9001/TCP                         68d
+service/soaedgdomain-cluster-soa-cluster             ClusterIP   10.101.101.113   <none>        8001/TCP                         68d
+service/soaedgdomain-cluster-soa-cluster-node-port   NodePort    10.105.51.223    <none>        8001:30801/TCP                   68d
+service/soaedgdomain-osb-server1                     ClusterIP   10.110.81.153    <none>        9001/TCP                         2d4h
+service/soaedgdomain-osb-server2                     ClusterIP   10.103.220.112   <none>        9001/TCP                         2d4h
+service/soaedgdomain-osb-server3                     ClusterIP   10.97.50.117     <none>        9001/TCP                         2d4h
+service/soaedgdomain-osb-server4                     ClusterIP   10.98.48.247     <none>        9001/TCP                         2d4h
+service/soaedgdomain-osb-server5                     ClusterIP   10.102.137.176   <none>        9001/TCP                         2d4h
+service/soaedgdomain-soa-server1                     ClusterIP   None             <none>        8001/TCP                         2d4h
+service/soaedgdomain-soa-server2                     ClusterIP   None             <none>        8001/TCP                         2d4h
+service/soaedgdomain-soa-server3                     ClusterIP   10.105.108.74    <none>        8001/TCP                         2d4h
+service/soaedgdomain-soa-server4                     ClusterIP   10.109.191.102   <none>        8001/TCP                         2d4h
+service/soaedgdomain-soa-server5                     ClusterIP   10.107.2.99      <none>        8001/TCP                         2d4h
+
+NAME                                                 COMPLETIONS   DURATION   AGE
+job.batch/soaedgdomain-create-soa-infra-domain-job   1/1           4m24s      68d
+
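As a quick sanity check, you can probe the typical URLs through the front-end load balancer; the hostname soa.example.com and port 443 below are placeholders for your LBR virtual server:

$ curl -k -I https://soa.example.com:443/soa-infra
$ curl -k -I https://soa.example.com:443/soa/composer
$ curl -k -I https://soa.example.com:443/integration/worklistapp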

Configure redundant persistent volume

+

To increase the flexibility in moving Oracle SOA Suite or Service Bus pods around in the Kubernetes cluster, we use node selectors where odd server pods (soa_server1, soa_server3, soa_server5, and so on) are assigned to node selector 1 and even server pods (soa_server2, soa_server4, soa_server6, and so on) are assigned to node selector 2. The resulting configuration is:

+

Redundant PV

+

To use this configuration, follow these steps:

+
    +
  • +

    Stop the Oracle SOA Suite domain. Refer to Scripts to start and stop a domain.

    +
  • +
  • +

    Mount NFS1 on all odd worker nodes and NFS2 on all even worker nodes, as in the diagram above. For example:

    +
    MOUNT ON ODD NODE
    +[opc@olk8-w1 ~]$ grep "k8nfs nfs"  /etc/fstab
    +10.10.0.21:/k8nfs /k8nfs nfs rw,relatime,vers=3,rsize=1048576,wsize=1048576,namlen=255,hard,proto=tcp,timeo=600,retrans=2,sec=sys,mountaddr=10.10.0.21,mountvers=3,mountport=2048,mountproto=udp,local_lock=none,addr=10.10.0.21
    +
    +MOUNT ON EVEN NODE
    +[opc@olk8-w2 ~]$  grep "k8nfs nfs"  /etc/fstab
    +10.10.0.27:/k8nfs2 /k8nfs nfs rw,relatime,vers=3,rsize=1048576,wsize=1048576,namlen=255,hard,proto=tcp,timeo=600,retrans=2,sec=sys,mountaddr=10.10.0.27,mountvers=3,mountport=2048,mountproto=udp,local_lock=none,addr=10.10.0.27
    +
    +MOUNT ON ODD NODE
    +[opc@olk8-w3 ~]$  grep "k8nfs nfs"  /etc/fstab
    +10.10.0.21:/k8nfs /k8nfs nfs rw,relatime,vers=3,rsize=1048576,wsize=1048576,namlen=255,hard,proto=tcp,timeo=600,retrans=2,sec=sys,mountaddr=10.10.0.21,mountvers=3,mountport=2048,mountproto=udp,local_lock=none,addr=10.10.0.21
    +
  • +
  • +

    Copy the domain mount to an NFS replica, NFS2 (this can be done through a snapshot or through direct sftp/secure copy).

    +

    For example, if the domain is deployed in /k8nfs hosted by NFS1, after stopping the domain, secure copy the data present at /k8nfs on NFS1 to /k8nfs2 on NFS2:

    +
    $ cd /k8nfs
    +$ scp -r * user@[NFS2]:/k8nfs2
    +
  • +
  • +

    Label the odd nodes for NFS1 and the even nodes for NFS2.

    +

    For example, add the label diskgroup=dg1 for NFS1 and diskgroup=dg2 for NFS2:

    +
    $ kubectl label nodes olk8-w1 diskgroup=dg1
    +$ kubectl label nodes olk8-w2 diskgroup=dg2
    +$ kubectl label nodes olk8-w3 diskgroup=dg1
    +

    Verify the added labels using the following command:

    +
    $ kubectl get nodes --show-labels
    +

    Sample output is:

    +
    NAME      STATUS   ROLES    AGE   VERSION   LABELS
    +olk8-m1   Ready    master   10d   v1.XX.X   beta.kubernetes.io/arch=amd64,beta.kubernetes.io/os=linux,kubernetes.io/arch=amd64,kubernetes.io/hostname=olk8-m1,kubernetes.io/os=linux,node-role.kubernetes.io/master=
    +olk8-m2   Ready    master   10d   v1.XX.X   beta.kubernetes.io/arch=amd64,beta.kubernetes.io/os=linux,kubernetes.io/arch=amd64,kubernetes.io/hostname=olk8-m2,kubernetes.io/os=linux,node-role.kubernetes.io/master=
    +olk8-m3   Ready    master   10d   v1.XX.X   beta.kubernetes.io/arch=amd64,beta.kubernetes.io/os=linux,kubernetes.io/arch=amd64,kubernetes.io/hostname=olk8-m3,kubernetes.io/os=linux,node-role.kubernetes.io/master=
    +olk8-w1   Ready    <none>   10d   v1.XX.X   beta.kubernetes.io/arch=amd64,beta.kubernetes.io/os=linux,diskgroup=dg1,kubernetes.io/arch=amd64,kubernetes.io/hostname=olk8-w1,kubernetes.io/os=linux,name=admin
    +olk8-w2   Ready    <none>   10d   v1.XX.X   beta.kubernetes.io/arch=amd64,beta.kubernetes.io/os=linux,diskgroup=dg2,kubernetes.io/arch=amd64,kubernetes.io/hostname=olk8-w2,kubernetes.io/os=linux,name=wls1
    +olk8-w3   Ready    <none>   10d   v1.XX.X   beta.kubernetes.io/arch=amd64,beta.kubernetes.io/os=linux,diskgroup=dg1,kubernetes.io/arch=amd64,kubernetes.io/hostname=olk8-w3,kubernetes.io/os=linux,name=wls2
    +
  • +
  • +

    To assign the appropriate selectors in the domain:

    +

    a. Edit the domain (domain.yaml).

    +

    b. Alter the managed servers section for all the managed servers configured in the cluster, sample for soa_server1 and soa_server2 as shown below:

    +
       managedServers:
    +   - serverName: soa_server1
    +     serverPod:
    +       nodeSelector:
    +         diskgroup: dg1
    +   - serverName: soa_server2
    +     serverPod:
    +       nodeSelector:
    +         diskgroup: dg2
    +

    c. Apply the domain.yaml changes (a verification sketch follows this list):

    +
    $ kubectl apply -f domain.yaml
    +
  • +
+
+
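A minimal verification sketch (assuming the soans namespace and the diskgroup labels used above) to confirm that, after the rolling restart, the odd and even server pods land on the expected nodes:

$ kubectl get nodes -L diskgroup        # shows the diskgroup label assigned to each node
$ kubectl get pods -n soans -o wide     # shows the node on which each server pod is running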

IMPORTANT: Once this redundant PV configuration is in use, all changes that reside outside the config directory in the domain must be copied/synced to the secondary NAS mount manually. The managed servers using NFS2 in the diagram above replicate only configuration changes that modify files/artifacts under the $DOMAIN_HOME/config directory. The rest of the changes are NOT copied automatically by the WebLogic infrastructure.

+
+

For example, if you deploy an ear and specify an upload or stage directory outside the config directory, the ear files are NOT copied by WebLogic. File adapter composites place their output files in mounts accessible from the pods. The mount point and PV/PVC for these Oracle SOA Suite server file locations must be the same for all servers, and hence different from the one used for the $DOMAIN_HOME location.

+

Configure the required priority for mounts

+

When using block volume mounts for the Docker/CRIO images, the mount may take time to complete on reboot. This is typically the case for block volumes whose mount points are affected by network and storage latency. In this case, you must adjust the priority and dependencies of the container runtime on reboot; otherwise, Docker starts before the mount is ready and many images will be missing. To resolve this issue:

+
    +
  • +

    Identify the systemd units for the mounts that the container runtime depends on, including the NFS share where the Oracle SOA Suite domain resides. For example, identify the mount systemd units in the operating system:

    +
    
    +$ cat /etc/fstab  | grep "docker ext4"
    +  UUID=c07d39e4-5d8f-47af-b936-bf276cc43664  /docker ext4 defaults,_netdev,nofail 0 2
    +
    +$ ls  /run/systemd/generator/ | grep docker
    +docker.mount
    +scratch-docker.mount
    +
  • +
  • +

    Add the units to the Docker/CRIO service as a dependency in the After list (a drop-in sketch follows this list). For example, check the current After entries of the Docker service unit:

    +
    $ cat /lib/systemd/system/docker.service  | grep After
    +After=network-online.target firewalld.service containerd.service docker.mount
    +
  • +
  • +

    This guarantees that the container will start only after the required mount is ready.

    +
  • +
+
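One possible way to add the dependency is a systemd drop-in, rather than editing /lib/systemd/system/docker.service directly; this is only a sketch, and the k8nfs.mount unit name is an assumption based on the /k8nfs mount used earlier in this guide:

$ sudo mkdir -p /etc/systemd/system/docker.service.d
$ sudo tee /etc/systemd/system/docker.service.d/mount-deps.conf <<'EOF'
[Unit]
# Start Docker only after the image and domain mounts are available
After=docker.mount k8nfs.mount
EOF
$ sudo systemctl daemon-reload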

Set front-end addresses

+

For each of the Oracle SOA Suite (soa_cluster) and Service Bus (osb_cluster) clusters, set the appropriate front-end address as follows:

+
    +
  1. +

    Sign in to the WebLogic Administration Console.

    +
  2. +
  3. +

    Navigate to domain_name -> environment -> clusters.

    +
  4. +
  5. +

    Click the cluster name.

    +
  6. +
  7. +

    Click the HTTP tab.

    +
  8. +
  9. +

    Enter the Frontend Host and Frontend Port details.

    +
  10. +
+

FrontEndAddress

+
+

Note: Set the front-end details for each cluster and for the Administration Server.

+
+ +

Data sources in the default Oracle SOA Suite on Kubernetes deployment are generic data sources. With ONS auto-registration in Oracle Database 12.2 and later, it is only necessary to enable FAN for the data sources to convert them to GridLink data sources (GLDS). You can do this by using the WebLogic Administration Console or the following command, and then restarting the Administration Server and the managed servers:

+
grep -L fan-enabled $domain_home/config/jdbc/*.xml | xargs sed -i "s/<\/jdbc-data-source>/<jdbc-oracle-params><fan-enabled>true<\/fan-enabled><\/jdbc-oracle-params><\/jdbc-data-source>/g"
+

After the change is applied, verify that all data sources are marked as GridLink Data Sources in the WebLogic Administration Console.

+

Configure ASM

+

Refer to the on-premises Enterprise Deployment Guide for steps to configure Automatic Service Migration.

+

Configure coredns allocation

+
+

NOTE: This step is applicable to any Kubernetes system using coredns regardless of Oracle SOA Suite deployment. However, the worker node creation is implicit in the setup, hence it is applied in this context (post Oracle SOA Suite deployment).

+
+

Configure the coredns replicas to span both the control plane and the worker plane. Otherwise, if a restore operation is performed on the control plane, the worker nodes may stop working properly, and if the coredns pods land entirely on the worker nodes, the control plane may not function correctly when those workers are brought down for maintenance. Place at least two coredns pods on the control plane and another two on the worker nodes. The coredns footprint is low.

+
 VIRT    RES    SHR S  %CPU %MEM     TIME+ COMMAND
+
+146268  41684  29088 S   0.3  0.1  25:44.04 coredns
+

According to the CoreDNS documentation, you can estimate the amount of memory required for a CoreDNS instance (using default settings) with the following formula:

+
MB required (default settings) = (Pods + Services) / 1000 + 54
+
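For example, a cluster running 400 pods and 100 services would need roughly (400 + 100) / 1000 + 54 = 54.5 MB per CoreDNS instance with default settings.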

Hence, first label the nodes in both the control plane and the worker plane.

+
$ kubectl label nodes olk8-m1 area=dnsarea
+$ kubectl label nodes olk8-m2 area=dnsarea
+$ kubectl label nodes olk8-m3 area=dnsarea
+$ kubectl label nodes olk8-w1 area=dnsarea
+$ kubectl label nodes olk8-w2 area=dnsarea
+$ kubectl label nodes olk8-w3 area=dnsarea
+
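For example, assuming the default coredns Deployment in the kube-system namespace, you can run four replicas so that two pods can be placed on each plane once the spread constraints below are applied:

$ kubectl -n kube-system scale deployment coredns --replicas=4
$ kubectl -n kube-system get deployment coredns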

Then, update the coredns deployment to use topology spread constraints.

+
+

NOTE: Topology spread constraints are a beta feature starting in Kubernetes v1.18.

+
+

First, enable the feature gate in kube-api server and in kube-scheduler. Then, modify the coredns deployment for an appropriate spread of pods across worker and control plane nodes.

+

The coredns topology spread config details are:

+

Click below for a sample updated coredns deployment (Coredns deployment YAML).

+

The labels and spread topology changes are:

+
  labels:
+        foo: bar
+        k8s-app: kube-dns
+
  topologySpreadConstraints:
+  - labelSelector:
+      matchLabels:
+        foo: bar
+    maxSkew: 1
+    topologyKey: area
+    whenUnsatisfiable: DoNotSchedule
+

This guarantees an even distribution across the control plane and worker nodes, so that if the control plane is restored, the worker pods continue without issues, and vice versa.

+

Sample resulting coredns distribution:

+
$ kubectl get pods -A -o wide | grep coredns
+kube-system   coredns-84b49c57fd-4fz4g                         1/1     Running     0          166m    10.244.1.20   olk8-m2   <none>           <none>
+kube-system   coredns-84b49c57fd-5mrkw                         1/1     Running     0          165m    10.244.4.76   olk8-w2   <none>           <none>
+kube-system   coredns-84b49c57fd-5zm88                         1/1     Running     0          165m    10.244.2.17   olk8-m3   <none>           <none>
+kube-system   coredns-84b49c57fd-nqlwb                         1/1     Running     0          166m    10.244.4.75   olk8-w2   <none>           <none>
+

Adjust the server pods' liveness probe

+

By default, the liveness probe is configured to check liveness every 45 seconds, which might cause requests to be routed to back-end pods that are no longer available during outage scenarios. It is recommended to adjust the liveness probe values so that pods are marked as down faster on hard node failures. To configure a more aggressive probe, edit the domain and change the serverPod.livenessProbe values to the following:

+
livenessProbe:
+   failureThreshold: 1
+   initialDelaySeconds: 30
+   periodSeconds: 5
+   successThreshold: 1
+   timeoutSeconds: 3
+

Refer to the WebLogic Kubernetes Operator documentation for details on how to customize the liveness probe.

diff --git a/docs/23.4.2/soa-domains/edg-guide/topology/index.html b/docs/23.4.2/soa-domains/edg-guide/topology/index.html
new file mode 100644
index 000000000..0a8308747
--- /dev/null
+++ b/docs/23.4.2/soa-domains/edg-guide/topology/index.html
@@ -0,0 +1,5602 @@

Topology

Assumptions

+

The most relevant assumptions for the Oracle SOA Suite Kubernetes Enterprise Deployment Guide (EDG) topology are related to the database and web tiers. Typical on-premises production systems keep their high end database (such as RAC, RAC+DG, Exadata, Autonomous Database) out of the Kubernetes cluster and manage that tier separately. This implies that the database runs separately from the Kubernetes cluster hosting the application tier. The database provisioning and configuration process is out of the scope of the Oracle SOA Suite Kubernetes setup. Typically, it is administered and maintained by different teams, and the Oracle SOA Suite Enterprise Deployment Guide would need to consume just the scan address and database service to be used for the RCU and data source creation. Similarly, the demilitarized zone (DMZ) will likely remain untouched: customer investments in load balancers (LBRs) are well consolidated, and the security and DMZ policies are well established. Using an HTTP proxy in the DMZ has become a standard. Additionally, Single Sign-On (such as OAM and others) may remain for some time and is better addressed in the web tier than as part of the Kubernetes cluster.

+

For total cost of ownership reasons, the Kubernetes control plane uses a stacked etcd. An external etcd is also possible but, given the reliability of the RAFT protocol, it mainly adds setup work and requires three additional nodes just to host the etcd system.

+

Topology Diagram

+

Topology

+

Tier Description

+

Control Plane

+

The control plane consists of three nodes where the Kubernetes API server is deployed, front-ended by a load balancer (LBR). The LBR exposes the required virtual IP address (VIP) and virtual server (VS) for both the Oracle SOA Suite and the control plane itself (although an internal-only VS can also be used). This URL must be site or data center agnostic in preparation for disaster protection. The control plane and worker nodes must be able to resolve this hostname properly AND SHOULD NOT USE IPs for it. As explained in the assumptions section, the etcd tier is stacked with the kube-api servers. Each control plane node runs Kubernetes kube-api, kube-controller, kube-scheduler, and etcd instances pointing to local etcd mounts. The etcd has been tested by the Maximum Availability Architecture (MAA) team on DBFS, NFS, and local file systems. No significant performance differences were observed among these options for an Oracle SOA Suite system. However, this decision may require additional analysis in each customer case depending on the usage by other apps in the same cluster. Using etcd directly on DBFS allows shipping etcd directly to secondary data centers and allows flashing back quickly to previous versions of the control plane using DB technology. However, it creates a dependency between etcd and the database that is discouraged. The etcd snapshots CAN, however, be placed on DBFS to simplify the continuous shipping of copies to secondary regions and leaving it to Data Guard. The following options may or may not apply, depending on customer needs:

+
    +
  • Place etcd on root or local volumes, ship snapshots with regular rsyncs over reliable networks (see the sketch after this list).
  • +
  • Place etcd on NFS, ship snapshots with regular rsyncs over reliable networks.
  • +
  • Place etcd on NFS or local volume, copy snapshots to DBFS for shipping to secondary.
  • +
+
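A minimal sketch of the snapshot-and-ship approach mentioned in the first option above; the endpoints, certificate paths (kubeadm defaults), backup directory, and secondary host are placeholders:

$ ETCDCTL_API=3 etcdctl --endpoints=https://127.0.0.1:2379 \
    --cacert=/etc/kubernetes/pki/etcd/ca.crt \
    --cert=/etc/kubernetes/pki/etcd/server.crt \
    --key=/etc/kubernetes/pki/etcd/server.key \
    snapshot save /backup/etcd-snapshot-$(date +%F).db
$ rsync -a /backup/ backupuser@secondary-site:/backup/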

Application Tier

+

Oracle SOA Suite internal applications and custom deployments on SOA (composites) and WebLogic (ears, jars, wars) are run on three worker nodes in the Kubernetes cluster. The typical allocation on Kubernetes places the WebLogic Administration Server on the first node and each of the servers in the Oracle SOA Suite and/or Service Bus clusters in the other two. This can vary depending on the workloads and kube controller and scheduler decisions.

+
$ kubectl get pods -A -o wide | grep soa
+soans         soaedgdomain-adminserver                         1/1     Running     0          19h   10.244.3.127   olk8-w1   <none>           <none>
+soans         soaedgdomain-create-soa-infra-domain-job-6pq9z   0/1     Completed   0          67d   10.244.4.2     olk8-w2   <none>           <none>
+soans         soaedgdomain-soa-server1                         1/1     Running     0          22h   10.244.5.161   olk8-w3   <none>           <none>
+soans         soaedgdomain-soa-server2                         1/1     Running     0          22h   10.244.4.178   olk8-w2   <none>           <none>
+
+

The application tier is critically dependent on the Kubernetes DNS pods to be able to resolve the scan address for the RAC database.

+

A dual configuration for persistent volumes is used to avoid a single point of failure in the Oracle SOA Suite domain configuration. Two separate NFS devices are used for high availability. Refer to the Configure redundant persistent volume for details.

+

The internal Oracle SOA Suite and Service Bus clusters use some of the configuration best practices prescribed for on-premises systems. Refer to the Enterprise Deployment Guide for Oracle SOA Suite 12c (12.2.1.4):

+
    +
  • +

    Automatic Service Migration is used for the Java Message Service (JMS) and Java Transaction API (JTA) Services.

    +
  • +
  • +

    All the persistent stores are JDBC persistent stores.

    +
  • +
  • +

    Node manager is used to monitor the managed server’s health.

    +
  • +
+

There are significant differences with an on-premises enterprise deployment. The most relevant are:

+
    +
  • +

    WebLogic Kubernetes Operator is used to manage the lifecycle of the managed servers, instead of the WebLogic Administration Console or WLST.

    +
  • +
  • +

    Scale out procedures are based on the Kubernetes cluster.

    +
  • +
  • +

    The WebLogic Administration Server does NOT use its own/separate WebLogic domain directory.

    +
  • +
  • +

    Listen addresses and hostnames are “virtual” out-of-the-box, which provides a significant advantage when considering disaster protection.

    +
  • +
  • +

    The OHS/DMZ tier routes to the back-end WebLogic clusters and Administration Server, using a precise node port available in the different nodes.

    +
  • +
+

External subsystems: LBR, OHS, and Database

+

Oracle SOA Suite interacts with the following external artifacts: front-end LBR, OHS/DMZ-proxy, Database, and external authenticators.

+
    +
  • +

    The LBR is used both to route to kube-api and as a standard Oracle SOA Suite front end.

    +
  • +
  • +

    The front-end LBR Oracle SOA Suite configuration is identical to an on-premises system. For more information, see the Enterprise Deployment Guide for Oracle SOA Suite 12c 12.2.1.4.

    +
  • +
  • +

    For kube-api access, the LBR uses a layer 4 (TCP) listener. This listener forwards to the back-end control plane nodes. Note: Use hostnames and avoid IP addresses.

    +
  • +
  • +

    The Database tier consists of a RAC Database that hosts JMS/JTA persistent stores, OPSS, and MDS information. The setup of this RAC database is out of the scope of these procedures. All the standard MAA best practices for database services, processes, and DB configuration for a SOA cluster are applicable on Kubernetes. For more details, see the Enterprise Deployment Guide for Oracle SOA Suite 12c 12.2.1.4.

    +
  • +
  • +

    External auth providers can be used as described in the Oracle SOA Suite EDG procedures.

    +
  • +
  • +

    Single Sign-On through OHS with OAM is possible, as in a standard on-premises Oracle SOA Suite deployment.

    +
  • +
  • +

    OHS routes to precise node ports for the SOA Cluster, OSB Cluster, and Administration Server. This is based on the fact that Oracle SOA Suite exposes very few services, which do not change much through the system’s lifecycle. This also avoids overhead and dependencies on ingress and additional third party controllers. The OHS configuration is similar to a standard on-premises configuration, except that it disables the Dynamic Server list and uses the list of worker nodes with the precise node port for each cluster.

    +
  • +
diff --git a/docs/23.4.2/soa-domains/faq/index.html b/docs/23.4.2/soa-domains/faq/index.html
new file mode 100644
index 000000000..e7e5fc85e
--- /dev/null
+++ b/docs/23.4.2/soa-domains/faq/index.html
@@ -0,0 +1,5633 @@

Frequently Asked Questions

Overriding tuning parameters is not supported using configuration overrides

+

The WebLogic Kubernetes Operator enables you to override some of the domain configuration using configuration overrides (also called situational configuration). +See supported overrides. Overriding the tuning parameters such as MaxMessageSize and PAYLOAD, for Oracle SOA Suite domains is not supported using the configuration overrides feature. However, you can override them using the following steps:

+
    +
  1. +

    Specify the new value using the environment variable K8S_REFCONF_OVERRIDES in serverPod.env section in domain.yaml configuration file (example path: <domain-creation-output-directory>/weblogic-domains/soainfra/domain.yaml) based on the servers to which the changes are to be applied.

    +

    For example, to override the value at the Administration Server pod level:

    +
      spec:
    +    adminServer:
    +      serverPod:
    +        env:
    +        - name: K8S_REFCONF_OVERRIDES
    +          value: "-Dweblogic.MaxMessageSize=78787878"
    +        - name: USER_MEM_ARGS
    +          value: '-Djava.security.egd=file:/dev/./urandom -Xms512m -Xmx1024m '
    +      serverStartState: RUNNING
    +

    For example, to override the value at a specific cluster level (soa_cluster or osb_cluster):

    +
      apiVersion: "weblogic.oracle/v1"
    +  kind: Cluster
    +  metadata:
    +    name: soainfra-soa-cluster
    +    # Update this with the namespace your domain will run in:
    +    namespace: soans
    +    labels:
    +      # Update this with the `domainUID` of your domain:
    +      weblogic.domainUID: soainfra
    +  spec:
    +    clusterName: soa_cluster
    +    serverService:
    +      precreateService: true
    +    serverPod:
    +      env:
    +      - name: K8S_REFCONF_OVERRIDES
    +        value: "-Dsoa.payload.threshold.kb=102410"
    +
    +

    Note: When multiple system properties are specified for serverPod.env.value, make sure each system property is separated by a space.

    +
    +
  2. +
  3. +

    Apply the updated domain.yaml file:

    +
    $ kubectl apply -f domain.yaml
    +
    +

    Note: The server pod(s) will be automatically restarted (rolling restart).

    +
    +
  4. +
+

Deployments in the WebLogic Server Administration Console may display unexpected error

+

In an Oracle SOA Suite environment deployed using the operator, accessing Deployments from the WebLogic Server Administration Console home page may display the error message Unexpected error encountered while obtaining monitoring information for applications. This error does not have any functional impact and can be ignored. You can verify that the applications are in Active state from the Control tab in Summary of deployments page.

+

Enterprise Manager Console may display ADF_FACES-30200 error

+

In an Oracle SOA Suite environment deployed using the operator, the Enterprise Manager Console may intermittently display the following error when the domain servers are restarted:

+
ADF_FACES-30200: For more information, please see the server's error log for an entry beginning with: The UIViewRoot is null. Fatal exception during PhaseId: RESTORE_VIEW 1.
+

You can refresh the Enterprise Manager Console URL to successfully log in to the Console.

+

Configure the external URL access for Oracle SOA Suite composite applications

+

For Oracle SOA Suite composite applications to access external URLs over the internet (if your cluster is behind an HTTP proxy server), you must configure the following proxy parameters for the Administration Server and Managed Server pods.

+
-Dhttp.proxyHost=www-your-proxy.com  
+-Dhttp.proxyPort=proxy-port  
+-Dhttps.proxyHost=www-your-proxy.com  
+-Dhttps.proxyPort=proxy-port  
+-Dhttp.nonProxyHosts="localhost|soainfra-adminserver|soainfra-soa-server1|soainfra-osb-server1|...soainfra-soa-serverN|*.svc.cluster.local|*.your.domain.com|/var/run/docker.sock"  
+

To do this, edit the domain.yaml configuration file and append the proxy parameters to the spec.serverPod.env.JAVA_OPTIONS environment variable value.

+

For example:

+
  serverPod:
+    env:
+    - name: JAVA_OPTIONS
+      value: -Dweblogic.StdoutDebugEnabled=false -Dweblogic.ssl.Enabled=true -Dweblogic.security.SSL.ignoreHostnameVerification=true -Dhttp.proxyHost=www-your-proxy.com -Dhttp.proxyPort=proxy-port -Dhttps.proxyHost=www-your-proxy.com -Dhttps.proxyPort=proxy-port -Dhttp.nonProxyHosts="localhost|soainfra-adminserver|soainfra-soa-server1|soainfra-osb-server1|...soainfra-soa-serverN|*.svc.cluster.local|*.your.domain.com|/var/run/docker.sock"
+    - name: USER_MEM_ARGS
+      value: '-Djava.security.egd=file:/dev/./urandom -Xms256m -Xmx1024m '
+    volumeMounts:
+
+

Note: The -Dhttp.nonProxyHosts parameter must have the pod names of the Administration Server and each Managed Server. For example: soainfra-adminserver, soainfra-soa-server1, soainfra-osb-server1, and so on.

+
+

Apply the updated domain.yaml file:

+
 $ kubectl apply -f domain.yaml
+
+

Note: The server pod(s) will be automatically restarted (rolling restart).

+
+

Configure the external access for the Oracle Enterprise Scheduler WebServices WSDL URLs

+

In an Oracle SOA Suite domain deployed with the Oracle Enterprise Scheduler (ESS) component, the following ESS WebServices WSDL URLs, shown in table format on the ess/essWebServicesWsdl.jsp page, are not reachable outside the Kubernetes cluster.

+
ESSWebService
+EssAsyncCallbackService
+EssWsJobAsyncCallbackService
+

Follow these steps to configure the external access for the Oracle Enterprise Scheduler WebServices WSDL URLs:

+
    +
  1. Log in to the Administration Console URL of the domain.
    +For example: http://<LOADBALANCER-HOST>:<port>/console
  2. +
  3. In the Home Page, click Clusters. Then click the soa_cluster.
  4. +
  5. Click the HTTP tab and then click Lock & Edit in the Change Center panel.
  6. +
  7. Update the following values: +
      +
    • Frontend Host: host name of the load balancer. For example, domain1.example.com.
    • +
    • Frontend HTTP Port: load balancer port. For example, 30305.
    • +
    • Frontend HTTPS Port: load balancer https port. For example, 30443.
    • +
    +
  8. +
  9. Click Save.
  10. +
  11. Click Activate Changes in the Change Center panel.
  12. +
  13. Restart the servers in the SOA cluster.
  14. +
+
+

Note: Do not restart servers from the Administration Console.

+
+

Missing gif images in Oracle Service Bus console pipeline configuration page

+

In an Oracle SOA Suite domain environment upgraded to release 21.1.2, some gif images are not rendered in the Oracle Service Bus console pipeline configuration page, as their corresponding URL paths were not exposed via the Ingress path rules in the earlier releases (for non-SSL and SSL termination). To resolve this issue, perform the following steps to apply the latest ingress configuration:

+
$ cd ${WORKDIR}
+$ helm upgrade <helm_release_for_ingress> \
+    charts/ingress-per-domain \
+    --namespace <domain_namespace> \
+    --reuse-values
+
+

Note: helm_release_for_ingress is the ingress name used in the corresponding helm install command for the ingress installation.

+
+

For example, to upgrade the NGINX based ingress configuration:

+
$ cd ${WORKDIR}
+$ helm upgrade soa-nginx-ingress \
+    charts/ingress-per-domain \
+    --namespace soans \
+    --reuse-values
+

WebLogic Kubernetes Operator FAQs

+

See the general frequently asked questions for using the WebLogic Kubernetes Operator.

diff --git a/docs/23.4.2/soa-domains/index.html b/docs/23.4.2/soa-domains/index.html
new file mode 100644
index 000000000..34ae88e85
--- /dev/null
+++ b/docs/23.4.2/soa-domains/index.html
@@ -0,0 +1,5580 @@

Oracle SOA Suite

The WebLogic Kubernetes Operator (the “operator”) supports deployment of Oracle SOA Suite components such as Oracle Service-Oriented Architecture (SOA), Oracle Service Bus, and Oracle Enterprise Scheduler (ESS). Currently the operator supports these domain types:

+
    +
  • soa : Deploys a SOA domain with Oracle Enterprise Scheduler (ESS)
  • +
  • osb : Deploys an Oracle Service Bus domain
  • +
  • soaosb : Deploys a domain with SOA, Oracle Service Bus, and Oracle Enterprise Scheduler (ESS)
  • +
+

In this release, Oracle SOA Suite domains are supported using the “domain on a persistent volume” +model only, where the domain home is located in a persistent volume (PV).

+

The operator has several key features to assist you with deploying and managing Oracle SOA Suite domains in a Kubernetes environment. You can:

+
    +
  • Create Oracle SOA Suite instances in a Kubernetes persistent volume (PV). This PV can reside in an NFS file system or other Kubernetes volume types.
  • +
  • Start servers based on declarative startup parameters and desired states.
  • +
  • Expose the Oracle SOA Suite services and composites for external access.
  • +
  • Scale Oracle SOA Suite domains by starting and stopping Managed Servers on demand, or by integrating with a REST API.
  • +
  • Publish operator and WebLogic Server logs to Elasticsearch and interact with them in Kibana.
  • +
  • Monitor the Oracle SOA Suite instance using Prometheus and Grafana.
  • +
+

Current production release

+

The current production release for the Oracle SOA Suite domains deployment on Kubernetes is 23.4.2. This release uses the WebLogic Kubernetes Operator version 4.1.4.

+

Recent changes and known issues

+

See the Release Notes for recent changes and known issues for Oracle SOA Suite domains deployment on Kubernetes.

+

Pricing and licensing

+

See here for pricing and licensing details.

+

Limitations

+

See here for limitations in this release.

+

About this documentation

+

This documentation includes sections targeted to different audiences. To help you find what you are looking for more easily, +please consult this table of contents:

+
    +
  • +

    Quick Start explains how to quickly get an Oracle SOA Suite domain instance running using default settings. Note that this is only for development and test purposes.

    +
  • +
  • +

    Install Guide and Administration Guide provide detailed information about all aspects of using the Kubernetes operator including:

    +
      +
    • Installing and configuring the operator.
    • +
    • Using the operator to create and manage Oracle SOA Suite domains.
    • +
    • Configuring Kubernetes load balancers.
    • +
    • Configuring custom SSL certificates.
    • +
    • Configuring Elasticsearch and Kibana to access the operator and WebLogic Server log files.
    • +
    • Deploying composite applications for Oracle SOA Suite and Oracle Service Bus.
    • +
    • Patching an Oracle SOA Suite Docker image.
    • +
    • Removing domains.
    • +
    • And much more!
    • +
    +
  • +
  • +

    Enterprise Deployment Guide (preview release) provides information on the topology and set up steps for Oracle SOA Suite Enterprise Deployment on Kubernetes.

    +
  • +
+

Documentation for earlier releases

+

To view documentation for an earlier release, see:

+ +

Additional reading

+

Oracle SOA Suite domains deployment on Kubernetes leverages the WebLogic Kubernetes Operator framework.

+
    +
  • To develop an understanding of the operator, including design, architecture, domain life cycle management, and configuration overrides, review the operator documentation.
  • +
  • To learn more about the Oracle SOA Suite architecture and components, see Understanding Oracle SOA Suite.
  • +
  • To review the known issues and common questions for Oracle SOA Suite domains deployment on Kubernetes, see the frequently asked questions.
  • +
+ + + + + +
diff --git a/docs/23.4.2/soa-domains/index.xml b/docs/23.4.2/soa-domains/index.xml
new file mode 100644
index 000000000..c0f96942a
--- /dev/null
+++ b/docs/23.4.2/soa-domains/index.xml
@@ -0,0 +1,49 @@
(RSS index for Oracle SOA Suite on Oracle Fusion Middleware on Kubernetes: feed entries summarizing the Release Notes, Uninstall, and Frequently Asked Questions pages.)

diff --git a/docs/23.4.2/soa-domains/installguide/create-soa-domains/index.html b/docs/23.4.2/soa-domains/installguide/create-soa-domains/index.html
new file mode 100644
index 000000000..79f6600ba
--- /dev/null
+++ b/docs/23.4.2/soa-domains/installguide/create-soa-domains/index.html
@@ -0,0 +1,6374 @@

Create Oracle SOA Suite domains

The SOA deployment scripts demonstrate the creation of an Oracle SOA Suite domain home on an existing Kubernetes persistent volume (PV) and persistent volume claim (PVC). The scripts also generate the domain YAML file, which can then be used to start the Kubernetes artifacts of the corresponding domain.

+

Prerequisites

+

Before you begin, complete the following steps:

+
    +
  1. Review the Domain resource documentation.
  2. +
  3. Review the requirements and limitations.
  4. +
  5. Ensure that you have executed all the preliminary steps in Prepare your environment.
  6. +
  7. Ensure that the database and the WebLogic Kubernetes Operator are running.
  8. +
+

Prepare to use the create domain script

+

The sample scripts for Oracle SOA Suite domain deployment are available at ${WORKDIR}/create-soa-domain.

+

You must edit create-domain-inputs.yaml (or a copy of it) to provide the details for your domain. +Refer to the configuration parameters below to understand the information that you must +provide in this file.

+

Configuration parameters

+

The following parameters can be provided in the inputs file.

+ + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + +
ParameterDefinitionDefault
sslEnabledBoolean value that indicates whether SSL must be enabled for each WebLogic Server instance. To enable end-to-end SSL access during load balancer setup, set sslEnabled to true and also, set appropriate value for the javaOptions property as detailed in this table.false
adminPortPort number for the Administration Server inside the Kubernetes cluster.7001
adminServerSSLPortSSL port number of the Administration Server inside the Kubernetes cluster.7002
adminNodePortPort number of the Administration Server outside the Kubernetes cluster.30701
adminServerNameName of the Administration Server.AdminServer
configuredManagedServerCountNumber of Managed Server instances to generate for the domain.5
soaClusterNameName of the SOA WebLogic Server cluster instance to generate for the domain. By default, the cluster name is soa_cluster. This configuration parameter is applicable only for soa and soaosb domain types.soa_cluster
osbClusterNameName of the Oracle Service Bus WebLogic Server cluster instance to generate for the domain. By default, the cluster name is osb_cluster. This configuration parameter is applicable only for osb and soaosb domain types.osb_cluster
createDomainFilesDirDirectory on the host machine to locate all the files to create a WebLogic Server domain, including the script that is specified in the createDomainScriptName parameter. By default, this directory is set to the relative path wlst, and the create script will use the built-in WLST offline scripts in the wlst directory to create the WebLogic Server domain. An absolute path is also supported to point to an arbitrary directory in the file system. The built-in scripts can be replaced by the user-provided scripts as long as those files are in the specified directory. Files in this directory are put into a Kubernetes config map, which in turn is mounted to the createDomainScriptsMountPath, so that the Kubernetes pod can use the scripts and supporting files to create a domain home.wlst
createDomainScriptsMountPathMount path where the create domain scripts are located inside a pod. The create-domain.sh script creates a Kubernetes job to run the script (specified by the createDomainScriptName parameter) in a Kubernetes pod to create a domain home. Files in the createDomainFilesDir directory are mounted to this location in the pod, so that the Kubernetes pod can use the scripts and supporting files to create a domain home./u01/weblogic
createDomainScriptNameScript that the create domain script uses to create a WebLogic Server domain. The create-domain.sh script creates a Kubernetes job to run this script to create a domain home. The script is located in the in-pod directory that is specified by the createDomainScriptsMountPath parameter. If you need to provide your own scripts to create the domain home, instead of using the built-in scripts, you must use this property to set the name of the script that you want the create domain job to run.create-domain-job.sh
domainHomeHome directory of the SOA domain. If not specified, the value is derived from the domainUID as /shared/domains/<domainUID>./u01/oracle/user_projects/domains/soainfra
domainPVMountPathMount path of the domain persistent volume./u01/oracle/user_projects
domainUIDUnique ID that will be used to identify this particular domain. Used as the name of the generated WebLogic Server domain as well as the name of the Kubernetes domain resource. This ID must be unique across all domains in a Kubernetes cluster. This ID cannot contain any character that is not valid in a Kubernetes service name.soainfra
domainTypeType of the domain. Mandatory input for Oracle SOA Suite domains. You must provide one of the supported domain type values: soa (deploys a SOA domain with Enterprise Scheduler (ESS)), osb (deploys an Oracle Service Bus domain), and soaosb (deploys a domain with SOA, Oracle Service Bus, and Enterprise Scheduler (ESS)).soa
exposeAdminNodePortBoolean value indicating if the Administration Server is exposed outside of the Kubernetes cluster.false
exposeAdminT3ChannelBoolean value indicating if the T3 administrative channel is exposed outside the Kubernetes cluster.false
httpAccessLogInLogHomeBoolean value indicating if server HTTP access log files should be written to the same directory as logHome. If false, server HTTP access log files will be written to the directory specified in the WebLogic Server domain home configuration.true
imageSOA Suite Docker image. The operator requires Oracle SOA Suite 12.2.1.4. Refer to Obtain the Oracle SOA Suite Docker image for details on how to obtain or create the image.soasuite:12.2.1.4
imagePullPolicyOracle SOA Suite Docker image pull policy. Valid values are IfNotPresent, Always, Never.IfNotPresent
imagePullSecretNameName of the Kubernetes secret to access the Docker Store to pull the WebLogic Server Docker image. The presence of the secret will be validated when this parameter is specified.
includeServerOutInPodLogBoolean value indicating whether to include the server .out to the pod’s stdout.true
initialManagedServerReplicasNumber of Managed Servers to initially start for the domain.1
javaOptionsJava options for initiating the Administration Server and Managed Servers. A Java option can have references to one or more of the following predefined variables to obtain WebLogic Server domain information: $(DOMAIN_NAME), $(DOMAIN_HOME), $(ADMIN_NAME), $(ADMIN_PORT), and $(SERVER_NAME). If sslEnabled is set to true, add -Dweblogic.ssl.Enabled=true -Dweblogic.security.SSL.ignoreHostnameVerification=true to allow the Managed Servers to connect to the Administration Server while booting up. In this environment, the demo certificate generated by the WebLogic Server contains a host name that is different from the runtime container’s host name.-Dweblogic.StdoutDebugEnabled=false
logHomeThe in-pod location for the domain log, server logs, server out, and Node Manager log files. If not specified, the value is derived from the domainUID as /shared/logs/<domainUID>./u01/oracle/user_projects/domains/logs/soainfra
soaManagedServerNameBaseBase string used to generate Managed Server names in the SOA cluster. The default value is soa_server. This configuration parameter is applicable only for soa and soaosb domain types.soa_server
osbManagedServerNameBaseBase string used to generate Managed Server names in the Oracle Service Bus cluster. The default value is osb_server. This configuration parameter is applicable only for osb and soaosb domain types.osb_server
soaManagedServerPortPort number for each Managed Server in the SOA cluster. This configuration parameter is applicable only for soa and soaosb domain types.8001
osbManagedServerPortPort number for each Managed Server in the Oracle Service Bus cluster. This configuration parameter is applicable only for osb and soaosb domain types.9001
soaManagedServerSSLPortSSL port number for each Managed Server in the SOA cluster. This configuration parameter is applicable only for soa and soaosb domain types.8002
osbManagedServerSSLPortSSL port number for each Managed Server in the Oracle Service Bus cluster. This configuration parameter is applicable only for osb and soaosb domain types.9002
namespaceKubernetes namespace in which to create the domain.soans
persistentVolumeClaimNameName of the persistent volume claim created to host the domain home. If not specified, the value is derived from the domainUID as <domainUID>-weblogic-sample-pvc.soainfra-domain-pvc
productionModeEnabledBoolean value indicating if production mode is enabled for the domain.true
serverStartPolicyDetermines which WebLogic Server instances will be started. Valid values are Never, IfNeeded, or AdminOnly.IfNeeded
t3ChannelPortPort for the T3 channel of the NetworkAccessPoint.30012
t3PublicAddressPublic address for the T3 channel. This should be set to the public address of the Kubernetes cluster. This would typically be a load balancer address. For development environments only: In a single server (all-in-one) Kubernetes deployment, this may be set to the address of the master, or at the very least, it must be set to the address of one of the worker nodes.If not provided, the script will attempt to set it to the IP address of the Kubernetes cluster.
weblogicCredentialsSecretNameName of the Kubernetes secret for the Administration Server’s user name and password. If not specified, then the value is derived from the domainUID as <domainUID>-weblogic-credentials.soainfra-domain-credentials
weblogicImagePullSecretNameName of the Kubernetes secret for the Docker Store, used to pull the WebLogic Server image.
serverPodCpuRequest, serverPodMemoryRequest, serverPodCpuCLimit, serverPodMemoryLimitThe maximum amount of compute resources allowed, and minimum amount of compute resources required, for each server pod. Refer to the Kubernetes documentation on Managing Compute Resources for Containers for details.Resource requests and resource limits are not specified.
rcuSchemaPrefixThe schema prefix to use in the database. For example SOA1. You may wish to make this the same as the domainUID in order to simplify matching domains to their RCU schemas.SOA1
rcuDatabaseURLThe database URL.oracle-db.default.svc.cluster.local:1521/devpdb.k8s
rcuCredentialsSecretThe Kubernetes secret containing the database credentials.soainfra-rcu-credentials
persistentStoreThe persistent store for ‘JMS servers’ and ‘Transaction log store’ in the domain. Valid values are jdbc, file.jdbc
+

Note that the names of the Kubernetes resources in the generated YAML files may be formed with the +value of some of the properties specified in the create-domain-inputs.yaml file. Those properties include +the adminServerName, soaClusterName, and soaManagedServerNameBase etc. If those values contain any +characters that are invalid in a Kubernetes service name, those characters are converted to +valid values in the generated YAML files. For example, an uppercase letter is converted to a +lowercase letter and an underscore ("_") is converted to a hyphen ("-").

+

The sample demonstrates how to create an Oracle SOA Suite domain home and associated Kubernetes resources for the domain. In addition, the sample provides the capability for users to supply their own scripts +to create the domain home for other use cases. The generated domain YAML file could also be modified to cover more use cases.

+

Run the create domain script

+

Run the create domain script, specifying your inputs file and an output directory to store the +generated artifacts:

+
$ ./create-domain.sh \
+  -i create-domain-inputs.yaml \
+  -o <path to output-directory>
+

The script will perform the following steps:

+
    +
  • Create a directory for the generated Kubernetes YAML files for this domain if it does not +already exist. The path name is <path to output-directory>/weblogic-domains/<domainUID>. +If the directory already exists, its contents must be removed before using this script.
  • +
  • Create a Kubernetes job that will start up a utility Oracle SOA Suite container and run +offline WLST scripts to create the domain on the shared storage.
  • +
  • Run and wait for the job to finish.
  • +
  • Create a Kubernetes domain YAML file, domain.yaml, in the “output” directory that was created above.
  • +
  • Create a convenient utility script, delete-domain-job.yaml, to clean up the domain home +created by the create script.
  • +
+

Post install tasks

+

Review the Read Me file of each patch listed in the following MOS notes, depending on your Oracle Linux version.

+ +

Also, for the SOA bundle 12.2.1.4.230827 patch post-installation tasks, see 35748499. Refer to Section 6: Post-Installation Instructions in the Read Me file for instructions related to purge and the maven plugin.

+

You can ignore the instructions to clean PS3 maven plugin files, since it will be automated as part of container image creation.

+

Perform the following steps to purge.

+
    +
  1. +

    Create a YAML file named soapostinstall.yaml with the following sample. The necessary SQL scripts are mounted inside pod at “/soa_purge”.

    +
    +

    Note: Replace the domain namespace soans and SOA Suite image soasuite:12.2.1.4 with the details specific to your environment in the following sample YAML.

    +
    +
     apiVersion: v1
    + kind: Pod
    + metadata:
    +   name: soapostinstall
    +   namespace: soans
    +   labels:
    +     app.kubernetes.io/name: dbclient
    + spec:
    +   containers:
    +   - name: dbclient-container
    +     image: ghcr.io/oracle/oraclelinux8-instantclient:21
    +     command: ['sh', '-c', 'echo The dbclient pod is running! && sleep 3600']
    +     volumeMounts:
    +     - mountPath: "/soa_purge"
    +       name: soa-shared-volume
    +   initContainers:
    +   - name: soa-oh
    +     image: soasuite:12.2.1.4
    +     command: ['sh', '-c', "cp -rf /u01/oracle/soa/common/sql/soainfra/sql/oracle/122140/ /soa_purge"]
    +     volumeMounts:
    +     - mountPath: "/soa_purge"
    +       name: soa-shared-volume
    +   volumes:
    +     - name: soa-shared-volume
    +       emptyDir: {}   # assumed volume source used to share files between the init and main containers; adjust to your environment
    +
  2. +
  3. +

    Apply the YAML to create a Kubernetes pod named soapostinstall in the soans namespace.

    +
    $ kubectl apply -f soapostinstall.yaml
    +
  4. +
  5. +

    Start a bash shell in the soapostinstall pod.

    +
    $ kubectl exec -it -n soans soapostinstall -- bash
    +
  6. +
  7. +

    A bash shell opens in the soapostinstall pod.

    +
    [root@soapostinstall /]#
    +
  8. +
  9. +

    Reload the SOA purge scripts as a SOAINFRA user.

    +
      +
    • +

      Change the directory to the location where the scripts for post installation steps are stored.

      +
      [root@soapostinstall /]# cd /soa_purge/122140/soa_purge12/
      +
    • +
    • +

      Connect to the database as a SOAINFRA user.

      +
      [root@soapostinstall /]# sqlplus <SOAINFRA_USER>/<PASSWORD>@<DATABASE_SERVICE>
      +

      For example, if the schema prefix is SOA1, schema password is Oradoc_db1 and connection string to database is oracle-db.default.svc.cluster.local:1521/devpdb.k8s, you can connect to database as SOAINFRA user using the following command.

      +
      [root@soapostinstall /]# sqlplus SOA1_SOAINFRA/Oradoc_db1@oracle-db.default.svc.cluster.local:1521/devpdb.k8s
      +
    • +
    • +

      Execute the following script.

      +
      SQL> @soa_purge_scripts.sql
      +
      +SQL> show errors
      +
    • +
    +
  10. +
  11. +

    Verify the SOA purge scripts.

    +
      +
    • +

      Change the directory to location where the SOA purge scripts are stored.

      +
      [root@soapostinstall /]# cd /soa_purge/122140/verify12/
      +
    • +
    • +

      Connect to the database as a SOAINFRA user.

      +
      [root@soapostinstall /]# sqlplus SOA1_SOAINFRA/Oradoc_db1@oracle-db.default.svc.cluster.local:1521/devpdb.k8s
      +
    • +
    • +

      Execute the following script.

      +
      SQL> @soa_verify_scripts.sql
      +
      +SQL> show errors
      +
    • +
    +
  12. +
  13. +

    Recreate the auto purge jobs with new jobs added in 12.2.1.4.201210SOABP.

    +
      +
    • +

      Change the directory to location where scripts are stored.

      +
      [root@soapostinstall /]# cd /soa_purge/122140/soa_purge12/soa
      +
    • +
    • +

      Connect to the database as a SOAINFRA user.

      +
      [root@soapostinstall /]# sqlplus SOA1_SOAINFRA/Oradoc_db1@oracle-db.default.svc.cluster.local:1521/devpdb.k8s
      +
    • +
    • +

      Execute the following script.

      +
      SQL> @loadcompbasedpurgeprogram.sql
      +
      +SQL> show errors
      +
    • +
    +
  14. +
+

Start the domain

The domain.yaml file created by the create-domain.sh script has details about the Oracle SOA Suite Domain and Cluster Kubernetes resources. You can create the Oracle SOA Suite domain using the kubectl create -f or kubectl apply -f command:

```
$ kubectl apply -f <path to output-directory>/weblogic-domains/<domainUID>/domain.yaml
```

The default domain created by the script has the following characteristics:

  • An Administration Server named AdminServer listening on port 7001.
  • A configured cluster named soa_cluster of size 5.
  • A Managed Server named soa_server1 listening on port 8001.
  • Log files located in /shared/logs/<domainUID>.
  • SOA Infra, SOA Composer, and WorklistApp applications deployed.

Refer to the troubleshooting page to troubleshoot issues during the domain creation.


Verify the results

The create domain script verifies that the domain was created, and reports a failure if there is an error. However, it may be desirable to manually verify the domain, even if just to gain familiarity with the various Kubernetes objects created by the script.

Generated YAML files with the default inputs

The generated domain.yaml for the soaosb domain type creates both SOA and Oracle Service Bus clusters.
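For orientation, the following is a minimal, illustrative sketch of the overall shape of a generated domain.yaml under the WebLogic Kubernetes Operator 4.x (v9) schema. It is not the literal generated file, which contains many more settings; the names soainfra, soans, soainfra-domain-credentials, soainfra-domain-pvc, and soasuite:12.2.1.4 are the sample values used elsewhere in this guide.

```
# Illustrative sketch only -- the file generated by create-domain.sh is authoritative.
apiVersion: "weblogic.oracle/v9"
kind: Domain
metadata:
  name: soainfra
  namespace: soans
spec:
  domainHome: /u01/oracle/user_projects/domains/soainfra
  domainHomeSourceType: PersistentVolume
  image: "soasuite:12.2.1.4"
  imagePullPolicy: IfNotPresent
  webLogicCredentialsSecret:
    name: soainfra-domain-credentials
  serverStartPolicy: IfNeeded
  serverPod:
    volumes:
    - name: soainfra-domain-storage-volume
      persistentVolumeClaim:
        claimName: soainfra-domain-pvc
    volumeMounts:
    - mountPath: /u01/oracle/user_projects
      name: soainfra-domain-storage-volume
  clusters:
  - name: soainfra-soa-cluster
---
# In operator 4.x, each cluster is a separate Cluster resource referenced above.
apiVersion: "weblogic.oracle/v1"
kind: Cluster
metadata:
  name: soainfra-soa-cluster
  namespace: soans
spec:
  clusterName: soa_cluster
  replicas: 1
```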

Verify the domain

+

To confirm that the domain was created, enter the following command:

$ kubectl describe domain DOMAINUID -n NAMESPACE

Replace DOMAINUID with the domainUID and NAMESPACE with the actual namespace.


In the Status section of the output, the available servers and clusters are listed. Note that if this command is issued very soon after the script finishes, there may be no servers available yet, or perhaps only the Administration Server but no Managed Servers. The operator starts the Administration Server first and waits for it to become ready before starting the Managed Servers.

+

Verify the pods

+

Enter the following command to see the pods running the servers:

$ kubectl get pods -n NAMESPACE

Here is an example of the output of this command. You can verify that an Administration Server and a Managed Server for each cluster (SOA and Oracle Service Bus) are running for the soaosb domain type.

$ kubectl get pods -n soans
NAME                   READY   STATUS    RESTARTS   AGE
soainfra-adminserver   1/1     Running   0          53m
soainfra-osb-server1   1/1     Running   0          50m
soainfra-soa-server1   1/1     Running   0          50m

Verify the services

+

Enter the following command to see the services for the domain:

$ kubectl get services -n NAMESPACE

Here is an example of the output of this command. You can verify that services for the Administration Server and Managed Servers (for the SOA and Oracle Service Bus clusters) are created for the soaosb domain type.

Install Guide

Install the WebLogic Kubernetes Operator and prepare and deploy Oracle SOA Suite domains.

  • Requirements and pricing

    Understand the system requirements, limitations, licensing, and pricing for deploying and running Oracle SOA Suite domains with the WebLogic Kubernetes Operator, including the SOA cluster sizing.

  • Prepare your environment

    Prepare for creating Oracle SOA Suite domains, including required secrets creation, persistent volume and volume claim creation, database creation, and database schema creation.

  • Create Oracle SOA Suite domains

    Create an Oracle SOA Suite domain home on an existing PV or PVC, and create the domain resource YAML file for deploying the generated Oracle SOA Suite domain.
Prepare your environment

To prepare your Oracle SOA Suite in Kubernetes environment, complete the following steps.


Refer to the troubleshooting page to troubleshoot issues during the domain deployment process.

  1. Set up your Kubernetes cluster

  2. Install Helm

  3. Get dependent images

  4. Set up the code repository to deploy Oracle SOA Suite domains

  5. Obtain the Oracle SOA Suite Docker image

  6. Install the WebLogic Kubernetes Operator

  7. Prepare the environment for Oracle SOA Suite domains

     a. Create a namespace for an Oracle SOA Suite domain

     b. Create persistent storage for an Oracle SOA Suite domain

     c. Create a Kubernetes secret with domain credentials

     d. Create a Kubernetes secret with the RCU credentials

     e. Configure access to your database

     f. Run the Repository Creation Utility to set up your database schemas

  8. Create an Oracle SOA Suite domain

Set up your Kubernetes cluster

+

Refer to the official Kubernetes setup documentation to set up a production-grade Kubernetes cluster.

+

Install Helm

+

The operator uses Helm to create and deploy the necessary resources and then run the operator in a Kubernetes cluster. For Helm installation and usage information, see here.

+

Get dependent images

+

Obtain dependent images and add them to your local registry.

  1. For first-time users, to pull an image from the Oracle Container Registry, navigate to https://container-registry.oracle.com and log in using the Oracle Single Sign-On (SSO) authentication service. If you do not already have an SSO account, you can create an Oracle Account here.

     Use the web interface to accept the Oracle Standard Terms and Restrictions for the Oracle software images that you intend to deploy. Your acceptance of these terms is stored in a database that links the software images to your Oracle Single Sign-On login credentials.

     Log in to the Oracle Container Registry (container-registry.oracle.com) from your Docker client:

     $ docker login container-registry.oracle.com

  2. Pull the operator image:

     $ docker pull ghcr.io/oracle/weblogic-kubernetes-operator:4.1.4

Set up the code repository to deploy Oracle SOA Suite domains

+

Oracle SOA Suite domain deployment on Kubernetes leverages the WebLogic Kubernetes Operator infrastructure. To deploy an Oracle SOA Suite domain, you must set up the deployment scripts.

  1. Create a working directory to set up the source code:

     $ mkdir $HOME/soa_23.4.2
     $ cd $HOME/soa_23.4.2

  2. Download the WebLogic Kubernetes Operator source code and the Oracle SOA Suite Kubernetes deployment scripts from the SOA repository. The required artifacts are available at OracleSOASuite/kubernetes.

     $ git clone https://github.com/oracle/fmw-kubernetes.git
     $ export WORKDIR=$HOME/soa_23.4.2/fmw-kubernetes/OracleSOASuite/kubernetes

Obtain the Oracle SOA Suite Docker image

+

The Oracle SOA Suite image with the latest bundle patch and required interim patches is prebuilt by Oracle and includes Oracle SOA Suite 12.2.1.4.0, the latest Patch Set Update (PSU), and other fixes released with the Critical Patch Update (CPU) program. The Oracle Container Registry hosts container images based on both Oracle Linux 7 (ol7) and 8 (ol8). These are the only images supported for production deployments. Obtain the Oracle SOA Suite images using either of the following methods:

  1. Download from the Oracle Container Registry:

     • Log in to the Oracle Container Registry, navigate to Middleware > soasuite_cpu, and accept the license agreement if you have not already done so.

     • Log in to the Oracle Container Registry (container-registry.oracle.com) from your Docker client:

       $ docker login container-registry.oracle.com

     • Pull the Oracle Linux 7 or 8 based image. For example:

       $ docker pull container-registry.oracle.com/middleware/soasuite_cpu:12.2.1.4-jdk8-<ol7 or ol8>-<TAG>

  2. Download from My Oracle Support:

     • Download patch 35908803 for the Oracle Linux 7 based container image or patch 35915091 for the Oracle Linux 8 based container image from My Oracle Support (MOS).

     • Unzip the downloaded patch zip file.

     • Load the image archive using the docker load command. For example:

       $ docker load < soasuite-12.2.1.4-jdk8-<ol7 or ol8>-<TAG>.tar
         Loaded image: oracle/soasuite:12.2.1.4-jdk8-<ol7 or ol8>-<TAG>
       $

     • Run the docker inspect command to verify that the downloaded image is the latest released image. The value of the label com.oracle.weblogic.imagetool.buildid must be 40af8c36-79ce-466e-8915-483547b6aa4b for the Oracle Linux 7 based container image and abe2fde0-4f44-422b-a50f-23fdd257bcc1 for the Oracle Linux 8 based container image. For example:

       $ docker inspect --format='{{ index .Config.Labels "com.oracle.weblogic.imagetool.buildid" }}' oracle/soasuite:12.2.1.4-jdk8-ol7-231016.113116
           40af8c36-79ce-466e-8915-483547b6aa4b
       $

If you want to build and use an Oracle SOA Suite Docker image with any additional bundle patch or interim patches that are not part of the image obtained from My Oracle Support, then follow these steps to create the image.

Note: The default Oracle SOA Suite image name used for Oracle SOA Suite domain deployment is soasuite:12.2.1.4. The image obtained must be tagged as soasuite:12.2.1.4 using the docker tag command. If you want to use a different name for the image, make sure to update the new image tag name in the create-domain-inputs.yaml file and also in other instances where the soasuite:12.2.1.4 image name is used.

Install the WebLogic Kubernetes Operator

+

The WebLogic Kubernetes Operator supports the deployment of Oracle SOA Suite domains in the Kubernetes environment. Follow the steps in this document to install the operator.

+
+

Note: Optionally, you can follow these steps to send the contents of the operator’s logs to Elasticsearch.

+
+

In the following example commands to install the WebLogic Kubernetes Operator, opns is the namespace and op-sa is the service account created for the operator:

$ kubectl create namespace opns
$ kubectl create serviceaccount -n opns op-sa
$ cd ${WORKDIR}
$ helm install weblogic-kubernetes-operator charts/weblogic-operator --namespace opns --set serviceAccount=op-sa --set "javaLoggingLevel=FINE" --wait
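As an optional sanity check (illustrative commands, assuming the opns namespace used above and the default Deployment name weblogic-operator created by the chart), confirm that the operator is running before proceeding:

```
$ helm list -n opns
$ kubectl get pods -n opns
$ kubectl logs -n opns deployment/weblogic-operator --tail=20
```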

This Helm release deploys the operator with the default behavior of managing Oracle SOA Suite domains in all Kubernetes namespaces with the label weblogic-operator=enabled.

+

Prepare the environment for Oracle SOA Suite domains

+

Create a namespace for an Oracle SOA Suite domain

+

Create a Kubernetes namespace (for example, soans) for the domain unless you intend to use the default namespace. Label the namespace with weblogic-operator=enabled so that the operator manages the domain. Use the new namespace in the remaining steps in this section. For details, see Prepare to run a domain.

$ kubectl create namespace soans
$ kubectl label namespace soans weblogic-operator=enabled
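Optionally, verify that the label was applied before proceeding (an illustrative check using the sample soans namespace):

```
$ kubectl get namespace soans --show-labels
```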

Create persistent storage for an Oracle SOA Suite domain

+

In the Kubernetes namespace you created, create the PV and PVC for the domain by running the create-pv-pvc.sh script. Follow the instructions for using the script to create a dedicated PV and PVC for the Oracle SOA Suite domain.

  • Review the configuration parameters for PV creation here. Based on your requirements, update the values in the create-pv-pvc-inputs.yaml file located at ${WORKDIR}/create-weblogic-domain-pv-pvc/. Sample configuration parameter values for an Oracle SOA Suite domain are:

    • baseName: domain
    • domainUID: soainfra
    • namespace: soans
    • weblogicDomainStorageType: HOST_PATH
    • weblogicDomainStoragePath: /scratch/k8s_dir/SOA

  • Ensure that the path specified by the weblogicDomainStoragePath property exists and has ownership 1000:0. If not, create it as follows:

    $ sudo mkdir /scratch/k8s_dir/SOA
    $ sudo chown -R 1000:0 /scratch/k8s_dir/SOA

  • Run the create-pv-pvc.sh script:

    $ cd ${WORKDIR}/create-weblogic-domain-pv-pvc
    $ ./create-pv-pvc.sh -i create-pv-pvc-inputs.yaml -o output_soainfra

  • The create-pv-pvc.sh script creates a subdirectory pv-pvcs under the given output directory and generates two YAML configuration files, one for the PV and one for the PVC. Apply these two YAML files to create the PV and PVC Kubernetes resources using the kubectl create -f command:

    $ kubectl create -f output_soainfra/pv-pvcs/soainfra-domain-pv.yaml
    $ kubectl create -f output_soainfra/pv-pvcs/soainfra-domain-pvc.yaml
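Before moving on, it can help to confirm that the PV and PVC were created and are bound to each other. This is an optional check, assuming the sample resource names generated above; both should report a STATUS of Bound:

```
$ kubectl get pv soainfra-domain-pv
$ kubectl get pvc soainfra-domain-pvc -n soans
```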

Create a Kubernetes secret with domain credentials

+

Create a Kubernetes secret containing the username and password of the administrative account, in the same Kubernetes namespace as the domain:

$ cd ${WORKDIR}/create-weblogic-domain-credentials
$ ./create-weblogic-credentials.sh -u weblogic -p Welcome1 -n soans -d soainfra -s soainfra-domain-credentials

For more details, see this document.

+

You can check the secret with the kubectl get secret command.

+

For example:

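Using the sample secret name soainfra-domain-credentials created above (an illustrative check; kubectl describe lists the stored keys without revealing their values):

```
$ kubectl get secret soainfra-domain-credentials -n soans
$ kubectl describe secret soainfra-domain-credentials -n soans
```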

Create a Kubernetes secret with the RCU credentials

+

You also need to create a Kubernetes secret containing the credentials for the database schemas. When you create your domain, it will obtain the RCU credentials from this secret.

+

Use the provided sample script to create the secret:

$ cd ${WORKDIR}/create-rcu-credentials
$ ./create-rcu-credentials.sh \
  -u SOA1 \
  -p Oradoc_db1 \
  -a sys \
  -q Oradoc_db1 \
  -d soainfra \
  -n soans \
  -s soainfra-rcu-credentials

The parameter values are:

  • -u username for the schema owner (regular user), required.
  • -p password for the schema owner (regular user), required.
  • -a username for the SYSDBA user, required.
  • -q password for the SYSDBA user, required.
  • -d domainUID. Example: soainfra
  • -n namespace. Example: soans
  • -s secretName. Example: soainfra-rcu-credentials

You can confirm the secret was created as expected with the kubectl get secret command.


Configure access to your database

+

Oracle SOA Suite domains require a database with the necessary schemas installed in it. The Repository Creation Utility (RCU) allows you to create those schemas. You must set up the database before you create your domain. Running Oracle SOA Suite in Kubernetes adds no additional database requirements; the same existing requirements apply.

+

For production deployments, you must set up and use a standalone (non-container) database running outside of Kubernetes.

+

Before creating a domain, you will need to set up the necessary schemas in your database.

+

Run the Repository Creation Utility to set up your database schemas

+
Create schemas
+

Before running the RCU pod, create a secret in the same namespace that contains the database's SYSDBA username and password in its sys_username and sys_password fields, and a password of your choice for the RCU schemas in its password field.

+

For example:

$ kubectl -n default create secret generic oracle-rcu-secret \
  --from-literal='sys_username=sys' \
  --from-literal='sys_password=MY_SYS_PASSWORD' \
  --from-literal='password=MY_RCU_SCHEMA_PASSWORD'

To create the database schemas for Oracle SOA Suite, run the create-rcu-schema.sh script.

+

For example:

$ cd ${WORKDIR}/create-rcu-schema

$ ./create-rcu-schema.sh -h
usage: ./create-rcu-schema.sh -s <schemaPrefix> [-t <schemaType>] [-d <dburl>] [-n <namespace>] [-c <credentialsSecretName>] [-p <docker-store>] [-i <image>] [-u <imagePullPolicy>] [-o <rcuOutputDir>] [-r <customVariables>] [-l <timeoutLimit>] [-h]
  -s RCU Schema Prefix (required)
  -t RCU Schema Type (optional)
      (supported values: osb,soa,soaosb)
  -d RCU Oracle Database URL (optional)
      (default: oracle-db.default.svc.cluster.local:1521/devpdb.k8s)
  -n Namespace for RCU pod (optional)
      (default: default)
  -c Name of credentials secret (optional)
       (default: oracle-rcu-secret)
       Must contain SYSDBA username at key 'sys_username',
       SYSDBA password at key 'sys_password',
       and RCU schema owner password at key 'password'.
  -p OracleSOASuite ImagePullSecret (optional)
      (default: none)
  -i OracleSOASuite Image (optional)
      (default: soasuite:12.2.1.4)
  -u OracleSOASuite ImagePullPolicy (optional)
      (default: IfNotPresent)
  -o Output directory for the generated YAML file (optional)
      (default: rcuoutput)
  -r Comma-separated custom variables in the format variablename=value (optional)
      (default: none)
  -l Timeout limit in seconds (optional)
      (default: 300)
  -h Help
NOTE: The c, p, i, u, and o arguments are ignored if an RCU pod is already running in the namespace.


$ ./create-rcu-schema.sh \
  -s SOA1 \
  -t soaosb \
  -d oracle-db.default.svc.cluster.local:1521/devpdb.k8s \
  -n default \
  -c oracle-rcu-secret \
  -i soasuite:12.2.1.4 \
  -r SOA_PROFILE_TYPE=SMALL,HEALTHCARE_INTEGRATION=NO

For Oracle SOA Suite domains, the create-rcu-schema.sh script supports:

  • Domain types: soa, osb, and soaosb. You must specify one of these using the -t flag.
  • For Oracle SOA Suite, you must specify the Oracle SOA schema profile type using the -r flag. For example, -r SOA_PROFILE_TYPE=SMALL. Supported values for SOA_PROFILE_TYPE are SMALL, MED, and LARGE.
+

Note: To use the LARGE schema profile type, make sure that the partitioning feature is enabled in the Oracle Database.

+
+

Make sure that you maintain the association between the database schemas and the matching domain just like you did in a non-Kubernetes environment. There is no specific functionality provided to help with this.

+
Drop schemas
+

If you want to drop a schema, you can use the drop-rcu-schema.sh script.

+

For example:

$ cd ${WORKDIR}/create-rcu-schema

$ ./drop-rcu-schema.sh -h
usage: ./drop-rcu-schema.sh -s <schemaPrefix> [-t <schemaType>] [-d <dburl>] [-n <namespace>] [-c <credentialsSecretName>] [-p <docker-store>] [-i <image>] [-u <imagePullPolicy>] [-o <rcuOutputDir>] [-r <customVariables>] [-h]
  -s RCU Schema Prefix (required)
  -t RCU Schema Type (optional)
      (supported values: osb,soa,soaosb)
  -d RCU Oracle Database URL (optional)
      (default: oracle-db.default.svc.cluster.local:1521/devpdb.k8s)
  -n Namespace for RCU pod (optional)
      (default: default)
  -c Name of credentials secret (optional)
       (default: oracle-rcu-secret)
       Must contain SYSDBA username at key 'sys_username',
       SYSDBA password at key 'sys_password',
       and RCU schema owner password at key 'password'.
  -p OracleSOASuite ImagePullSecret (optional)
      (default: none)
  -i OracleSOASuite Image (optional)
      (default: soasuite:12.2.1.4)
  -u OracleSOASuite ImagePullPolicy (optional)
      (default: IfNotPresent)
  -o Output directory for the generated YAML file (optional)
      (default: rcuoutput)
  -r Comma-separated custom variables in the format variablename=value (optional)
      (default: none)
  -h Help
NOTE: The c, p, i, u, and o arguments are ignored if an RCU pod is already running in the namespace.


$ ./drop-rcu-schema.sh \
  -s SOA1 \
  -t soaosb \
  -d oracle-db.default.svc.cluster.local:1521/devpdb.k8s \
  -n default \
  -c oracle-rcu-secret \
  -r SOA_PROFILE_TYPE=SMALL,HEALTHCARE_INTEGRATION=NO
+

For Oracle SOA Suite domains, the drop-rcu-schema.sh script supports:

  • Domain types: soa, osb, and soaosb. You must specify one of these using the -t flag.
  • For Oracle SOA Suite, you must specify the Oracle SOA schema profile type using the -r flag. For example, -r SOA_PROFILE_TYPE=SMALL. Supported values for SOA_PROFILE_TYPE are SMALL, MED, and LARGE.

Create an Oracle SOA Suite domain

+

Now that you have your Docker images and you have created your RCU schemas, you are ready to create your domain. To continue, follow the instructions in Create Oracle SOA Suite domains.


Requirements and pricing

This section provides information about the system requirements, limitations, licensing, and pricing for deploying and running Oracle SOA Suite domains with the WebLogic Kubernetes Operator.

+

System requirements for Oracle SOA Suite domains

+

Release 23.4.2 has the following system requirements:

  • Kubernetes 1.24.0+, 1.25.0+, 1.26.2+, and 1.27.2+ (check with kubectl version).
  • Docker 19.03.11+ (check with docker version) or CRI-O 1.20.2+ (check with crictl version | grep RuntimeVersion).
  • Flannel networking v0.13.0-amd64 or later (check with docker images | grep flannel), or Calico networking v3.16.1 or later.
  • Helm 3.10.2+ (check with helm version --client --short).
  • WebLogic Kubernetes Operator 4.1.4 (see the operator 4.1.4 release page).
  • You must have the cluster-admin role to install the operator. The operator does not need the cluster-admin role at runtime. For more information, see the role-based access control (RBAC) documentation.
  • Running SOA in non-Linux containers is not currently supported.
  • Container images based on Oracle Linux 8 are now supported. My Oracle Support and the Oracle Container Registry host container images based on both Oracle Linux 7 and 8.
  • Additionally, see the Oracle SOA Suite documentation for other requirements, such as the database version.

See here for resource sizing information for Oracle SOA Suite domains set up on a Kubernetes cluster.

+

Limitations

+

Compared to running a WebLogic Server domain in Kubernetes using the operator, the following limitations currently exist for Oracle SOA Suite domains:

  • In this release, Oracle SOA Suite domains are supported using the domain on a persistent volume model only, where the domain home is located in a persistent volume (PV).
  • The "domain in image" and "model in image" models are not supported. Also, "WebLogic Deploy Tooling (WDT)" based deployments are currently not supported.
  • Only configured clusters are supported. Dynamic clusters are not supported for Oracle SOA Suite domains. Note that you can still use all of the scaling features, but you need to define the maximum size of your cluster at domain creation time. Mixed clusters (configured servers targeted to a dynamic cluster) are not supported.
  • The WebLogic Logging Exporter project has been archived. Users are encouraged to use Fluentd or Logstash.
  • The WebLogic Monitoring Exporter currently supports WebLogic MBean trees only. Support for JRF and Oracle SOA Suite MBeans is not available. Also, a metrics dashboard specific to Oracle SOA Suite is not available. Instead, use the WebLogic Server dashboard to monitor the Oracle SOA Suite server metrics in Grafana.
  • Some features such as multicast, multitenancy, production redeployment, and Node Manager (although it is used internally for the liveness probe and to start WebLogic Server instances) are not supported in this release.
  • Features such as Java Messaging Service whole server migration and consensus leasing are not supported in this release.
  • Maximum availability architecture (Oracle SOA Suite EDG setup) is available as a preview.
  • Enabling or disabling memory resiliency for Oracle Service Bus using the Enterprise Manager Console is not supported in this release.
  • Zero downtime upgrade (ZDT) of the domain is not supported.

For up-to-date information about the features of WebLogic Server that are supported in Kubernetes environments, see My Oracle Support Doc ID 2349228.1.

+

Pricing and licensing

+

The WebLogic Kubernetes Operator and Oracle Linux are open source and free; WebLogic Server requires licenses in any environment. All WebLogic Server licenses are suitable for deploying WebLogic to containers and Kubernetes, including free single desktop Oracle Technology Network (OTN) developer licenses. See the following sections for more detailed information:


Oracle SOA Suite

+

Oracle SOA Suite is licensed as an option to Oracle WebLogic Suite. Valid licenses are needed in at least one of the following combinations:

  • WebLogic Suite and Oracle SOA Suite
  • WebLogic Suite and Oracle Service Bus
  • WebLogic Suite and Oracle BPEL Engine

For more information, see the Fusion Middleware Licensing Information User Manual - Oracle SOA Suite and the following sections.

+

Oracle Linux

+

Oracle Linux is under an open source license and is completely free to download and use.

+

Note that Oracle SOA Suite licenses that include support do not include customer entitlements for direct access to Oracle Linux support or Unbreakable Linux Network (to directly access the standalone Oracle Linux patches). The latest Oracle Linux patches are included with the latest Oracle SOA Suite images.

+

Oracle Java

+

Oracle support for Java is included with Oracle SOA Suite licenses when Java is used for running WebLogic and Coherence servers or clients.

+

For more information, see the Fusion Middleware Licensing Information User Manual.

+

Oracle SOA Suite images

+

Oracle provides two different types of Oracle SOA Suite images:

  • Critical Patch Update (CPU) images: Images with the latest Oracle SOA Suite, Fusion Middleware Infrastructure, and Coherence PSUs, and other fixes released by the Critical Patch Update (CPU) program. CPU images are intended for production use.

  • General Availability (GA) images: Images that are not intended for production use and do not include Oracle SOA Suite, WebLogic, Fusion Middleware Infrastructure, or Coherence PSUs.

All Oracle SOA Suite licenses, including free Oracle Technology Network (OTN) developer licenses, include access to the latest General Availability (GA) Oracle SOA Suite images, which bundle Java SE.

+

Customers with access to Oracle SOA Suite support additionally have:

  • Access to Critical Patch Update (CPU) Oracle SOA Suite images, which bundle Java SE.
  • Access to Oracle SOA Suite patches.
  • Oracle support for Oracle SOA Suite images.

WebLogic Kubernetes Operator

+

The WebLogic Kubernetes Operator is open source and free, licensed under the Universal Permissive license (UPL), Version 1.0. For support details, see Get help.

+

Additional references


Patch and upgrade

Patch an existing Oracle SOA Suite image or upgrade the infrastructure, such as upgrading the underlying Kubernetes cluster to a new release and upgrading the WebLogic Kubernetes Operator release.

  • Patch an image

    Create a patched Oracle SOA Suite image using the WebLogic Image Tool.

  • Upgrade an operator release

    Upgrade the WebLogic Kubernetes Operator release to a newer version.

  • Upgrade a Kubernetes cluster

    Upgrade the underlying Kubernetes cluster version in a running SOA Kubernetes environment.

Patch an image

Oracle releases Oracle SOA Suite images regularly with the latest bundle and recommended interim patches in My Oracle Support (MOS). However, if you need to create images with new bundle and interim patches, you can build these images using the WebLogic Image Tool.

+

If you have access to the Oracle SOA Suite patches, you can patch an existing Oracle SOA Suite image with a bundle patch and interim patches. Oracle recommends that you use the WebLogic Image Tool to patch the Oracle SOA Suite image.

+
+

Recommendations:

  • Use the WebLogic Image Tool create feature for patching the Oracle SOA Suite Docker image with a bundle patch and multiple interim patches. This is the recommended approach because it optimizes the size of the image.
  • Use the WebLogic Image Tool update feature for patching the Oracle SOA Suite Docker image with a single interim patch. Note that the patched image size may increase considerably due to additional image layers introduced by the patch application tool.
+

Apply the patched Oracle SOA Suite image

+

To update an Oracle SOA Suite domain with a patched image, first make sure the patched image is pulled or created and available on the nodes in your Kubernetes cluster. Once the patched image is available, follow these steps to update the Oracle SOA Suite domain with the patched image:

+ +

Stop all servers

+
+

Note: The following steps are applicable only for non-Zero Downtime Patching. For Zero Downtime Patching, go to Address post-installation requirements.

+
+

Before applying the patch, stop all servers in the domain:

  1. In the domain.yaml configuration file, update the spec.serverStartPolicy field value to NEVER (for operator 3.x) or Never (for operator 4.x). Alternatively, see the kubectl patch example after this list.

  2. Shut down the domain (stop all servers) by applying the updated domain.yaml file:

     $ kubectl apply -f domain.yaml
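If you prefer not to edit domain.yaml by hand, the same change can be made directly on the running resource with kubectl patch. This is an illustrative alternative, assuming the sample domain soainfra in namespace soans and operator 4.x:

```
$ kubectl patch domain soainfra -n soans --type=merge -p '{"spec":{"serverStartPolicy":"Never"}}'
```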

Update user permissions of the domain PV storage

+

The Oracle SOA Suite image for release 23.4.2 has an oracle user with UID 1000, with the default group set to root. Before applying the patched image, update the user permissions of the domain persistent volume (PV) to set the group to root:

$ sudo chown -R 1000:0 /scratch/k8s_dir/SOA

Address post-installation requirements

+

If the patches in the patched Oracle SOA Suite image have any post-installation steps, follow these steps:

+ +
Create a Kubernetes pod with domain home access
  1. Get the domain home persistent volume claim details for the Oracle SOA Suite domain.

     For example, to list the persistent volume claim details in the namespace soans:

     $ kubectl get pvc -n soans

     Sample output showing that the persistent volume claim is soainfra-domain-pvc:

     NAME                  STATUS   VOLUME               CAPACITY   ACCESS MODES   STORAGECLASS                    AGE
     soainfra-domain-pvc   Bound    soainfra-domain-pv   10Gi       RWX            soainfra-domain-storage-class   xxd

  2. Create a YAML file soapostinstall.yaml using the domain home persistent volume claim.

     For example, using soainfra-domain-pvc per the sample output:

     Note: Replace soasuite:12.2.1.4-30761841 with the patched image in the following sample YAML:

     apiVersion: v1
     kind: Pod
     metadata:
       labels:
         run: soapostinstall
       name: soapostinstall
       namespace: soans
     spec:
       containers:
       - image: soasuite:12.2.1.4-30761841
         name: soapostinstall
         command: ["/bin/bash", "-c", "sleep infinity"]
         imagePullPolicy: IfNotPresent
         volumeMounts:
         - name: soainfra-domain-storage-volume
           mountPath: /u01/oracle/user_projects
       volumes:
       - name: soainfra-domain-storage-volume
         persistentVolumeClaim:
           claimName: soainfra-domain-pvc

  3. Apply the YAML to create the Kubernetes pod:

     $ kubectl apply -f soapostinstall.yaml
Perform post-installation steps
+

If you need to perform any post-installation steps on the domain home:

  1. Start a bash shell in the soapostinstall pod:

     $ kubectl exec -it -n soans soapostinstall -- bash

     This opens a bash shell in the running soapostinstall pod:

     [oracle@soapostinstall oracle]$

  2. Use the bash shell of the soapostinstall pod to perform the required steps on the domain home.

  3. After successful completion of the post-installation steps, delete the soapostinstall pod:

     $ kubectl delete -f soapostinstall.yaml

Apply the patched image

+

After completing the required SOA schema upgrade and post-installation steps, start up the domain:

  1. In the domain.yaml configuration file, update the image field value with the patched image. For example:

       image: soasuite:12.2.1.4-30761841

  2. In case of non-Zero Downtime Patching, update the spec.serverStartPolicy field value to IF_NEEDED (for operator 3.x) or IfNeeded (for operator 4.x) in domain.yaml.

  3. Apply the updated domain.yaml configuration file to start up the domain:

     $ kubectl apply -f domain.yaml

     Note: In case of non-Zero Downtime Patching, a complete domain startup happens because the servers in the domain were stopped earlier. For Zero Downtime Patching, the servers in the domain are rolling restarted.

  4. Verify that the domain is updated with the patched image:

     $ kubectl describe domain <domainUID> -n <domain-namespace> | grep "Image:"

     Sample output:

     $ kubectl describe domain soainfra -n soans | grep "Image:"
     Image:                          soasuite:12.2.1.4-30761841
     $

Upgrade a Kubernetes cluster

These instructions describe how to upgrade a Kubernetes cluster created using kubeadm on which an Oracle SOA Suite domain is deployed. A rolling upgrade approach is used to upgrade nodes (master and worker) of the Kubernetes cluster.

+ +

It is expected that there will be a down time during the upgrade of the Kubernetes cluster as the nodes need to be drained as part of the upgrade process.

+
+ +

Prerequisites

  • Review the Prerequisites and ensure that your Kubernetes cluster is ready for upgrade. Make sure your environment meets all prerequisites.
  • Make sure the database used for the SOA domain deployment is up and running during the upgrade process.

Upgrade the Kubernetes version

+

An upgrade of Kubernetes is supported from one MINOR version to the next MINOR version, or between PATCH versions of the same MINOR version. For example, you can upgrade from 1.x to 1.x+1, but not from 1.x to 1.x+2. To upgrade the Kubernetes version, all the master nodes of the Kubernetes cluster must first be upgraded sequentially, followed by the sequential upgrade of each worker node.

  • See here for the official Kubernetes documentation to upgrade from 1.23 to 1.24
  • See here for the official Kubernetes documentation to upgrade from 1.24 to 1.25
  • See here for the official Kubernetes documentation to upgrade from 1.25 to 1.26
  • See here for the official Kubernetes documentation to upgrade from 1.26 to 1.27
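The per-node flow follows the standard kubeadm procedure. The sketch below is illustrative only: version numbers and node names are placeholders, the OS package upgrades of kubeadm, kubelet, and kubectl are omitted, and the official upgrade guide for your exact versions remains the authoritative reference.

```
# On the first master (control-plane) node
$ kubectl drain <master-node> --ignore-daemonsets
$ sudo kubeadm upgrade plan
$ sudo kubeadm upgrade apply v1.x.y
$ kubectl uncordon <master-node>

# On each worker node, after all master nodes are upgraded
$ kubectl drain <worker-node> --ignore-daemonsets --delete-emptydir-data
$ sudo kubeadm upgrade node
$ kubectl uncordon <worker-node>
```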

Upgrade an operator release

To upgrade the WebLogic Kubernetes Operator, use the helm upgrade command with the new Helm chart and operator image. See the steps here to pull the operator image and set up the Oracle SOA Suite repository that contains the operator chart. To upgrade the operator, run the following command:

$ cd ${WORKDIR}
$ helm upgrade \
  --reuse-values \
  --set image=ghcr.io/oracle/weblogic-kubernetes-operator:4.1.4 \
  --namespace weblogic-operator-namespace \
  --wait \
  weblogic-kubernetes-operator \
  charts/weblogic-operator
+

Note: When the WebLogic Kubernetes Operator is upgraded from release version 3.2.1 to 3.3.0 or later, it may be expected that the Administration Server pod in the domain gets restarted.

+
+

Post upgrade steps

+

When you upgrade a 3.x operator to 4.0, the upgrade process creates a WebLogic Domain resource conversion webhook deployment and its associated resources in the same namespace. If the conversion webhook deployment already exists in another namespace, then a new conversion webhook deployment is not created. The webhook automatically and transparently upgrades the existing WebLogic Domains from the 3.x schema to the 4.0 schema. For more information, see WebLogic Domain resource conversion webhook.

+

If you have a single WebLogic Kubernetes Operator per Kubernetes cluster (most common use case), you can upgrade directly from any 3.x operator release to 4.1.4. The Helm chart for 4.1.4 automatically installs the schema conversion webhook.

+

If there is more than one WebLogic Kubernetes Operator in a single Kubernetes cluster:

  • You must upgrade every operator to at least version 3.4.1 before upgrading any operator to 4.0.0.
  • As the 4.0.x Helm chart also installs a singleton schema conversion webhook that is shared by all 4.0.x operators in the cluster, use the webhookOnly Helm chart option to install this webhook in its own namespace prior to installing any of the 4.0.0 operators, and also use the preserveWebhook Helm chart option with each operator to prevent an operator uninstall from removing the shared webhook.
  • The operator provides a utility that can be used to convert existing "v8" Domain YAML files to "v9".
  • Several Helm chart default values have been changed. If you upgrade 3.x installations using the --reuse-values option during the Helm upgrade, the installations will continue to use the values from their original installation.

Starting with operator version 3.1.1, the T3 channel Kubernetes service name extension changed from -external to -ext. If the Administration Server was configured to expose a T3 channel in your domain, then follow these steps to recreate the Kubernetes service (for the T3 channel) with the new -ext name.

+
+

Note: If these steps are not performed, then the domain restart using spec.serverStartPolicy fails to bring up the servers.

  1. Get the existing Kubernetes service name for the T3 channel from the domain namespace. For example, if the domainUID is soainfra and the Administration Server name is adminserver, the service is:

     soainfra-adminserver-external

  2. Delete the existing Kubernetes service for the T3 channel, so that the operator (3.1.1 or later) creates a new one:

     $ kubectl delete service <T3 channel service> --namespace <domain-namespace>

     For example, if the domainUID is soainfra, the Administration Server name is adminserver, and the domain namespace is soans, the command is:

     $ kubectl delete service soainfra-adminserver-external --namespace soans

Now, the operator automatically creates a new Kubernetes service with -ext instead of -external:

soainfra-adminserver-ext

Release Notes

Review the latest changes and known issues for Oracle SOA Suite on Kubernetes.

+

Recent changes

Date               | Version | Change
-------------------|---------|-------
November 30, 2023  | 23.4.2  | Supports Oracle SOA Suite 12.2.1.4 domains deployment using October 2023 PSU and known bug fixes. Support for WebLogic Kubernetes Operator 4.1.4. Oracle SOA Suite 12.2.1.4 Docker image for this release can be downloaded from My Oracle Support (MOS patch 35908803 for Oracle Linux 7 and MOS patch 35915091 for Oracle Linux 8) and container-registry.oracle.com.
August 31, 2023    | 23.3.2  | Supports Oracle SOA Suite 12.2.1.4 domains deployment using July 2023 PSU and known bug fixes. Support for WebLogic Kubernetes Operator 4.1.0. Oracle SOA Suite 12.2.1.4 Docker image for this release can be downloaded from My Oracle Support (MOS patch 35729956 for Oracle Linux 7 and MOS patch 35730025 for Oracle Linux 8) and container-registry.oracle.com.
May 31, 2023       | 23.2.2  | Supports Oracle SOA Suite 12.2.1.4 domains deployment using April 2023 PSU and known bug fixes. Support for WebLogic Kubernetes Operator 4.0.6. Container images based on Oracle Linux 8 are now supported. Oracle SOA Suite 12.2.1.4 Docker image for this release can be downloaded from My Oracle Support (MOS patch 35269141 for Oracle Linux 7 and MOS patch 35285229 for Oracle Linux 8) and container-registry.oracle.com.
February 28, 2023  | 23.1.2  | Supports Oracle SOA Suite 12.2.1.4 domains deployment using January 2023 PSU and known bug fixes. Support for WebLogic Kubernetes Operator 4.0.4. Oracle SOA Suite 12.2.1.4 Docker image for this release can be downloaded from My Oracle Support (MOS patch 34980883) and container-registry.oracle.com.
November 30, 2022  | 22.4.2  | Supports Oracle SOA Suite 12.2.1.4 domains deployment using October 2022 PSU and known bug fixes. Support for WebLogic Kubernetes Operator 3.4.4. Oracle SOA Suite 12.2.1.4 container images are now only available from container-registry.oracle.com and are no longer available from My Oracle Support.
August 31, 2022    | 22.3.2  | Supports Oracle SOA Suite 12.2.1.4 domains deployment using July 2022 PSU and known bug fixes. Enterprise Deployment Guide as preview release. Oracle SOA Suite 12.2.1.4 Docker image for this release can be downloaded from My Oracle Support (MOS patch 34410491).
May 31, 2022       | 22.2.2  | Supports Oracle SOA Suite 12.2.1.4 domains deployment using April 2022 PSU and known bug fixes. Oracle SOA Suite 12.2.1.4 Docker image for this release can be downloaded from My Oracle Support (MOS patch 34077593).
February 25, 2022  | 22.1.2  | Supports Oracle SOA Suite 12.2.1.4 domains deployment using January 2022 PSU and known bug fixes. Oracle SOA Suite 12.2.1.4 Docker image for this release can be downloaded from My Oracle Support (MOS patch 33749496).
November 30, 2021  | 21.4.2  | Supports Oracle SOA Suite 12.2.1.4 domains deployment using October 2021 PSU and known bug fixes. Oracle SOA Suite 12.2.1.4 Docker image for this release can be downloaded from My Oracle Support (MOS patch 33467899).
August 6, 2021     | 21.3.2  | Supports Oracle SOA Suite 12.2.1.4 domains deployment using July 2021 PSU and known bug fixes. Oracle SOA Suite 12.2.1.4 Docker image for this release can be downloaded from My Oracle Support (MOS patch 33125465).
May 31, 2021       | 21.2.2  | Supports Oracle SOA Suite 12.2.1.4 domains deployment using April 2021 PSU and known bug fixes. Oracle SOA Suite 12.2.1.4 Docker image for this release can be downloaded from My Oracle Support (MOS patch 32794257).
February 28, 2021  | 21.1.2  | Supports Oracle SOA Suite 12.2.1.4 domains deployment using January 2021 PSU and known bug fixes. Oracle SOA Suite 12.2.1.4 Docker image for this release can be downloaded from My Oracle Support (MOS patch 32398542).
November 30, 2020  | 20.4.2  | Supports Oracle SOA Suite 12.2.1.4 domains deployment using October 2020 PSU and known bug fixes. Added HEALTHCHECK support for Oracle SOA Suite docker image. Oracle SOA Suite 12.2.1.4 Docker image for this release can be downloaded from My Oracle Support (MOS patch 32215749).
October 3, 2020    | 20.3.3  | Certified Oracle WebLogic Kubernetes Operator version 3.0.1. Kubernetes 1.14.8+, 1.15.7+, 1.16.0+, 1.17.0+, and 1.18.0+ support. Flannel is the only supported CNI in this release. SSL enabling for the Administration Server and Managed Servers is supported. Only Oracle SOA Suite 12.2.1.4 is supported.

Known issues

  1. Overriding tuning parameters is not supported using configuration overrides
  2. Deployments in WebLogic administration console display unexpected error
  3. Enterprise Manager console may display ADF_FACES-30200 error
  4. Configure the external URL access for Oracle SOA Suite composite applications
  5. Configure the external access for the Oracle Enterprise Scheduler WebServices WSDL URLs
  6. Missing gif images in Oracle Service Bus console pipeline configuration page

Troubleshooting


This document describes common issues that may occur during the deployment of Oracle SOA Suite on Kubernetes and the steps to troubleshoot them. Also refer to the FAQs page for frequent issues and steps to resolve them.


WebLogic Kubernetes Operator installation failure

If the WebLogic Kubernetes Operator installation fails with a timeout, work through the following checks (example commands follow this list):

  • Check the status of the operator Helm release using the command helm ls -n <operator-namespace>.
  • Check if the operator pod is successfully created in the operator namespace.
  • Describe the operator pod using kubectl describe pod <operator-pod-name> -n <operator-namespace> to identify any obvious errors.
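For example, a minimal diagnostic sequence, assuming the operator is installed as the release weblogic-kubernetes-operator in the opns namespace (the same names used later in this document); the operator pod name is a placeholder, so copy the actual name from the kubectl get pods output:

$ helm ls -n opns
$ kubectl get pods -n opns
$ kubectl describe pod weblogic-operator-6d7c9f8b5-abcde -n opns

If the pod is stuck in Pending or ImagePullBackOff, the Events section at the end of the describe output usually points to the cause (for example, an unreachable image registry or insufficient node resources).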

RCU schema creation failure

When creating the RCU schema using create-rcu-schema.sh, the possible causes of failure are:

  • Database is not up and running
  • Incorrect database connection URL used
  • Invalid database credentials used
  • Schema prefix already exists

Review and correct each of the above causes as needed. Also, drop the existing schema with the same prefix before rerunning create-rcu-schema.sh with the correct values, as sketched below.
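For example, a sketch of dropping and recreating the schema, assuming the prefix SOA1 and a database service reachable at oracle-db.default.svc.cluster.local:1521/devpdb.k8s; these values, and the exact script options, are assumptions, so confirm the supported flags with ./drop-rcu-schema.sh -h and ./create-rcu-schema.sh -h for your release:

$ ./drop-rcu-schema.sh -s SOA1 -d oracle-db.default.svc.cluster.local:1521/devpdb.k8s
$ ./create-rcu-schema.sh -s SOA1 -d oracle-db.default.svc.cluster.local:1521/devpdb.k8s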


Domain creation failure


If the Oracle SOA Suite domain creation fails when running create-domain.sh, perform the following steps to diagnose the issue:

  1. Run the following command to diagnose the create domain job:

     $ kubectl logs jobs/<domain_job> -n <domain_namespace>

     For example:

     $ kubectl logs jobs/soainfra-create-soa-infra-domain-job -n soans

     Also run:

     $ kubectl describe pod <domain_job> -n <domain_namespace>

     For example:

     $ kubectl describe pod soainfra-create-soa-infra-domain-job-mcc6v -n soans

     Use the output to diagnose the problem and resolve the issue.

  2. Clean up the failed domain creation (a sketch of the cleanup commands follows this list):

     1. Delete the failed domain creation job in the domain namespace using the command kubectl delete job <domain-creation-job-name> -n <domain-namespace>.
     2. Delete the contents of the domain home directory.
     3. Drop the existing RCU schema.

  3. Recreate the domain:

     1. Recreate the RCU schema.
     2. Make sure the Persistent Volume and Persistent Volume Claim used for the domain are created with correct permissions and bound together.
     3. Rerun the create domain script.
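For example, a minimal cleanup sketch, assuming the job name soainfra-create-soa-infra-domain-job, the soans namespace, and a domain home kept under /share/domains/soainfra on the shared persistent volume; the path, schema prefix, and database URL are placeholders for your environment:

$ kubectl delete job soainfra-create-soa-infra-domain-job -n soans
$ rm -rf /share/domains/soainfra/*
# Drop the old schema before recreating it, then rerun create-domain.sh with corrected inputs.
$ ./drop-rcu-schema.sh -s SOA1 -d oracle-db.default.svc.cluster.local:1521/devpdb.k8s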

Common domain creation issues

A common domain creation issue is the error Failed to build JDBC Connection object appearing in the create domain job logs.


This error is reported when there is an issue with database schema access during domain creation. The possible causes are:

  • Incorrect schema name specified in create-domain-inputs.yaml.
  • RCU schema credentials specified in the secret soainfra-rcu-credentials are different from the credentials specified while creating the RCU schema using create-rcu-schema.sh.

To resolve these issues, verify that the schema name and credentials used during domain creation match those used when the RCU schema was created. A quick way to inspect the stored credentials is shown below.
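For example, a sketch of decoding the values stored in the RCU credentials secret, assuming the soans namespace and that the secret uses the keys username and password (key names can differ, so list the data keys first):

$ kubectl get secret soainfra-rcu-credentials -n soans -o jsonpath='{.data}'
$ kubectl get secret soainfra-rcu-credentials -n soans -o jsonpath='{.data.username}' | base64 --decode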

Server pods not started after applying domain configuration file

If a domain configuration file (YAML) is applied but no introspector or server pods start, and there is no mention of the domain in the operator log, ensure that the domain’s namespace is configured to be managed by the WebLogic Kubernetes Operator.

The domainNamespaceSelectionStrategy setting in the operator Helm chart determines which namespaces the operator manages. Its default value changed between operator versions 3.4 and 4.0: in version 3.4 the default was List, and in version 4.0 the default is LabelSelector. Therefore, instead of managing the set of namespaces listed in the domainNamespaces Helm chart value, the operator now searches for namespaces carrying the label specified in the domainNamespaceLabelSelector Helm chart value, which defaults to weblogic-operator=enabled.
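If you prefer the pre-4.0 behavior with a 4.x operator, a sketch of switching the strategy back to List is shown below; the release name weblogic-kubernetes-operator, the opns namespace, and the chart location charts/weblogic-operator follow the examples used elsewhere in this document and may differ in your installation:

$ helm upgrade --reuse-values --namespace opns \
    --set "domainNamespaceSelectionStrategy=List" \
    --set "domainNamespaces={soans}" \
    --wait weblogic-kubernetes-operator charts/weblogic-operator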


For operator versions 3.4 and lower, you can verify the configuration by running the command helm get values <operator-release> -n <operator-namespace> and checking the values under the domainNamespaces section.


For example:

$ helm get values weblogic-kubernetes-operator -n opns
USER-SUPPLIED VALUES:
domainNamespaces:
- soans
image: ghcr.io/oracle/weblogic-kubernetes-operator:3.4.4
javaLoggingLevel: FINE
serviceAccount: op-sa

If you don’t see the domain namespace value under the domainNamespaces section, run the helm upgrade command in the operator namespace with appropriate values to configure the operator to manage the domain namespace.

$ helm upgrade --reuse-values --namespace opns --set "domainNamespaces={soans}" --wait weblogic-kubernetes-operator charts/weblogic-operator

For operator versions 4.0 and higher, verify if the label weblogic-operator=enabled is specified for domain namespace that is to be managed by the operator, by running the following command:

$ kubectl get ns --selector="weblogic-operator=enabled"

For example, if your domain namespace is soans and the preceding command did not list the soans namespace, then execute the following command for operator to manage the domain namespace:

$ kubectl label namespace soans weblogic-operator=enabled
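You can then confirm that the label was applied and that the operator has started managing the namespace; the deployment name weblogic-operator and the opns namespace are assumptions carried over from the earlier examples:

$ kubectl get ns soans --show-labels
$ kubectl logs deployment/weblogic-operator -n opns | grep -i soans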

Ingress controller not serving the domain URLs


To diagnose this issue:

  1. Verify that the Ingress controller is installed successfully. For example, to verify the Traefik Ingress controller status, run the following command:

     $ helm list -n traefik
     NAME                    NAMESPACE       REVISION        UPDATED                                 STATUS          CHART               APP VERSION
     traefik                 traefik         2               2022-11-30 11:31:18.599876918 +0000 UTC deployed        traefik-20.5.3      v2.9.5

  2. Verify that the Ingress controller is set up to monitor the domain namespace. For example, to verify that the Traefik Ingress controller manages the soans domain namespace, run the following command and check the values under the namespaces section:

     $ helm get values traefik-operator -n traefik
     USER-SUPPLIED VALUES:
     kubernetes:
        namespaces:
        - traefik
        - soans

  3. Verify that the Ingress chart is installed correctly in the domain namespace. For example, run the following command:

     $ helm list -n soans
     NAME                    NAMESPACE       REVISION        UPDATED                                 STATUS          CHART                       APP VERSION
     soainfra-traefik        soans           1               2021-10-27 11:24:31.7572791 +0000 UTC   deployed        ingress-per-domain-0.1.0    1.0

  4. Verify that the Ingress URL paths and hostnames are configured correctly by inspecting the Ingress resources in the domain namespace, as sketched after this list.
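A sketch of checking the Ingress routing, assuming a per-domain ingress named soainfra-traefik in the soans namespace and a load balancer reachable on LOADBALANCER-HOSTNAME:PORT; these names, and the sample /console path, are placeholders for whatever your ingress actually exposes:

$ kubectl get ingress -n soans
$ kubectl describe ingress soainfra-traefik -n soans
$ curl -v -H "Host: <hostname-configured-in-ingress>" http://<LOADBALANCER-HOSTNAME>:<PORT>/console

The describe output lists the host rules and backend services; a 404 from the curl command usually means the path or hostname does not match any rule, while a 503 points at the backend service or server pods.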

Security warnings reported in WebLogic Administration console


With the July 2021 PSU applied, WebLogic Server regularly validates your domain configuration settings against a set of security configuration guidelines to determine whether the domain meets key security guidelines recommended by Oracle. If your domain does not meet a recommendation for a security configuration setting, a warning is logged in the Security Warnings Report in the WebLogic Administration Console.


See Review Potential Security Issues in Securing a Production Environment for Oracle WebLogic Server for more information.


Disable Remote Anonymous RMI T3 and IIOP Requests

If you see a security warning that Remote Anonymous RMI T3 or IIOP requests are enabled, resolve it by setting the RemoteAnonymousRMIT3Enabled and RemoteAnonymousRMIIIOPEnabled attributes to false through JAVA_OPTIONS in domain.yaml before starting the domain, as shown below:

serverPod:
    # an (optional) list of environment variables to be set on the servers
    env:
    - name: JAVA_OPTIONS
      value: "-Dweblogic.StdoutDebugEnabled=false -Dweblogic.ssl.Enabled=true -Dweblogic.security.SSL.ignoreHostnameVerification=true -Dweblogic.security.remoteAnonymousRMIT3Enabled=false -Dweblogic.security.remoteAnonymousRMIIIOPEnabled=false"
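A sketch of applying the change and verifying that the option reached the server pods, assuming the domain resource file is domain.yaml, the domain namespace is soans, and the Administration Server pod is named soainfra-adminserver (all placeholders for your environment):

$ kubectl apply -f domain.yaml
$ kubectl get pod soainfra-adminserver -n soans -o jsonpath='{.spec.containers[0].env[?(@.name=="JAVA_OPTIONS")].value}'

Because JAVA_OPTIONS is only read at server startup, the warning clears after the servers are restarted with the new value.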

See the linked documentation for more details.

+ + + + + +
+ +
+ + +
+ + +
+ + + +
+ +
+
+
diff --git a/docs/23.4.2/soa-domains/troubleshooting/index.xml b/docs/23.4.2/soa-domains/troubleshooting/index.xml
new file mode 100644
index 000000000..da83e4394
--- /dev/null
+++ b/docs/23.4.2/soa-domains/troubleshooting/index.xml
@@ -0,0 +1,14 @@

Troubleshooting on Oracle Fusion Middleware on Kubernetes
/fmw-kubernetes/23.4.2/soa-domains/troubleshooting/
Recent content in Troubleshooting on Oracle Fusion Middleware on Kubernetes
Hugo -- gohugo.io
en-us

diff --git a/docs/23.4.2/tags/index.html b/docs/23.4.2/tags/index.html
index efe960b64..2e70bd381 100644
--- a/docs/23.4.2/tags/index.html
+++ b/docs/23.4.2/tags/index.html
@@ -12,18 +12,18 @@
Tags :: Oracle Fusion Middleware on Kubernetes