diff --git a/OracleAccessManagement/kubernetes/create-access-domain/domain-home-on-pv/domain-resources/domain.yaml b/OracleAccessManagement/kubernetes/create-access-domain/domain-home-on-pv/domain-resources/domain.yaml
old mode 100644
new mode 100755
diff --git a/OracleAccessManagement/kubernetes/create-access-domain/domain-home-on-pv/wdt-artifacts/OAM.json b/OracleAccessManagement/kubernetes/create-access-domain/domain-home-on-pv/wdt-artifacts/OAM.json
old mode 100644
new mode 100755
diff --git a/OracleAccessManagement/kubernetes/create-access-domain/domain-home-on-pv/wdt-artifacts/agl_jdbc.yaml b/OracleAccessManagement/kubernetes/create-access-domain/domain-home-on-pv/wdt-artifacts/agl_jdbc.yaml
old mode 100644
new mode 100755
diff --git a/OracleAccessManagement/kubernetes/create-access-domain/domain-home-on-pv/wdt-artifacts/domainInfo.yaml b/OracleAccessManagement/kubernetes/create-access-domain/domain-home-on-pv/wdt-artifacts/domainInfo.yaml
old mode 100644
new mode 100755
diff --git a/OracleAccessManagement/kubernetes/create-access-domain/domain-home-on-pv/wdt-artifacts/oam.properties b/OracleAccessManagement/kubernetes/create-access-domain/domain-home-on-pv/wdt-artifacts/oam.properties
old mode 100644
new mode 100755
diff --git a/OracleAccessManagement/kubernetes/create-access-domain/domain-home-on-pv/wdt-artifacts/resource.yaml b/OracleAccessManagement/kubernetes/create-access-domain/domain-home-on-pv/wdt-artifacts/resource.yaml
old mode 100644
new mode 100755
diff --git a/OracleAccessManagement/kubernetes/create-access-domain/domain-home-on-pv/wdt-artifacts/topology.yaml b/OracleAccessManagement/kubernetes/create-access-domain/domain-home-on-pv/wdt-artifacts/topology.yaml
old mode 100644
new mode 100755
diff --git a/OracleIdentityGovernance/kubernetes/create-oim-domain/domain-home-on-pv/domain-resources/domain.yaml b/OracleIdentityGovernance/kubernetes/create-oim-domain/domain-home-on-pv/domain-resources/domain.yaml
old mode 100644
new mode 100755
diff --git a/OracleIdentityGovernance/kubernetes/create-oim-domain/domain-home-on-pv/wdt-artifacts/OIG.json b/OracleIdentityGovernance/kubernetes/create-oim-domain/domain-home-on-pv/wdt-artifacts/OIG.json
old mode 100644
new mode 100755
diff --git a/OracleIdentityGovernance/kubernetes/create-oim-domain/domain-home-on-pv/wdt-artifacts/agl_jdbc.yaml b/OracleIdentityGovernance/kubernetes/create-oim-domain/domain-home-on-pv/wdt-artifacts/agl_jdbc.yaml
old mode 100644
new mode 100755
diff --git a/OracleIdentityGovernance/kubernetes/create-oim-domain/domain-home-on-pv/wdt-artifacts/domainInfo.yaml b/OracleIdentityGovernance/kubernetes/create-oim-domain/domain-home-on-pv/wdt-artifacts/domainInfo.yaml
old mode 100644
new mode 100755
diff --git a/OracleIdentityGovernance/kubernetes/create-oim-domain/domain-home-on-pv/wdt-artifacts/oig.properties b/OracleIdentityGovernance/kubernetes/create-oim-domain/domain-home-on-pv/wdt-artifacts/oig.properties
old mode 100644
new mode 100755
diff --git a/OracleIdentityGovernance/kubernetes/create-oim-domain/domain-home-on-pv/wdt-artifacts/resource.yaml b/OracleIdentityGovernance/kubernetes/create-oim-domain/domain-home-on-pv/wdt-artifacts/resource.yaml
old mode 100644
new mode 100755
diff --git a/OracleIdentityGovernance/kubernetes/create-oim-domain/domain-home-on-pv/wdt-artifacts/topology.yaml b/OracleIdentityGovernance/kubernetes/create-oim-domain/domain-home-on-pv/wdt-artifacts/topology.yaml
old mode 100644
new mode 100755
diff --git a/OracleUnifiedDirectory/kubernetes/helm/oud-ds-rs/templates/oud-storageclass-config.yaml b/OracleUnifiedDirectory/kubernetes/helm/oud-ds-rs/templates/oud-storageclass-config.yaml
old mode 100644
new mode 100755
diff --git a/docs/23.4.1/404.html b/docs/23.4.1/404.html
new file mode 100644
index 000000000..1b3f11ce6
--- /dev/null
+++ b/docs/23.4.1/404.html
@@ -0,0 +1,57 @@
+[... 57 lines of Hugo-generated 404 page markup: page title "404 Page not found"; visible body text "Error", "Woops. Looks like this page doesn't exist ¯\_(ツ)_/¯.", "Go to homepage", "Page not found!" ...]
diff --git a/docs/23.4.1/categories/index.html b/docs/23.4.1/categories/index.html
new file mode 100644
index 000000000..e32796c8a
--- /dev/null
+++ b/docs/23.4.1/categories/index.html
@@ -0,0 +1,3922 @@
+[... 3,922 lines of Hugo-generated taxonomy page markup: page title "Categories :: Oracle Fusion Middleware on Kubernetes", page heading "Categories" ...]
diff --git a/docs/23.4.1/categories/index.xml b/docs/23.4.1/categories/index.xml
new file mode 100644
index 000000000..345fa86e9
--- /dev/null
+++ b/docs/23.4.1/categories/index.xml
@@ -0,0 +1,14 @@
+[... 14-line RSS feed: channel title "Categories on Oracle Fusion Middleware on Kubernetes", link /fmw-kubernetes/23.4.1/categories/, description "Recent content in Categories on Oracle Fusion Middleware on Kubernetes", generator "Hugo -- gohugo.io", language "en-us" ...]
\ No newline at end of file
diff --git a/docs/23.4.1/css/atom-one-dark-reasonable.css b/docs/23.4.1/css/atom-one-dark-reasonable.css
new file mode 100644
index 000000000..fd41c996a
--- /dev/null
+++ b/docs/23.4.1/css/atom-one-dark-reasonable.css
@@ -0,0 +1,77 @@
+/*
+
+Atom One Dark With support for ReasonML by Gidi Morris, based off work by Daniel Gamage
+
+Original One Dark Syntax theme from https://github.com/atom/one-dark-syntax
+
+*/
+.hljs {
+  display: block;
+  overflow-x: auto;
+  padding: 0.5em;
+  line-height: 1.3em;
+  color: #abb2bf;
+  background: #282c34;
+  border-radius: 5px;
+}
+.hljs-keyword, .hljs-operator {
+  color: #F92672;
+}
+.hljs-pattern-match {
+  color: #F92672;
+}
+.hljs-pattern-match .hljs-constructor {
+  color: #61aeee;
+}
+.hljs-function {
+  color: #61aeee;
+}
+.hljs-function .hljs-params {
+  color: #A6E22E;
+}
+.hljs-function .hljs-params .hljs-typing {
+  color: #FD971F;
+}
+.hljs-module-access .hljs-module {
+  color: #7e57c2;
+}
+.hljs-constructor {
+  color: #e2b93d;
+}
+.hljs-constructor .hljs-string {
+  color: #9CCC65;
+}
+.hljs-comment, .hljs-quote {
+  color: #b18eb1;
+  font-style: italic;
+}
+.hljs-doctag, .hljs-formula {
+  color: #c678dd;
+}
+.hljs-section, .hljs-name, .hljs-selector-tag, .hljs-deletion, .hljs-subst {
+  color: #e06c75;
+}
+.hljs-literal {
+  color: #56b6c2;
+}
+.hljs-string, .hljs-regexp, .hljs-addition, .hljs-attribute, .hljs-meta-string {
+  color: #98c379;
+}
+.hljs-built_in, .hljs-class .hljs-title {
+  color: #e6c07b;
+}
+.hljs-attr, .hljs-variable, .hljs-template-variable, .hljs-type, .hljs-selector-class, .hljs-selector-attr, .hljs-selector-pseudo, .hljs-number {
+  color: #d19a66;
+}
+.hljs-symbol, .hljs-bullet, .hljs-link, .hljs-meta, .hljs-selector-id, .hljs-title {
+  color: #61aeee;
+}
+.hljs-emphasis {
+  font-style: italic;
+}
+.hljs-strong {
+  font-weight: bold;
+}
+.hljs-link {
+  text-decoration: underline;
+}
diff --git a/docs/23.4.1/css/auto-complete.css b/docs/23.4.1/css/auto-complete.css
new file mode 100644
index 000000000..ac6979ad3
--- /dev/null
+++ b/docs/23.4.1/css/auto-complete.css
@@ -0,0 +1,47 @@
+.autocomplete-suggestions {
+    text-align: left;
+    cursor: default;
+    border: 1px solid #ccc;
+    border-top: 0;
+    background: #fff;
+    box-shadow: -1px 1px 3px rgba(0,0,0,.1);
+
+    /* core styles should not be changed */
+    position: absolute;
+    display: none;
+    z-index: 9999;
+    max-height: 254px;
+    overflow: hidden;
+    overflow-y: auto;
+    box-sizing: border-box;
+
+}
+.autocomplete-suggestion {
+    position: relative;
+    cursor: pointer;
+    padding: 7px;
+    line-height: 23px;
+    white-space: nowrap;
+    overflow: hidden;
+    text-overflow: ellipsis;
+    color: #333;
+}
+
+.autocomplete-suggestion b {
+    font-weight: normal;
+    color: #1f8dd6;
+}
+
+.autocomplete-suggestion.selected {
+    background: #333;
+    color: #fff;
+}
+
+.autocomplete-suggestion:hover {
+    background: #444;
+    color: #fff;
+}
+
+.autocomplete-suggestion > .context {
+    font-size: 12px;
+}
diff --git a/docs/23.4.1/css/featherlight.min.css b/docs/23.4.1/css/featherlight.min.css
new file mode 100644
index 000000000..1b00c7861
--- /dev/null
+++
b/docs/23.4.1/css/featherlight.min.css @@ -0,0 +1,8 @@ +/** + * Featherlight - ultra slim jQuery lightbox + * Version 1.7.13 - http://noelboss.github.io/featherlight/ + * + * Copyright (c) 2015, Noël Raoul Bossart (http://www.noelboss.com) + * MIT Licensed. +**/ +html.with-featherlight{overflow:hidden}.featherlight{display:none;position:fixed;top:0;right:0;bottom:0;left:0;z-index:2147483647;text-align:center;white-space:nowrap;cursor:pointer;background:#333;background:rgba(0,0,0,0)}.featherlight:last-of-type{background:rgba(0,0,0,.8)}.featherlight:before{content:'';display:inline-block;height:100%;vertical-align:middle}.featherlight .featherlight-content{position:relative;text-align:left;vertical-align:middle;display:inline-block;overflow:auto;padding:25px 25px 0;border-bottom:25px solid transparent;margin-left:5%;margin-right:5%;max-height:95%;background:#fff;cursor:auto;white-space:normal}.featherlight .featherlight-inner{display:block}.featherlight link.featherlight-inner,.featherlight script.featherlight-inner,.featherlight style.featherlight-inner{display:none}.featherlight .featherlight-close-icon{position:absolute;z-index:9999;top:0;right:0;line-height:25px;width:25px;cursor:pointer;text-align:center;font-family:Arial,sans-serif;background:#fff;background:rgba(255,255,255,.3);color:#000;border:0;padding:0}.featherlight .featherlight-close-icon::-moz-focus-inner{border:0;padding:0}.featherlight .featherlight-image{width:100%}.featherlight-iframe .featherlight-content{border-bottom:0;padding:0;-webkit-overflow-scrolling:touch}.featherlight iframe{border:0}.featherlight *{-webkit-box-sizing:border-box;-moz-box-sizing:border-box;box-sizing:border-box}@media only screen and (max-width:1024px){.featherlight .featherlight-content{margin-left:0;margin-right:0;max-height:98%;padding:10px 10px 0;border-bottom:10px solid transparent}}@media print{html.with-featherlight>*>:not(.featherlight){display:none}} \ No newline at end of file diff --git a/docs/23.4.1/css/fontawesome-all.min.css b/docs/23.4.1/css/fontawesome-all.min.css new file mode 100644 index 000000000..de5647372 --- /dev/null +++ b/docs/23.4.1/css/fontawesome-all.min.css @@ -0,0 +1 @@ +.fa,.fab,.fal,.far,.fas{-moz-osx-font-smoothing:grayscale;-webkit-font-smoothing:antialiased;display:inline-block;font-style:normal;font-variant:normal;text-rendering:auto;line-height:1}.fa-lg{font-size:1.33333em;line-height:.75em;vertical-align:-.0667em}.fa-xs{font-size:.75em}.fa-sm{font-size:.875em}.fa-1x{font-size:1em}.fa-2x{font-size:2em}.fa-3x{font-size:3em}.fa-4x{font-size:4em}.fa-5x{font-size:5em}.fa-6x{font-size:6em}.fa-7x{font-size:7em}.fa-8x{font-size:8em}.fa-9x{font-size:9em}.fa-10x{font-size:10em}.fa-fw{text-align:center;width:1.25em}.fa-ul{list-style-type:none;margin-left:2.5em;padding-left:0}.fa-ul>li{position:relative}.fa-li{left:-2em;position:absolute;text-align:center;width:2em;line-height:inherit}.fa-border{border:.08em solid #eee;border-radius:.1em;padding:.2em .25em .15em}.fa-pull-left{float:left}.fa-pull-right{float:right}.fa.fa-pull-left,.fab.fa-pull-left,.fal.fa-pull-left,.far.fa-pull-left,.fas.fa-pull-left{margin-right:.3em}.fa.fa-pull-right,.fab.fa-pull-right,.fal.fa-pull-right,.far.fa-pull-right,.fas.fa-pull-right{margin-left:.3em}.fa-spin{animation:fa-spin 2s infinite linear}.fa-pulse{animation:fa-spin 1s infinite steps(8)}@keyframes 
fa-spin{0%{transform:rotate(0deg)}to{transform:rotate(1turn)}}.fa-rotate-90{-ms-filter:"progid:DXImageTransform.Microsoft.BasicImage(rotation=1)";transform:rotate(90deg)}.fa-rotate-180{-ms-filter:"progid:DXImageTransform.Microsoft.BasicImage(rotation=2)";transform:rotate(180deg)}.fa-rotate-270{-ms-filter:"progid:DXImageTransform.Microsoft.BasicImage(rotation=3)";transform:rotate(270deg)}.fa-flip-horizontal{-ms-filter:"progid:DXImageTransform.Microsoft.BasicImage(rotation=0, mirror=1)";transform:scaleX(-1)}.fa-flip-vertical{transform:scaleY(-1)}.fa-flip-horizontal.fa-flip-vertical,.fa-flip-vertical{-ms-filter:"progid:DXImageTransform.Microsoft.BasicImage(rotation=2, mirror=1)"}.fa-flip-horizontal.fa-flip-vertical{transform:scale(-1)}:root .fa-flip-horizontal,:root .fa-flip-vertical,:root .fa-rotate-90,:root .fa-rotate-180,:root .fa-rotate-270{filter:none}.fa-stack{display:inline-block;height:2em;line-height:2em;position:relative;vertical-align:middle;width:2.5em}.fa-stack-1x,.fa-stack-2x{left:0;position:absolute;text-align:center;width:100%}.fa-stack-1x{line-height:inherit}.fa-stack-2x{font-size:2em}.fa-inverse{color:#fff}.fa-500px:before{content:"\f26e"}.fa-accessible-icon:before{content:"\f368"}.fa-accusoft:before{content:"\f369"}.fa-acquisitions-incorporated:before{content:"\f6af"}.fa-ad:before{content:"\f641"}.fa-address-book:before{content:"\f2b9"}.fa-address-card:before{content:"\f2bb"}.fa-adjust:before{content:"\f042"}.fa-adn:before{content:"\f170"}.fa-adobe:before{content:"\f778"}.fa-adversal:before{content:"\f36a"}.fa-affiliatetheme:before{content:"\f36b"}.fa-air-freshener:before{content:"\f5d0"}.fa-algolia:before{content:"\f36c"}.fa-align-center:before{content:"\f037"}.fa-align-justify:before{content:"\f039"}.fa-align-left:before{content:"\f036"}.fa-align-right:before{content:"\f038"}.fa-alipay:before{content:"\f642"}.fa-allergies:before{content:"\f461"}.fa-amazon:before{content:"\f270"}.fa-amazon-pay:before{content:"\f42c"}.fa-ambulance:before{content:"\f0f9"}.fa-american-sign-language-interpreting:before{content:"\f2a3"}.fa-amilia:before{content:"\f36d"}.fa-anchor:before{content:"\f13d"}.fa-android:before{content:"\f17b"}.fa-angellist:before{content:"\f209"}.fa-angle-double-down:before{content:"\f103"}.fa-angle-double-left:before{content:"\f100"}.fa-angle-double-right:before{content:"\f101"}.fa-angle-double-up:before{content:"\f102"}.fa-angle-down:before{content:"\f107"}.fa-angle-left:before{content:"\f104"}.fa-angle-right:before{content:"\f105"}.fa-angle-up:before{content:"\f106"}.fa-angry:before{content:"\f556"}.fa-angrycreative:before{content:"\f36e"}.fa-angular:before{content:"\f420"}.fa-ankh:before{content:"\f644"}.fa-app-store:before{content:"\f36f"}.fa-app-store-ios:before{content:"\f370"}.fa-apper:before{content:"\f371"}.fa-apple:before{content:"\f179"}.fa-apple-alt:before{content:"\f5d1"}.fa-apple-pay:before{content:"\f415"}.fa-archive:before{content:"\f187"}.fa-archway:before{content:"\f557"}.fa-arrow-alt-circle-down:before{content:"\f358"}.fa-arrow-alt-circle-left:before{content:"\f359"}.fa-arrow-alt-circle-right:before{content:"\f35a"}.fa-arrow-alt-circle-up:before{content:"\f35b"}.fa-arrow-circle-down:before{content:"\f0ab"}.fa-arrow-circle-left:before{content:"\f0a8"}.fa-arrow-circle-right:before{content:"\f0a9"}.fa-arrow-circle-up:before{content:"\f0aa"}.fa-arrow-down:before{content:"\f063"}.fa-arrow-left:before{content:"\f060"}.fa-arrow-right:before{content:"\f061"}.fa-arrow-up:before{content:"\f062"}.fa-arrows-alt:before{content:"\f0b2"}.fa-arrows-alt-h:before{co
ntent:"\f337"}.fa-arrows-alt-v:before{content:"\f338"}.fa-artstation:before{content:"\f77a"}.fa-assistive-listening-systems:before{content:"\f2a2"}.fa-asterisk:before{content:"\f069"}.fa-asymmetrik:before{content:"\f372"}.fa-at:before{content:"\f1fa"}.fa-atlas:before{content:"\f558"}.fa-atlassian:before{content:"\f77b"}.fa-atom:before{content:"\f5d2"}.fa-audible:before{content:"\f373"}.fa-audio-description:before{content:"\f29e"}.fa-autoprefixer:before{content:"\f41c"}.fa-avianex:before{content:"\f374"}.fa-aviato:before{content:"\f421"}.fa-award:before{content:"\f559"}.fa-aws:before{content:"\f375"}.fa-baby:before{content:"\f77c"}.fa-baby-carriage:before{content:"\f77d"}.fa-backspace:before{content:"\f55a"}.fa-backward:before{content:"\f04a"}.fa-balance-scale:before{content:"\f24e"}.fa-ban:before{content:"\f05e"}.fa-band-aid:before{content:"\f462"}.fa-bandcamp:before{content:"\f2d5"}.fa-barcode:before{content:"\f02a"}.fa-bars:before{content:"\f0c9"}.fa-baseball-ball:before{content:"\f433"}.fa-basketball-ball:before{content:"\f434"}.fa-bath:before{content:"\f2cd"}.fa-battery-empty:before{content:"\f244"}.fa-battery-full:before{content:"\f240"}.fa-battery-half:before{content:"\f242"}.fa-battery-quarter:before{content:"\f243"}.fa-battery-three-quarters:before{content:"\f241"}.fa-bed:before{content:"\f236"}.fa-beer:before{content:"\f0fc"}.fa-behance:before{content:"\f1b4"}.fa-behance-square:before{content:"\f1b5"}.fa-bell:before{content:"\f0f3"}.fa-bell-slash:before{content:"\f1f6"}.fa-bezier-curve:before{content:"\f55b"}.fa-bible:before{content:"\f647"}.fa-bicycle:before{content:"\f206"}.fa-bimobject:before{content:"\f378"}.fa-binoculars:before{content:"\f1e5"}.fa-biohazard:before{content:"\f780"}.fa-birthday-cake:before{content:"\f1fd"}.fa-bitbucket:before{content:"\f171"}.fa-bitcoin:before{content:"\f379"}.fa-bity:before{content:"\f37a"}.fa-black-tie:before{content:"\f27e"}.fa-blackberry:before{content:"\f37b"}.fa-blender:before{content:"\f517"}.fa-blender-phone:before{content:"\f6b6"}.fa-blind:before{content:"\f29d"}.fa-blog:before{content:"\f781"}.fa-blogger:before{content:"\f37c"}.fa-blogger-b:before{content:"\f37d"}.fa-bluetooth:before{content:"\f293"}.fa-bluetooth-b:before{content:"\f294"}.fa-bold:before{content:"\f032"}.fa-bolt:before{content:"\f0e7"}.fa-bomb:before{content:"\f1e2"}.fa-bone:before{content:"\f5d7"}.fa-bong:before{content:"\f55c"}.fa-book:before{content:"\f02d"}.fa-book-dead:before{content:"\f6b7"}.fa-book-open:before{content:"\f518"}.fa-book-reader:before{content:"\f5da"}.fa-bookmark:before{content:"\f02e"}.fa-bowling-ball:before{content:"\f436"}.fa-box:before{content:"\f466"}.fa-box-open:before{content:"\f49e"}.fa-boxes:before{content:"\f468"}.fa-braille:before{content:"\f2a1"}.fa-brain:before{content:"\f5dc"}.fa-briefcase:before{content:"\f0b1"}.fa-briefcase-medical:before{content:"\f469"}.fa-broadcast-tower:before{content:"\f519"}.fa-broom:before{content:"\f51a"}.fa-brush:before{content:"\f55d"}.fa-btc:before{content:"\f15a"}.fa-bug:before{content:"\f188"}.fa-building:before{content:"\f1ad"}.fa-bullhorn:before{content:"\f0a1"}.fa-bullseye:before{content:"\f140"}.fa-burn:before{content:"\f46a"}.fa-buromobelexperte:before{content:"\f37f"}.fa-bus:before{content:"\f207"}.fa-bus-alt:before{content:"\f55e"}.fa-business-time:before{content:"\f64a"}.fa-buysellads:before{content:"\f20d"}.fa-calculator:before{content:"\f1ec"}.fa-calendar:before{content:"\f133"}.fa-calendar-alt:before{content:"\f073"}.fa-calendar-check:before{content:"\f274"}.fa-calendar-day:before{content:"\f7
83"}.fa-calendar-minus:before{content:"\f272"}.fa-calendar-plus:before{content:"\f271"}.fa-calendar-times:before{content:"\f273"}.fa-calendar-week:before{content:"\f784"}.fa-camera:before{content:"\f030"}.fa-camera-retro:before{content:"\f083"}.fa-campground:before{content:"\f6bb"}.fa-canadian-maple-leaf:before{content:"\f785"}.fa-candy-cane:before{content:"\f786"}.fa-cannabis:before{content:"\f55f"}.fa-capsules:before{content:"\f46b"}.fa-car:before{content:"\f1b9"}.fa-car-alt:before{content:"\f5de"}.fa-car-battery:before{content:"\f5df"}.fa-car-crash:before{content:"\f5e1"}.fa-car-side:before{content:"\f5e4"}.fa-caret-down:before{content:"\f0d7"}.fa-caret-left:before{content:"\f0d9"}.fa-caret-right:before{content:"\f0da"}.fa-caret-square-down:before{content:"\f150"}.fa-caret-square-left:before{content:"\f191"}.fa-caret-square-right:before{content:"\f152"}.fa-caret-square-up:before{content:"\f151"}.fa-caret-up:before{content:"\f0d8"}.fa-carrot:before{content:"\f787"}.fa-cart-arrow-down:before{content:"\f218"}.fa-cart-plus:before{content:"\f217"}.fa-cash-register:before{content:"\f788"}.fa-cat:before{content:"\f6be"}.fa-cc-amazon-pay:before{content:"\f42d"}.fa-cc-amex:before{content:"\f1f3"}.fa-cc-apple-pay:before{content:"\f416"}.fa-cc-diners-club:before{content:"\f24c"}.fa-cc-discover:before{content:"\f1f2"}.fa-cc-jcb:before{content:"\f24b"}.fa-cc-mastercard:before{content:"\f1f1"}.fa-cc-paypal:before{content:"\f1f4"}.fa-cc-stripe:before{content:"\f1f5"}.fa-cc-visa:before{content:"\f1f0"}.fa-centercode:before{content:"\f380"}.fa-centos:before{content:"\f789"}.fa-certificate:before{content:"\f0a3"}.fa-chair:before{content:"\f6c0"}.fa-chalkboard:before{content:"\f51b"}.fa-chalkboard-teacher:before{content:"\f51c"}.fa-charging-station:before{content:"\f5e7"}.fa-chart-area:before{content:"\f1fe"}.fa-chart-bar:before{content:"\f080"}.fa-chart-line:before{content:"\f201"}.fa-chart-pie:before{content:"\f200"}.fa-check:before{content:"\f00c"}.fa-check-circle:before{content:"\f058"}.fa-check-double:before{content:"\f560"}.fa-check-square:before{content:"\f14a"}.fa-chess:before{content:"\f439"}.fa-chess-bishop:before{content:"\f43a"}.fa-chess-board:before{content:"\f43c"}.fa-chess-king:before{content:"\f43f"}.fa-chess-knight:before{content:"\f441"}.fa-chess-pawn:before{content:"\f443"}.fa-chess-queen:before{content:"\f445"}.fa-chess-rook:before{content:"\f447"}.fa-chevron-circle-down:before{content:"\f13a"}.fa-chevron-circle-left:before{content:"\f137"}.fa-chevron-circle-right:before{content:"\f138"}.fa-chevron-circle-up:before{content:"\f139"}.fa-chevron-down:before{content:"\f078"}.fa-chevron-left:before{content:"\f053"}.fa-chevron-right:before{content:"\f054"}.fa-chevron-up:before{content:"\f077"}.fa-child:before{content:"\f1ae"}.fa-chrome:before{content:"\f268"}.fa-church:before{content:"\f51d"}.fa-circle:before{content:"\f111"}.fa-circle-notch:before{content:"\f1ce"}.fa-city:before{content:"\f64f"}.fa-clipboard:before{content:"\f328"}.fa-clipboard-check:before{content:"\f46c"}.fa-clipboard-list:before{content:"\f46d"}.fa-clock:before{content:"\f017"}.fa-clone:before{content:"\f24d"}.fa-closed-captioning:before{content:"\f20a"}.fa-cloud:before{content:"\f0c2"}.fa-cloud-download-alt:before{content:"\f381"}.fa-cloud-meatball:before{content:"\f73b"}.fa-cloud-moon:before{content:"\f6c3"}.fa-cloud-moon-rain:before{content:"\f73c"}.fa-cloud-rain:before{content:"\f73d"}.fa-cloud-showers-heavy:before{content:"\f740"}.fa-cloud-sun:before{content:"\f6c4"}.fa-cloud-sun-rain:before{content:"\f743"}.fa-cloud
-upload-alt:before{content:"\f382"}.fa-cloudscale:before{content:"\f383"}.fa-cloudsmith:before{content:"\f384"}.fa-cloudversify:before{content:"\f385"}.fa-cocktail:before{content:"\f561"}.fa-code:before{content:"\f121"}.fa-code-branch:before{content:"\f126"}.fa-codepen:before{content:"\f1cb"}.fa-codiepie:before{content:"\f284"}.fa-coffee:before{content:"\f0f4"}.fa-cog:before{content:"\f013"}.fa-cogs:before{content:"\f085"}.fa-coins:before{content:"\f51e"}.fa-columns:before{content:"\f0db"}.fa-comment:before{content:"\f075"}.fa-comment-alt:before{content:"\f27a"}.fa-comment-dollar:before{content:"\f651"}.fa-comment-dots:before{content:"\f4ad"}.fa-comment-slash:before{content:"\f4b3"}.fa-comments:before{content:"\f086"}.fa-comments-dollar:before{content:"\f653"}.fa-compact-disc:before{content:"\f51f"}.fa-compass:before{content:"\f14e"}.fa-compress:before{content:"\f066"}.fa-compress-arrows-alt:before{content:"\f78c"}.fa-concierge-bell:before{content:"\f562"}.fa-confluence:before{content:"\f78d"}.fa-connectdevelop:before{content:"\f20e"}.fa-contao:before{content:"\f26d"}.fa-cookie:before{content:"\f563"}.fa-cookie-bite:before{content:"\f564"}.fa-copy:before{content:"\f0c5"}.fa-copyright:before{content:"\f1f9"}.fa-couch:before{content:"\f4b8"}.fa-cpanel:before{content:"\f388"}.fa-creative-commons:before{content:"\f25e"}.fa-creative-commons-by:before{content:"\f4e7"}.fa-creative-commons-nc:before{content:"\f4e8"}.fa-creative-commons-nc-eu:before{content:"\f4e9"}.fa-creative-commons-nc-jp:before{content:"\f4ea"}.fa-creative-commons-nd:before{content:"\f4eb"}.fa-creative-commons-pd:before{content:"\f4ec"}.fa-creative-commons-pd-alt:before{content:"\f4ed"}.fa-creative-commons-remix:before{content:"\f4ee"}.fa-creative-commons-sa:before{content:"\f4ef"}.fa-creative-commons-sampling:before{content:"\f4f0"}.fa-creative-commons-sampling-plus:before{content:"\f4f1"}.fa-creative-commons-share:before{content:"\f4f2"}.fa-creative-commons-zero:before{content:"\f4f3"}.fa-credit-card:before{content:"\f09d"}.fa-critical-role:before{content:"\f6c9"}.fa-crop:before{content:"\f125"}.fa-crop-alt:before{content:"\f565"}.fa-cross:before{content:"\f654"}.fa-crosshairs:before{content:"\f05b"}.fa-crow:before{content:"\f520"}.fa-crown:before{content:"\f521"}.fa-css3:before{content:"\f13c"}.fa-css3-alt:before{content:"\f38b"}.fa-cube:before{content:"\f1b2"}.fa-cubes:before{content:"\f1b3"}.fa-cut:before{content:"\f0c4"}.fa-cuttlefish:before{content:"\f38c"}.fa-d-and-d:before{content:"\f38d"}.fa-d-and-d-beyond:before{content:"\f6ca"}.fa-dashcube:before{content:"\f210"}.fa-database:before{content:"\f1c0"}.fa-deaf:before{content:"\f2a4"}.fa-delicious:before{content:"\f1a5"}.fa-democrat:before{content:"\f747"}.fa-deploydog:before{content:"\f38e"}.fa-deskpro:before{content:"\f38f"}.fa-desktop:before{content:"\f108"}.fa-dev:before{content:"\f6cc"}.fa-deviantart:before{content:"\f1bd"}.fa-dharmachakra:before{content:"\f655"}.fa-dhl:before{content:"\f790"}.fa-diagnoses:before{content:"\f470"}.fa-diaspora:before{content:"\f791"}.fa-dice:before{content:"\f522"}.fa-dice-d20:before{content:"\f6cf"}.fa-dice-d6:before{content:"\f6d1"}.fa-dice-five:before{content:"\f523"}.fa-dice-four:before{content:"\f524"}.fa-dice-one:before{content:"\f525"}.fa-dice-six:before{content:"\f526"}.fa-dice-three:before{content:"\f527"}.fa-dice-two:before{content:"\f528"}.fa-digg:before{content:"\f1a6"}.fa-digital-ocean:before{content:"\f391"}.fa-digital-tachograph:before{content:"\f566"}.fa-directions:before{content:"\f5eb"}.fa-discord:before{content:"\f39
2"}.fa-discourse:before{content:"\f393"}.fa-divide:before{content:"\f529"}.fa-dizzy:before{content:"\f567"}.fa-dna:before{content:"\f471"}.fa-dochub:before{content:"\f394"}.fa-docker:before{content:"\f395"}.fa-dog:before{content:"\f6d3"}.fa-dollar-sign:before{content:"\f155"}.fa-dolly:before{content:"\f472"}.fa-dolly-flatbed:before{content:"\f474"}.fa-donate:before{content:"\f4b9"}.fa-door-closed:before{content:"\f52a"}.fa-door-open:before{content:"\f52b"}.fa-dot-circle:before{content:"\f192"}.fa-dove:before{content:"\f4ba"}.fa-download:before{content:"\f019"}.fa-draft2digital:before{content:"\f396"}.fa-drafting-compass:before{content:"\f568"}.fa-dragon:before{content:"\f6d5"}.fa-draw-polygon:before{content:"\f5ee"}.fa-dribbble:before{content:"\f17d"}.fa-dribbble-square:before{content:"\f397"}.fa-dropbox:before{content:"\f16b"}.fa-drum:before{content:"\f569"}.fa-drum-steelpan:before{content:"\f56a"}.fa-drumstick-bite:before{content:"\f6d7"}.fa-drupal:before{content:"\f1a9"}.fa-dumbbell:before{content:"\f44b"}.fa-dumpster:before{content:"\f793"}.fa-dumpster-fire:before{content:"\f794"}.fa-dungeon:before{content:"\f6d9"}.fa-dyalog:before{content:"\f399"}.fa-earlybirds:before{content:"\f39a"}.fa-ebay:before{content:"\f4f4"}.fa-edge:before{content:"\f282"}.fa-edit:before{content:"\f044"}.fa-eject:before{content:"\f052"}.fa-elementor:before{content:"\f430"}.fa-ellipsis-h:before{content:"\f141"}.fa-ellipsis-v:before{content:"\f142"}.fa-ello:before{content:"\f5f1"}.fa-ember:before{content:"\f423"}.fa-empire:before{content:"\f1d1"}.fa-envelope:before{content:"\f0e0"}.fa-envelope-open:before{content:"\f2b6"}.fa-envelope-open-text:before{content:"\f658"}.fa-envelope-square:before{content:"\f199"}.fa-envira:before{content:"\f299"}.fa-equals:before{content:"\f52c"}.fa-eraser:before{content:"\f12d"}.fa-erlang:before{content:"\f39d"}.fa-ethereum:before{content:"\f42e"}.fa-ethernet:before{content:"\f796"}.fa-etsy:before{content:"\f2d7"}.fa-euro-sign:before{content:"\f153"}.fa-exchange-alt:before{content:"\f362"}.fa-exclamation:before{content:"\f12a"}.fa-exclamation-circle:before{content:"\f06a"}.fa-exclamation-triangle:before{content:"\f071"}.fa-expand:before{content:"\f065"}.fa-expand-arrows-alt:before{content:"\f31e"}.fa-expeditedssl:before{content:"\f23e"}.fa-external-link-alt:before{content:"\f35d"}.fa-external-link-square-alt:before{content:"\f360"}.fa-eye:before{content:"\f06e"}.fa-eye-dropper:before{content:"\f1fb"}.fa-eye-slash:before{content:"\f070"}.fa-facebook:before{content:"\f09a"}.fa-facebook-f:before{content:"\f39e"}.fa-facebook-messenger:before{content:"\f39f"}.fa-facebook-square:before{content:"\f082"}.fa-fantasy-flight-games:before{content:"\f6dc"}.fa-fast-backward:before{content:"\f049"}.fa-fast-forward:before{content:"\f050"}.fa-fax:before{content:"\f1ac"}.fa-feather:before{content:"\f52d"}.fa-feather-alt:before{content:"\f56b"}.fa-fedex:before{content:"\f797"}.fa-fedora:before{content:"\f798"}.fa-female:before{content:"\f182"}.fa-fighter-jet:before{content:"\f0fb"}.fa-figma:before{content:"\f799"}.fa-file:before{content:"\f15b"}.fa-file-alt:before{content:"\f15c"}.fa-file-archive:before{content:"\f1c6"}.fa-file-audio:before{content:"\f1c7"}.fa-file-code:before{content:"\f1c9"}.fa-file-contract:before{content:"\f56c"}.fa-file-csv:before{content:"\f6dd"}.fa-file-download:before{content:"\f56d"}.fa-file-excel:before{content:"\f1c3"}.fa-file-export:before{content:"\f56e"}.fa-file-image:before{content:"\f1c5"}.fa-file-import:before{content:"\f56f"}.fa-file-invoice:before{content:"\f570"}.f
a-file-invoice-dollar:before{content:"\f571"}.fa-file-medical:before{content:"\f477"}.fa-file-medical-alt:before{content:"\f478"}.fa-file-pdf:before{content:"\f1c1"}.fa-file-powerpoint:before{content:"\f1c4"}.fa-file-prescription:before{content:"\f572"}.fa-file-signature:before{content:"\f573"}.fa-file-upload:before{content:"\f574"}.fa-file-video:before{content:"\f1c8"}.fa-file-word:before{content:"\f1c2"}.fa-fill:before{content:"\f575"}.fa-fill-drip:before{content:"\f576"}.fa-film:before{content:"\f008"}.fa-filter:before{content:"\f0b0"}.fa-fingerprint:before{content:"\f577"}.fa-fire:before{content:"\f06d"}.fa-fire-alt:before{content:"\f7e4"}.fa-fire-extinguisher:before{content:"\f134"}.fa-firefox:before{content:"\f269"}.fa-first-aid:before{content:"\f479"}.fa-first-order:before{content:"\f2b0"}.fa-first-order-alt:before{content:"\f50a"}.fa-firstdraft:before{content:"\f3a1"}.fa-fish:before{content:"\f578"}.fa-fist-raised:before{content:"\f6de"}.fa-flag:before{content:"\f024"}.fa-flag-checkered:before{content:"\f11e"}.fa-flag-usa:before{content:"\f74d"}.fa-flask:before{content:"\f0c3"}.fa-flickr:before{content:"\f16e"}.fa-flipboard:before{content:"\f44d"}.fa-flushed:before{content:"\f579"}.fa-fly:before{content:"\f417"}.fa-folder:before{content:"\f07b"}.fa-folder-minus:before{content:"\f65d"}.fa-folder-open:before{content:"\f07c"}.fa-folder-plus:before{content:"\f65e"}.fa-font:before{content:"\f031"}.fa-font-awesome:before{content:"\f2b4"}.fa-font-awesome-alt:before{content:"\f35c"}.fa-font-awesome-flag:before{content:"\f425"}.fa-font-awesome-logo-full:before{content:"\f4e6"}.fa-fonticons:before{content:"\f280"}.fa-fonticons-fi:before{content:"\f3a2"}.fa-football-ball:before{content:"\f44e"}.fa-fort-awesome:before{content:"\f286"}.fa-fort-awesome-alt:before{content:"\f3a3"}.fa-forumbee:before{content:"\f211"}.fa-forward:before{content:"\f04e"}.fa-foursquare:before{content:"\f180"}.fa-free-code-camp:before{content:"\f2c5"}.fa-freebsd:before{content:"\f3a4"}.fa-frog:before{content:"\f52e"}.fa-frown:before{content:"\f119"}.fa-frown-open:before{content:"\f57a"}.fa-fulcrum:before{content:"\f50b"}.fa-funnel-dollar:before{content:"\f662"}.fa-futbol:before{content:"\f1e3"}.fa-galactic-republic:before{content:"\f50c"}.fa-galactic-senate:before{content:"\f50d"}.fa-gamepad:before{content:"\f11b"}.fa-gas-pump:before{content:"\f52f"}.fa-gavel:before{content:"\f0e3"}.fa-gem:before{content:"\f3a5"}.fa-genderless:before{content:"\f22d"}.fa-get-pocket:before{content:"\f265"}.fa-gg:before{content:"\f260"}.fa-gg-circle:before{content:"\f261"}.fa-ghost:before{content:"\f6e2"}.fa-gift:before{content:"\f06b"}.fa-gifts:before{content:"\f79c"}.fa-git:before{content:"\f1d3"}.fa-git-square:before{content:"\f1d2"}.fa-github:before{content:"\f09b"}.fa-github-alt:before{content:"\f113"}.fa-github-square:before{content:"\f092"}.fa-gitkraken:before{content:"\f3a6"}.fa-gitlab:before{content:"\f296"}.fa-gitter:before{content:"\f426"}.fa-glass-cheers:before{content:"\f79f"}.fa-glass-martini:before{content:"\f000"}.fa-glass-martini-alt:before{content:"\f57b"}.fa-glass-whiskey:before{content:"\f7a0"}.fa-glasses:before{content:"\f530"}.fa-glide:before{content:"\f2a5"}.fa-glide-g:before{content:"\f2a6"}.fa-globe:before{content:"\f0ac"}.fa-globe-africa:before{content:"\f57c"}.fa-globe-americas:before{content:"\f57d"}.fa-globe-asia:before{content:"\f57e"}.fa-globe-europe:before{content:"\f7a2"}.fa-gofore:before{content:"\f3a7"}.fa-golf-ball:before{content:"\f450"}.fa-goodreads:before{content:"\f3a8"}.fa-goodreads-g:before{content
:"\f3a9"}.fa-google:before{content:"\f1a0"}.fa-google-drive:before{content:"\f3aa"}.fa-google-play:before{content:"\f3ab"}.fa-google-plus:before{content:"\f2b3"}.fa-google-plus-g:before{content:"\f0d5"}.fa-google-plus-square:before{content:"\f0d4"}.fa-google-wallet:before{content:"\f1ee"}.fa-gopuram:before{content:"\f664"}.fa-graduation-cap:before{content:"\f19d"}.fa-gratipay:before{content:"\f184"}.fa-grav:before{content:"\f2d6"}.fa-greater-than:before{content:"\f531"}.fa-greater-than-equal:before{content:"\f532"}.fa-grimace:before{content:"\f57f"}.fa-grin:before{content:"\f580"}.fa-grin-alt:before{content:"\f581"}.fa-grin-beam:before{content:"\f582"}.fa-grin-beam-sweat:before{content:"\f583"}.fa-grin-hearts:before{content:"\f584"}.fa-grin-squint:before{content:"\f585"}.fa-grin-squint-tears:before{content:"\f586"}.fa-grin-stars:before{content:"\f587"}.fa-grin-tears:before{content:"\f588"}.fa-grin-tongue:before{content:"\f589"}.fa-grin-tongue-squint:before{content:"\f58a"}.fa-grin-tongue-wink:before{content:"\f58b"}.fa-grin-wink:before{content:"\f58c"}.fa-grip-horizontal:before{content:"\f58d"}.fa-grip-lines:before{content:"\f7a4"}.fa-grip-lines-vertical:before{content:"\f7a5"}.fa-grip-vertical:before{content:"\f58e"}.fa-gripfire:before{content:"\f3ac"}.fa-grunt:before{content:"\f3ad"}.fa-guitar:before{content:"\f7a6"}.fa-gulp:before{content:"\f3ae"}.fa-h-square:before{content:"\f0fd"}.fa-hacker-news:before{content:"\f1d4"}.fa-hacker-news-square:before{content:"\f3af"}.fa-hackerrank:before{content:"\f5f7"}.fa-hammer:before{content:"\f6e3"}.fa-hamsa:before{content:"\f665"}.fa-hand-holding:before{content:"\f4bd"}.fa-hand-holding-heart:before{content:"\f4be"}.fa-hand-holding-usd:before{content:"\f4c0"}.fa-hand-lizard:before{content:"\f258"}.fa-hand-paper:before{content:"\f256"}.fa-hand-peace:before{content:"\f25b"}.fa-hand-point-down:before{content:"\f0a7"}.fa-hand-point-left:before{content:"\f0a5"}.fa-hand-point-right:before{content:"\f0a4"}.fa-hand-point-up:before{content:"\f0a6"}.fa-hand-pointer:before{content:"\f25a"}.fa-hand-rock:before{content:"\f255"}.fa-hand-scissors:before{content:"\f257"}.fa-hand-spock:before{content:"\f259"}.fa-hands:before{content:"\f4c2"}.fa-hands-helping:before{content:"\f4c4"}.fa-handshake:before{content:"\f2b5"}.fa-hanukiah:before{content:"\f6e6"}.fa-hashtag:before{content:"\f292"}.fa-hat-wizard:before{content:"\f6e8"}.fa-haykal:before{content:"\f666"}.fa-hdd:before{content:"\f0a0"}.fa-heading:before{content:"\f1dc"}.fa-headphones:before{content:"\f025"}.fa-headphones-alt:before{content:"\f58f"}.fa-headset:before{content:"\f590"}.fa-heart:before{content:"\f004"}.fa-heart-broken:before{content:"\f7a9"}.fa-heartbeat:before{content:"\f21e"}.fa-helicopter:before{content:"\f533"}.fa-highlighter:before{content:"\f591"}.fa-hiking:before{content:"\f6ec"}.fa-hippo:before{content:"\f6ed"}.fa-hips:before{content:"\f452"}.fa-hire-a-helper:before{content:"\f3b0"}.fa-history:before{content:"\f1da"}.fa-hockey-puck:before{content:"\f453"}.fa-holly-berry:before{content:"\f7aa"}.fa-home:before{content:"\f015"}.fa-hooli:before{content:"\f427"}.fa-hornbill:before{content:"\f592"}.fa-horse:before{content:"\f6f0"}.fa-horse-head:before{content:"\f7ab"}.fa-hospital:before{content:"\f0f8"}.fa-hospital-alt:before{content:"\f47d"}.fa-hospital-symbol:before{content:"\f47e"}.fa-hot-tub:before{content:"\f593"}.fa-hotel:before{content:"\f594"}.fa-hotjar:before{content:"\f3b1"}.fa-hourglass:before{content:"\f254"}.fa-hourglass-end:before{content:"\f253"}.fa-hourglass-half:before{content:"\f25
2"}.fa-hourglass-start:before{content:"\f251"}.fa-house-damage:before{content:"\f6f1"}.fa-houzz:before{content:"\f27c"}.fa-hryvnia:before{content:"\f6f2"}.fa-html5:before{content:"\f13b"}.fa-hubspot:before{content:"\f3b2"}.fa-i-cursor:before{content:"\f246"}.fa-icicles:before{content:"\f7ad"}.fa-id-badge:before{content:"\f2c1"}.fa-id-card:before{content:"\f2c2"}.fa-id-card-alt:before{content:"\f47f"}.fa-igloo:before{content:"\f7ae"}.fa-image:before{content:"\f03e"}.fa-images:before{content:"\f302"}.fa-imdb:before{content:"\f2d8"}.fa-inbox:before{content:"\f01c"}.fa-indent:before{content:"\f03c"}.fa-industry:before{content:"\f275"}.fa-infinity:before{content:"\f534"}.fa-info:before{content:"\f129"}.fa-info-circle:before{content:"\f05a"}.fa-instagram:before{content:"\f16d"}.fa-intercom:before{content:"\f7af"}.fa-internet-explorer:before{content:"\f26b"}.fa-invision:before{content:"\f7b0"}.fa-ioxhost:before{content:"\f208"}.fa-italic:before{content:"\f033"}.fa-itunes:before{content:"\f3b4"}.fa-itunes-note:before{content:"\f3b5"}.fa-java:before{content:"\f4e4"}.fa-jedi:before{content:"\f669"}.fa-jedi-order:before{content:"\f50e"}.fa-jenkins:before{content:"\f3b6"}.fa-jira:before{content:"\f7b1"}.fa-joget:before{content:"\f3b7"}.fa-joint:before{content:"\f595"}.fa-joomla:before{content:"\f1aa"}.fa-journal-whills:before{content:"\f66a"}.fa-js:before{content:"\f3b8"}.fa-js-square:before{content:"\f3b9"}.fa-jsfiddle:before{content:"\f1cc"}.fa-kaaba:before{content:"\f66b"}.fa-kaggle:before{content:"\f5fa"}.fa-key:before{content:"\f084"}.fa-keybase:before{content:"\f4f5"}.fa-keyboard:before{content:"\f11c"}.fa-keycdn:before{content:"\f3ba"}.fa-khanda:before{content:"\f66d"}.fa-kickstarter:before{content:"\f3bb"}.fa-kickstarter-k:before{content:"\f3bc"}.fa-kiss:before{content:"\f596"}.fa-kiss-beam:before{content:"\f597"}.fa-kiss-wink-heart:before{content:"\f598"}.fa-kiwi-bird:before{content:"\f535"}.fa-korvue:before{content:"\f42f"}.fa-landmark:before{content:"\f66f"}.fa-language:before{content:"\f1ab"}.fa-laptop:before{content:"\f109"}.fa-laptop-code:before{content:"\f5fc"}.fa-laravel:before{content:"\f3bd"}.fa-lastfm:before{content:"\f202"}.fa-lastfm-square:before{content:"\f203"}.fa-laugh:before{content:"\f599"}.fa-laugh-beam:before{content:"\f59a"}.fa-laugh-squint:before{content:"\f59b"}.fa-laugh-wink:before{content:"\f59c"}.fa-layer-group:before{content:"\f5fd"}.fa-leaf:before{content:"\f06c"}.fa-leanpub:before{content:"\f212"}.fa-lemon:before{content:"\f094"}.fa-less:before{content:"\f41d"}.fa-less-than:before{content:"\f536"}.fa-less-than-equal:before{content:"\f537"}.fa-level-down-alt:before{content:"\f3be"}.fa-level-up-alt:before{content:"\f3bf"}.fa-life-ring:before{content:"\f1cd"}.fa-lightbulb:before{content:"\f0eb"}.fa-line:before{content:"\f3c0"}.fa-link:before{content:"\f0c1"}.fa-linkedin:before{content:"\f08c"}.fa-linkedin-in:before{content:"\f0e1"}.fa-linode:before{content:"\f2b8"}.fa-linux:before{content:"\f17c"}.fa-lira-sign:before{content:"\f195"}.fa-list:before{content:"\f03a"}.fa-list-alt:before{content:"\f022"}.fa-list-ol:before{content:"\f0cb"}.fa-list-ul:before{content:"\f0ca"}.fa-location-arrow:before{content:"\f124"}.fa-lock:before{content:"\f023"}.fa-lock-open:before{content:"\f3c1"}.fa-long-arrow-alt-down:before{content:"\f309"}.fa-long-arrow-alt-left:before{content:"\f30a"}.fa-long-arrow-alt-right:before{content:"\f30b"}.fa-long-arrow-alt-up:before{content:"\f30c"}.fa-low-vision:before{content:"\f2a8"}.fa-luggage-cart:before{content:"\f59d"}.fa-lyft:before{content:"\f3c3"}
.fa-magento:before{content:"\f3c4"}.fa-magic:before{content:"\f0d0"}.fa-magnet:before{content:"\f076"}.fa-mail-bulk:before{content:"\f674"}.fa-mailchimp:before{content:"\f59e"}.fa-male:before{content:"\f183"}.fa-mandalorian:before{content:"\f50f"}.fa-map:before{content:"\f279"}.fa-map-marked:before{content:"\f59f"}.fa-map-marked-alt:before{content:"\f5a0"}.fa-map-marker:before{content:"\f041"}.fa-map-marker-alt:before{content:"\f3c5"}.fa-map-pin:before{content:"\f276"}.fa-map-signs:before{content:"\f277"}.fa-markdown:before{content:"\f60f"}.fa-marker:before{content:"\f5a1"}.fa-mars:before{content:"\f222"}.fa-mars-double:before{content:"\f227"}.fa-mars-stroke:before{content:"\f229"}.fa-mars-stroke-h:before{content:"\f22b"}.fa-mars-stroke-v:before{content:"\f22a"}.fa-mask:before{content:"\f6fa"}.fa-mastodon:before{content:"\f4f6"}.fa-maxcdn:before{content:"\f136"}.fa-medal:before{content:"\f5a2"}.fa-medapps:before{content:"\f3c6"}.fa-medium:before{content:"\f23a"}.fa-medium-m:before{content:"\f3c7"}.fa-medkit:before{content:"\f0fa"}.fa-medrt:before{content:"\f3c8"}.fa-meetup:before{content:"\f2e0"}.fa-megaport:before{content:"\f5a3"}.fa-meh:before{content:"\f11a"}.fa-meh-blank:before{content:"\f5a4"}.fa-meh-rolling-eyes:before{content:"\f5a5"}.fa-memory:before{content:"\f538"}.fa-mendeley:before{content:"\f7b3"}.fa-menorah:before{content:"\f676"}.fa-mercury:before{content:"\f223"}.fa-meteor:before{content:"\f753"}.fa-microchip:before{content:"\f2db"}.fa-microphone:before{content:"\f130"}.fa-microphone-alt:before{content:"\f3c9"}.fa-microphone-alt-slash:before{content:"\f539"}.fa-microphone-slash:before{content:"\f131"}.fa-microscope:before{content:"\f610"}.fa-microsoft:before{content:"\f3ca"}.fa-minus:before{content:"\f068"}.fa-minus-circle:before{content:"\f056"}.fa-minus-square:before{content:"\f146"}.fa-mitten:before{content:"\f7b5"}.fa-mix:before{content:"\f3cb"}.fa-mixcloud:before{content:"\f289"}.fa-mizuni:before{content:"\f3cc"}.fa-mobile:before{content:"\f10b"}.fa-mobile-alt:before{content:"\f3cd"}.fa-modx:before{content:"\f285"}.fa-monero:before{content:"\f3d0"}.fa-money-bill:before{content:"\f0d6"}.fa-money-bill-alt:before{content:"\f3d1"}.fa-money-bill-wave:before{content:"\f53a"}.fa-money-bill-wave-alt:before{content:"\f53b"}.fa-money-check:before{content:"\f53c"}.fa-money-check-alt:before{content:"\f53d"}.fa-monument:before{content:"\f5a6"}.fa-moon:before{content:"\f186"}.fa-mortar-pestle:before{content:"\f5a7"}.fa-mosque:before{content:"\f678"}.fa-motorcycle:before{content:"\f21c"}.fa-mountain:before{content:"\f6fc"}.fa-mouse-pointer:before{content:"\f245"}.fa-mug-hot:before{content:"\f7b6"}.fa-music:before{content:"\f001"}.fa-napster:before{content:"\f3d2"}.fa-neos:before{content:"\f612"}.fa-network-wired:before{content:"\f6ff"}.fa-neuter:before{content:"\f22c"}.fa-newspaper:before{content:"\f1ea"}.fa-nimblr:before{content:"\f5a8"}.fa-nintendo-switch:before{content:"\f418"}.fa-node:before{content:"\f419"}.fa-node-js:before{content:"\f3d3"}.fa-not-equal:before{content:"\f53e"}.fa-notes-medical:before{content:"\f481"}.fa-npm:before{content:"\f3d4"}.fa-ns8:before{content:"\f3d5"}.fa-nutritionix:before{content:"\f3d6"}.fa-object-group:before{content:"\f247"}.fa-object-ungroup:before{content:"\f248"}.fa-odnoklassniki:before{content:"\f263"}.fa-odnoklassniki-square:before{content:"\f264"}.fa-oil-can:before{content:"\f613"}.fa-old-republic:before{content:"\f510"}.fa-om:before{content:"\f679"}.fa-opencart:before{content:"\f23d"}.fa-openid:before{content:"\f19b"}.fa-opera:before{content
:"\f26a"}.fa-optin-monster:before{content:"\f23c"}.fa-osi:before{content:"\f41a"}.fa-otter:before{content:"\f700"}.fa-outdent:before{content:"\f03b"}.fa-page4:before{content:"\f3d7"}.fa-pagelines:before{content:"\f18c"}.fa-paint-brush:before{content:"\f1fc"}.fa-paint-roller:before{content:"\f5aa"}.fa-palette:before{content:"\f53f"}.fa-palfed:before{content:"\f3d8"}.fa-pallet:before{content:"\f482"}.fa-paper-plane:before{content:"\f1d8"}.fa-paperclip:before{content:"\f0c6"}.fa-parachute-box:before{content:"\f4cd"}.fa-paragraph:before{content:"\f1dd"}.fa-parking:before{content:"\f540"}.fa-passport:before{content:"\f5ab"}.fa-pastafarianism:before{content:"\f67b"}.fa-paste:before{content:"\f0ea"}.fa-patreon:before{content:"\f3d9"}.fa-pause:before{content:"\f04c"}.fa-pause-circle:before{content:"\f28b"}.fa-paw:before{content:"\f1b0"}.fa-paypal:before{content:"\f1ed"}.fa-peace:before{content:"\f67c"}.fa-pen:before{content:"\f304"}.fa-pen-alt:before{content:"\f305"}.fa-pen-fancy:before{content:"\f5ac"}.fa-pen-nib:before{content:"\f5ad"}.fa-pen-square:before{content:"\f14b"}.fa-pencil-alt:before{content:"\f303"}.fa-pencil-ruler:before{content:"\f5ae"}.fa-penny-arcade:before{content:"\f704"}.fa-people-carry:before{content:"\f4ce"}.fa-percent:before{content:"\f295"}.fa-percentage:before{content:"\f541"}.fa-periscope:before{content:"\f3da"}.fa-person-booth:before{content:"\f756"}.fa-phabricator:before{content:"\f3db"}.fa-phoenix-framework:before{content:"\f3dc"}.fa-phoenix-squadron:before{content:"\f511"}.fa-phone:before{content:"\f095"}.fa-phone-slash:before{content:"\f3dd"}.fa-phone-square:before{content:"\f098"}.fa-phone-volume:before{content:"\f2a0"}.fa-php:before{content:"\f457"}.fa-pied-piper:before{content:"\f2ae"}.fa-pied-piper-alt:before{content:"\f1a8"}.fa-pied-piper-hat:before{content:"\f4e5"}.fa-pied-piper-pp:before{content:"\f1a7"}.fa-piggy-bank:before{content:"\f4d3"}.fa-pills:before{content:"\f484"}.fa-pinterest:before{content:"\f0d2"}.fa-pinterest-p:before{content:"\f231"}.fa-pinterest-square:before{content:"\f0d3"}.fa-place-of-worship:before{content:"\f67f"}.fa-plane:before{content:"\f072"}.fa-plane-arrival:before{content:"\f5af"}.fa-plane-departure:before{content:"\f5b0"}.fa-play:before{content:"\f04b"}.fa-play-circle:before{content:"\f144"}.fa-playstation:before{content:"\f3df"}.fa-plug:before{content:"\f1e6"}.fa-plus:before{content:"\f067"}.fa-plus-circle:before{content:"\f055"}.fa-plus-square:before{content:"\f0fe"}.fa-podcast:before{content:"\f2ce"}.fa-poll:before{content:"\f681"}.fa-poll-h:before{content:"\f682"}.fa-poo:before{content:"\f2fe"}.fa-poo-storm:before{content:"\f75a"}.fa-poop:before{content:"\f619"}.fa-portrait:before{content:"\f3e0"}.fa-pound-sign:before{content:"\f154"}.fa-power-off:before{content:"\f011"}.fa-pray:before{content:"\f683"}.fa-praying-hands:before{content:"\f684"}.fa-prescription:before{content:"\f5b1"}.fa-prescription-bottle:before{content:"\f485"}.fa-prescription-bottle-alt:before{content:"\f486"}.fa-print:before{content:"\f02f"}.fa-procedures:before{content:"\f487"}.fa-product-hunt:before{content:"\f288"}.fa-project-diagram:before{content:"\f542"}.fa-pushed:before{content:"\f3e1"}.fa-puzzle-piece:before{content:"\f12e"}.fa-python:before{content:"\f3e2"}.fa-qq:before{content:"\f1d6"}.fa-qrcode:before{content:"\f029"}.fa-question:before{content:"\f128"}.fa-question-circle:before{content:"\f059"}.fa-quidditch:before{content:"\f458"}.fa-quinscape:before{content:"\f459"}.fa-quora:before{content:"\f2c4"}.fa-quote-left:before{content:"\f10d"}.fa-quote-rig
ht:before{content:"\f10e"}.fa-quran:before{content:"\f687"}.fa-r-project:before{content:"\f4f7"}.fa-radiation:before{content:"\f7b9"}.fa-radiation-alt:before{content:"\f7ba"}.fa-rainbow:before{content:"\f75b"}.fa-random:before{content:"\f074"}.fa-raspberry-pi:before{content:"\f7bb"}.fa-ravelry:before{content:"\f2d9"}.fa-react:before{content:"\f41b"}.fa-reacteurope:before{content:"\f75d"}.fa-readme:before{content:"\f4d5"}.fa-rebel:before{content:"\f1d0"}.fa-receipt:before{content:"\f543"}.fa-recycle:before{content:"\f1b8"}.fa-red-river:before{content:"\f3e3"}.fa-reddit:before{content:"\f1a1"}.fa-reddit-alien:before{content:"\f281"}.fa-reddit-square:before{content:"\f1a2"}.fa-redhat:before{content:"\f7bc"}.fa-redo:before{content:"\f01e"}.fa-redo-alt:before{content:"\f2f9"}.fa-registered:before{content:"\f25d"}.fa-renren:before{content:"\f18b"}.fa-reply:before{content:"\f3e5"}.fa-reply-all:before{content:"\f122"}.fa-replyd:before{content:"\f3e6"}.fa-republican:before{content:"\f75e"}.fa-researchgate:before{content:"\f4f8"}.fa-resolving:before{content:"\f3e7"}.fa-restroom:before{content:"\f7bd"}.fa-retweet:before{content:"\f079"}.fa-rev:before{content:"\f5b2"}.fa-ribbon:before{content:"\f4d6"}.fa-ring:before{content:"\f70b"}.fa-road:before{content:"\f018"}.fa-robot:before{content:"\f544"}.fa-rocket:before{content:"\f135"}.fa-rocketchat:before{content:"\f3e8"}.fa-rockrms:before{content:"\f3e9"}.fa-route:before{content:"\f4d7"}.fa-rss:before{content:"\f09e"}.fa-rss-square:before{content:"\f143"}.fa-ruble-sign:before{content:"\f158"}.fa-ruler:before{content:"\f545"}.fa-ruler-combined:before{content:"\f546"}.fa-ruler-horizontal:before{content:"\f547"}.fa-ruler-vertical:before{content:"\f548"}.fa-running:before{content:"\f70c"}.fa-rupee-sign:before{content:"\f156"}.fa-sad-cry:before{content:"\f5b3"}.fa-sad-tear:before{content:"\f5b4"}.fa-safari:before{content:"\f267"}.fa-sass:before{content:"\f41e"}.fa-satellite:before{content:"\f7bf"}.fa-satellite-dish:before{content:"\f7c0"}.fa-save:before{content:"\f0c7"}.fa-schlix:before{content:"\f3ea"}.fa-school:before{content:"\f549"}.fa-screwdriver:before{content:"\f54a"}.fa-scribd:before{content:"\f28a"}.fa-scroll:before{content:"\f70e"}.fa-sd-card:before{content:"\f7c2"}.fa-search:before{content:"\f002"}.fa-search-dollar:before{content:"\f688"}.fa-search-location:before{content:"\f689"}.fa-search-minus:before{content:"\f010"}.fa-search-plus:before{content:"\f00e"}.fa-searchengin:before{content:"\f3eb"}.fa-seedling:before{content:"\f4d8"}.fa-sellcast:before{content:"\f2da"}.fa-sellsy:before{content:"\f213"}.fa-server:before{content:"\f233"}.fa-servicestack:before{content:"\f3ec"}.fa-shapes:before{content:"\f61f"}.fa-share:before{content:"\f064"}.fa-share-alt:before{content:"\f1e0"}.fa-share-alt-square:before{content:"\f1e1"}.fa-share-square:before{content:"\f14d"}.fa-shekel-sign:before{content:"\f20b"}.fa-shield-alt:before{content:"\f3ed"}.fa-ship:before{content:"\f21a"}.fa-shipping-fast:before{content:"\f48b"}.fa-shirtsinbulk:before{content:"\f214"}.fa-shoe-prints:before{content:"\f54b"}.fa-shopping-bag:before{content:"\f290"}.fa-shopping-basket:before{content:"\f291"}.fa-shopping-cart:before{content:"\f07a"}.fa-shopware:before{content:"\f5b5"}.fa-shower:before{content:"\f2cc"}.fa-shuttle-van:before{content:"\f5b6"}.fa-sign:before{content:"\f4d9"}.fa-sign-in-alt:before{content:"\f2f6"}.fa-sign-language:before{content:"\f2a7"}.fa-sign-out-alt:before{content:"\f2f5"}.fa-signal:before{content:"\f012"}.fa-signature:before{content:"\f5b7"}.fa-sim-card:before{co
ntent:"\f7c4"}.fa-simplybuilt:before{content:"\f215"}.fa-sistrix:before{content:"\f3ee"}.fa-sitemap:before{content:"\f0e8"}.fa-sith:before{content:"\f512"}.fa-skating:before{content:"\f7c5"}.fa-sketch:before{content:"\f7c6"}.fa-skiing:before{content:"\f7c9"}.fa-skiing-nordic:before{content:"\f7ca"}.fa-skull:before{content:"\f54c"}.fa-skull-crossbones:before{content:"\f714"}.fa-skyatlas:before{content:"\f216"}.fa-skype:before{content:"\f17e"}.fa-slack:before{content:"\f198"}.fa-slack-hash:before{content:"\f3ef"}.fa-slash:before{content:"\f715"}.fa-sleigh:before{content:"\f7cc"}.fa-sliders-h:before{content:"\f1de"}.fa-slideshare:before{content:"\f1e7"}.fa-smile:before{content:"\f118"}.fa-smile-beam:before{content:"\f5b8"}.fa-smile-wink:before{content:"\f4da"}.fa-smog:before{content:"\f75f"}.fa-smoking:before{content:"\f48d"}.fa-smoking-ban:before{content:"\f54d"}.fa-sms:before{content:"\f7cd"}.fa-snapchat:before{content:"\f2ab"}.fa-snapchat-ghost:before{content:"\f2ac"}.fa-snapchat-square:before{content:"\f2ad"}.fa-snowboarding:before{content:"\f7ce"}.fa-snowflake:before{content:"\f2dc"}.fa-snowman:before{content:"\f7d0"}.fa-snowplow:before{content:"\f7d2"}.fa-socks:before{content:"\f696"}.fa-solar-panel:before{content:"\f5ba"}.fa-sort:before{content:"\f0dc"}.fa-sort-alpha-down:before{content:"\f15d"}.fa-sort-alpha-up:before{content:"\f15e"}.fa-sort-amount-down:before{content:"\f160"}.fa-sort-amount-up:before{content:"\f161"}.fa-sort-down:before{content:"\f0dd"}.fa-sort-numeric-down:before{content:"\f162"}.fa-sort-numeric-up:before{content:"\f163"}.fa-sort-up:before{content:"\f0de"}.fa-soundcloud:before{content:"\f1be"}.fa-sourcetree:before{content:"\f7d3"}.fa-spa:before{content:"\f5bb"}.fa-space-shuttle:before{content:"\f197"}.fa-speakap:before{content:"\f3f3"}.fa-spider:before{content:"\f717"}.fa-spinner:before{content:"\f110"}.fa-splotch:before{content:"\f5bc"}.fa-spotify:before{content:"\f1bc"}.fa-spray-can:before{content:"\f5bd"}.fa-square:before{content:"\f0c8"}.fa-square-full:before{content:"\f45c"}.fa-square-root-alt:before{content:"\f698"}.fa-squarespace:before{content:"\f5be"}.fa-stack-exchange:before{content:"\f18d"}.fa-stack-overflow:before{content:"\f16c"}.fa-stamp:before{content:"\f5bf"}.fa-star:before{content:"\f005"}.fa-star-and-crescent:before{content:"\f699"}.fa-star-half:before{content:"\f089"}.fa-star-half-alt:before{content:"\f5c0"}.fa-star-of-david:before{content:"\f69a"}.fa-star-of-life:before{content:"\f621"}.fa-staylinked:before{content:"\f3f5"}.fa-steam:before{content:"\f1b6"}.fa-steam-square:before{content:"\f1b7"}.fa-steam-symbol:before{content:"\f3f6"}.fa-step-backward:before{content:"\f048"}.fa-step-forward:before{content:"\f051"}.fa-stethoscope:before{content:"\f0f1"}.fa-sticker-mule:before{content:"\f3f7"}.fa-sticky-note:before{content:"\f249"}.fa-stop:before{content:"\f04d"}.fa-stop-circle:before{content:"\f28d"}.fa-stopwatch:before{content:"\f2f2"}.fa-store:before{content:"\f54e"}.fa-store-alt:before{content:"\f54f"}.fa-strava:before{content:"\f428"}.fa-stream:before{content:"\f550"}.fa-street-view:before{content:"\f21d"}.fa-strikethrough:before{content:"\f0cc"}.fa-stripe:before{content:"\f429"}.fa-stripe-s:before{content:"\f42a"}.fa-stroopwafel:before{content:"\f551"}.fa-studiovinari:before{content:"\f3f8"}.fa-stumbleupon:before{content:"\f1a4"}.fa-stumbleupon-circle:before{content:"\f1a3"}.fa-subscript:before{content:"\f12c"}.fa-subway:before{content:"\f239"}.fa-suitcase:before{content:"\f0f2"}.fa-suitcase-rolling:before{content:"\f5c1"}.fa-sun:before{conte
nt:"\f185"}.fa-superpowers:before{content:"\f2dd"}.fa-superscript:before{content:"\f12b"}.fa-supple:before{content:"\f3f9"}.fa-surprise:before{content:"\f5c2"}.fa-suse:before{content:"\f7d6"}.fa-swatchbook:before{content:"\f5c3"}.fa-swimmer:before{content:"\f5c4"}.fa-swimming-pool:before{content:"\f5c5"}.fa-synagogue:before{content:"\f69b"}.fa-sync:before{content:"\f021"}.fa-sync-alt:before{content:"\f2f1"}.fa-syringe:before{content:"\f48e"}.fa-table:before{content:"\f0ce"}.fa-table-tennis:before{content:"\f45d"}.fa-tablet:before{content:"\f10a"}.fa-tablet-alt:before{content:"\f3fa"}.fa-tablets:before{content:"\f490"}.fa-tachometer-alt:before{content:"\f3fd"}.fa-tag:before{content:"\f02b"}.fa-tags:before{content:"\f02c"}.fa-tape:before{content:"\f4db"}.fa-tasks:before{content:"\f0ae"}.fa-taxi:before{content:"\f1ba"}.fa-teamspeak:before{content:"\f4f9"}.fa-teeth:before{content:"\f62e"}.fa-teeth-open:before{content:"\f62f"}.fa-telegram:before{content:"\f2c6"}.fa-telegram-plane:before{content:"\f3fe"}.fa-temperature-high:before{content:"\f769"}.fa-temperature-low:before{content:"\f76b"}.fa-tencent-weibo:before{content:"\f1d5"}.fa-tenge:before{content:"\f7d7"}.fa-terminal:before{content:"\f120"}.fa-text-height:before{content:"\f034"}.fa-text-width:before{content:"\f035"}.fa-th:before{content:"\f00a"}.fa-th-large:before{content:"\f009"}.fa-th-list:before{content:"\f00b"}.fa-the-red-yeti:before{content:"\f69d"}.fa-theater-masks:before{content:"\f630"}.fa-themeco:before{content:"\f5c6"}.fa-themeisle:before{content:"\f2b2"}.fa-thermometer:before{content:"\f491"}.fa-thermometer-empty:before{content:"\f2cb"}.fa-thermometer-full:before{content:"\f2c7"}.fa-thermometer-half:before{content:"\f2c9"}.fa-thermometer-quarter:before{content:"\f2ca"}.fa-thermometer-three-quarters:before{content:"\f2c8"}.fa-think-peaks:before{content:"\f731"}.fa-thumbs-down:before{content:"\f165"}.fa-thumbs-up:before{content:"\f164"}.fa-thumbtack:before{content:"\f08d"}.fa-ticket-alt:before{content:"\f3ff"}.fa-times:before{content:"\f00d"}.fa-times-circle:before{content:"\f057"}.fa-tint:before{content:"\f043"}.fa-tint-slash:before{content:"\f5c7"}.fa-tired:before{content:"\f5c8"}.fa-toggle-off:before{content:"\f204"}.fa-toggle-on:before{content:"\f205"}.fa-toilet:before{content:"\f7d8"}.fa-toilet-paper:before{content:"\f71e"}.fa-toolbox:before{content:"\f552"}.fa-tools:before{content:"\f7d9"}.fa-tooth:before{content:"\f5c9"}.fa-torah:before{content:"\f6a0"}.fa-torii-gate:before{content:"\f6a1"}.fa-tractor:before{content:"\f722"}.fa-trade-federation:before{content:"\f513"}.fa-trademark:before{content:"\f25c"}.fa-traffic-light:before{content:"\f637"}.fa-train:before{content:"\f238"}.fa-tram:before{content:"\f7da"}.fa-transgender:before{content:"\f224"}.fa-transgender-alt:before{content:"\f225"}.fa-trash:before{content:"\f1f8"}.fa-trash-alt:before{content:"\f2ed"}.fa-tree:before{content:"\f1bb"}.fa-trello:before{content:"\f181"}.fa-tripadvisor:before{content:"\f262"}.fa-trophy:before{content:"\f091"}.fa-truck:before{content:"\f0d1"}.fa-truck-loading:before{content:"\f4de"}.fa-truck-monster:before{content:"\f63b"}.fa-truck-moving:before{content:"\f4df"}.fa-truck-pickup:before{content:"\f63c"}.fa-tshirt:before{content:"\f553"}.fa-tty:before{content:"\f1e4"}.fa-tumblr:before{content:"\f173"}.fa-tumblr-square:before{content:"\f174"}.fa-tv:before{content:"\f26c"}.fa-twitch:before{content:"\f1e8"}.fa-twitter:before{content:"\f099"}.fa-twitter-square:before{content:"\f081"}.fa-typo3:before{content:"\f42b"}.fa-uber:before{content:"\f402"}
.fa-ubuntu:before{content:"\f7df"}.fa-uikit:before{content:"\f403"}.fa-umbrella:before{content:"\f0e9"}.fa-umbrella-beach:before{content:"\f5ca"}.fa-underline:before{content:"\f0cd"}.fa-undo:before{content:"\f0e2"}.fa-undo-alt:before{content:"\f2ea"}.fa-uniregistry:before{content:"\f404"}.fa-universal-access:before{content:"\f29a"}.fa-university:before{content:"\f19c"}.fa-unlink:before{content:"\f127"}.fa-unlock:before{content:"\f09c"}.fa-unlock-alt:before{content:"\f13e"}.fa-untappd:before{content:"\f405"}.fa-upload:before{content:"\f093"}.fa-ups:before{content:"\f7e0"}.fa-usb:before{content:"\f287"}.fa-user:before{content:"\f007"}.fa-user-alt:before{content:"\f406"}.fa-user-alt-slash:before{content:"\f4fa"}.fa-user-astronaut:before{content:"\f4fb"}.fa-user-check:before{content:"\f4fc"}.fa-user-circle:before{content:"\f2bd"}.fa-user-clock:before{content:"\f4fd"}.fa-user-cog:before{content:"\f4fe"}.fa-user-edit:before{content:"\f4ff"}.fa-user-friends:before{content:"\f500"}.fa-user-graduate:before{content:"\f501"}.fa-user-injured:before{content:"\f728"}.fa-user-lock:before{content:"\f502"}.fa-user-md:before{content:"\f0f0"}.fa-user-minus:before{content:"\f503"}.fa-user-ninja:before{content:"\f504"}.fa-user-plus:before{content:"\f234"}.fa-user-secret:before{content:"\f21b"}.fa-user-shield:before{content:"\f505"}.fa-user-slash:before{content:"\f506"}.fa-user-tag:before{content:"\f507"}.fa-user-tie:before{content:"\f508"}.fa-user-times:before{content:"\f235"}.fa-users:before{content:"\f0c0"}.fa-users-cog:before{content:"\f509"}.fa-usps:before{content:"\f7e1"}.fa-ussunnah:before{content:"\f407"}.fa-utensil-spoon:before{content:"\f2e5"}.fa-utensils:before{content:"\f2e7"}.fa-vaadin:before{content:"\f408"}.fa-vector-square:before{content:"\f5cb"}.fa-venus:before{content:"\f221"}.fa-venus-double:before{content:"\f226"}.fa-venus-mars:before{content:"\f228"}.fa-viacoin:before{content:"\f237"}.fa-viadeo:before{content:"\f2a9"}.fa-viadeo-square:before{content:"\f2aa"}.fa-vial:before{content:"\f492"}.fa-vials:before{content:"\f493"}.fa-viber:before{content:"\f409"}.fa-video:before{content:"\f03d"}.fa-video-slash:before{content:"\f4e2"}.fa-vihara:before{content:"\f6a7"}.fa-vimeo:before{content:"\f40a"}.fa-vimeo-square:before{content:"\f194"}.fa-vimeo-v:before{content:"\f27d"}.fa-vine:before{content:"\f1ca"}.fa-vk:before{content:"\f189"}.fa-vnv:before{content:"\f40b"}.fa-volleyball-ball:before{content:"\f45f"}.fa-volume-down:before{content:"\f027"}.fa-volume-mute:before{content:"\f6a9"}.fa-volume-off:before{content:"\f026"}.fa-volume-up:before{content:"\f028"}.fa-vote-yea:before{content:"\f772"}.fa-vr-cardboard:before{content:"\f729"}.fa-vuejs:before{content:"\f41f"}.fa-walking:before{content:"\f554"}.fa-wallet:before{content:"\f555"}.fa-warehouse:before{content:"\f494"}.fa-water:before{content:"\f773"}.fa-weebly:before{content:"\f5cc"}.fa-weibo:before{content:"\f18a"}.fa-weight:before{content:"\f496"}.fa-weight-hanging:before{content:"\f5cd"}.fa-weixin:before{content:"\f1d7"}.fa-whatsapp:before{content:"\f232"}.fa-whatsapp-square:before{content:"\f40c"}.fa-wheelchair:before{content:"\f193"}.fa-whmcs:before{content:"\f40d"}.fa-wifi:before{content:"\f1eb"}.fa-wikipedia-w:before{content:"\f266"}.fa-wind:before{content:"\f72e"}.fa-window-close:before{content:"\f410"}.fa-window-maximize:before{content:"\f2d0"}.fa-window-minimize:before{content:"\f2d1"}.fa-window-restore:before{content:"\f2d2"}.fa-windows:before{content:"\f17a"}.fa-wine-bottle:before{content:"\f72f"}.fa-wine-glass:before{content:"\f4e3"}.fa-w
ine-glass-alt:before{content:"\f5ce"}.fa-wix:before{content:"\f5cf"}.fa-wizards-of-the-coast:before{content:"\f730"}.fa-wolf-pack-battalion:before{content:"\f514"}.fa-won-sign:before{content:"\f159"}.fa-wordpress:before{content:"\f19a"}.fa-wordpress-simple:before{content:"\f411"}.fa-wpbeginner:before{content:"\f297"}.fa-wpexplorer:before{content:"\f2de"}.fa-wpforms:before{content:"\f298"}.fa-wpressr:before{content:"\f3e4"}.fa-wrench:before{content:"\f0ad"}.fa-x-ray:before{content:"\f497"}.fa-xbox:before{content:"\f412"}.fa-xing:before{content:"\f168"}.fa-xing-square:before{content:"\f169"}.fa-y-combinator:before{content:"\f23b"}.fa-yahoo:before{content:"\f19e"}.fa-yandex:before{content:"\f413"}.fa-yandex-international:before{content:"\f414"}.fa-yarn:before{content:"\f7e3"}.fa-yelp:before{content:"\f1e9"}.fa-yen-sign:before{content:"\f157"}.fa-yin-yang:before{content:"\f6ad"}.fa-yoast:before{content:"\f2b1"}.fa-youtube:before{content:"\f167"}.fa-youtube-square:before{content:"\f431"}.fa-zhihu:before{content:"\f63f"}.sr-only{border:0;clip:rect(0,0,0,0);height:1px;margin:-1px;overflow:hidden;padding:0;position:absolute;width:1px}.sr-only-focusable:active,.sr-only-focusable:focus{clip:auto;height:auto;margin:0;overflow:visible;position:static;width:auto}@font-face{font-family:"Font Awesome 5 Brands";font-style:normal;font-weight:normal;src:url(../webfonts/fa-brands-400.eot);src:url(../webfonts/fa-brands-400.eot?#iefix) format("embedded-opentype"),url(../webfonts/fa-brands-400.woff2) format("woff2"),url(../webfonts/fa-brands-400.woff) format("woff"),url(../webfonts/fa-brands-400.ttf) format("truetype"),url(../webfonts/fa-brands-400.svg#fontawesome) format("svg")}.fab{font-family:"Font Awesome 5 Brands"}@font-face{font-family:"Font Awesome 5 Free";font-style:normal;font-weight:400;src:url(../webfonts/fa-regular-400.eot);src:url(../webfonts/fa-regular-400.eot?#iefix) format("embedded-opentype"),url(../webfonts/fa-regular-400.woff2) format("woff2"),url(../webfonts/fa-regular-400.woff) format("woff"),url(../webfonts/fa-regular-400.ttf) format("truetype"),url(../webfonts/fa-regular-400.svg#fontawesome) format("svg")}.far{font-weight:400}@font-face{font-family:"Font Awesome 5 Free";font-style:normal;font-weight:900;src:url(../webfonts/fa-solid-900.eot);src:url(../webfonts/fa-solid-900.eot?#iefix) format("embedded-opentype"),url(../webfonts/fa-solid-900.woff2) format("woff2"),url(../webfonts/fa-solid-900.woff) format("woff"),url(../webfonts/fa-solid-900.ttf) format("truetype"),url(../webfonts/fa-solid-900.svg#fontawesome) format("svg")}.fa,.far,.fas{font-family:"Font Awesome 5 Free"}.fa,.fas{font-weight:900} \ No newline at end of file diff --git a/docs/23.4.1/css/hugo-theme.css b/docs/23.4.1/css/hugo-theme.css new file mode 100644 index 000000000..741cab196 --- /dev/null +++ b/docs/23.4.1/css/hugo-theme.css @@ -0,0 +1,254 @@ +/* Insert here special css for hugo theme, on top of any other imported css */ + + +/* Table of contents */ + +.progress ul { + list-style: none; + margin: 0; + padding: 0 5px; +} + +#TableOfContents { + font-size: 13px !important; + max-height: 85vh; + overflow: auto; + padding: 15px !important; +} + + +#TableOfContents > ul > li > ul > li > ul li { + margin-right: 8px; +} + +#TableOfContents > ul > li > a { + font-weight: bold; padding: 0 18px; margin: 0 2px; +} + +#TableOfContents > ul > li > ul > li > a { + font-weight: bold; +} + +#TableOfContents > ul > li > ul > li > ul > li > ul > li > ul > li { + display: none; +} + +body { + font-size: 16px !important; + color: #323232 
!important; +} + +#body a.highlight, #body a.highlight:hover, #body a.highlight:focus { + text-decoration: none; + outline: none; + outline: 0; +} +#body a.highlight { + line-height: 1.1; + display: inline-block; +} +#body a.highlight:after { + display: block; + content: ""; + height: 1px; + width: 0%; + background-color: #0082a7; /*#CE3B2F*/ + -webkit-transition: width 0.5s ease; + -moz-transition: width 0.5s ease; + -ms-transition: width 0.5s ease; + transition: width 0.5s ease; +} +#body a.highlight:hover:after, #body a.highlight:focus:after { + width: 100%; +} +.progress { + position:absolute; + background-color: rgba(246, 246, 246, 0.97); + width: auto; + border: thin solid #ECECEC; + display:none; + z-index:200; +} + +#toc-menu { + border-right: thin solid #DAD8D8 !important; + padding-right: 1rem !important; + margin-right: 0.5rem !important; +} + +#sidebar-toggle-span { + border-right: thin solid #DAD8D8 !important; + padding-right: 0.5rem !important; + margin-right: 1rem !important; +} + +.btn { + display: inline-block !important; + padding: 6px 12px !important; + margin-bottom: 0 !important; + font-size: 14px !important; + font-weight: normal !important; + line-height: 1.42857143 !important; + text-align: center !important; + white-space: nowrap !important; + vertical-align: middle !important; + -ms-touch-action: manipulation !important; + touch-action: manipulation !important; + cursor: pointer !important; + -webkit-user-select: none !important; + -moz-user-select: none !important; + -ms-user-select: none !important; + user-select: none !important; + background-image: none !important; + border: 1px solid transparent !important; + border-radius: 4px !important; + -webkit-transition: all 0.15s !important; + -moz-transition: all 0.15s !important; + transition: all 0.15s !important; +} +.btn:focus { + /*outline: thin dotted; + outline: 5px auto -webkit-focus-ring-color; + outline-offset: -2px;*/ + outline: none !important; +} +.btn:hover, +.btn:focus { + color: #2b2b2b !important; + text-decoration: none !important; +} + +.btn-default { + color: #333 !important; + background-color: #fff !important; + border-color: #ccc !important; +} +.btn-default:hover, +.btn-default:focus, +.btn-default:active { + color: #fff !important; + background-color: #9e9e9e !important; + border-color: #9e9e9e !important; +} +.btn-default:active { + background-image: none !important; +} + +/* anchors */ +.anchor { + color: #00bdf3; + font-size: 0.5em; + cursor:pointer; + visibility:hidden; + margin-left: 0.5em; + position: absolute; + margin-top:0.1em; +} + +h2:hover .anchor, h3:hover .anchor, h4:hover .anchor, h5:hover .anchor, h6:hover .anchor { + visibility:visible; +} + +/* Redfines headers style */ + +h2, h3, h4, h5, h6 { + font-weight: 400; + line-height: 1.1; +} + +h1 a, h2 a, h3 a, h4 a, h5 a, h6 a { + font-weight: inherit; +} + +h2 { + font-size: 2.5rem; + line-height: 110% !important; + margin: 2.5rem 0 1.5rem 0; +} + +h3 { + font-size: 2rem; + line-height: 110% !important; + margin: 2rem 0 1rem 0; +} + +h4 { + font-size: 1.5rem; + line-height: 110% !important; + margin: 1.5rem 0 0.75rem 0; +} + +h5 { + font-size: 1rem; + line-height: 110% !important; + margin: 1rem 0 0.2rem 0; +} + +h6 { + font-size: 0.5rem; + line-height: 110% !important; + margin: 0.5rem 0 0.2rem 0; +} + +p { + margin: 1rem 0; +} + +figcaption h4 { + font-weight: 300 !important; + opacity: .85; + font-size: 1em; + text-align: center; + margin-top: -1.5em; +} + +.select-style { + border: 0; + width: 150px; + border-radius: 0px; 
+ overflow: hidden; + display: inline-flex; +} + +.select-style svg { + fill: #ccc; + width: 14px; + height: 14px; + pointer-events: none; + margin: auto; +} + +.select-style svg:hover { + fill: #e6e6e6; +} + +.select-style select { + padding: 0; + width: 130%; + border: none; + box-shadow: none; + background: transparent; + background-image: none; + -webkit-appearance: none; + margin: auto; + margin-left: 0px; + margin-right: -20px; +} + +.select-style select:focus { + outline: none; +} + +.select-style :hover { + cursor: pointer; +} + +@media only all and (max-width: 47.938em) { + #breadcrumbs .links, #top-github-link-text { + display: none; + } +} + +.is-sticky #top-bar { + box-shadow: -1px 2px 5px 1px rgba(0, 0, 0, 0.1); +} \ No newline at end of file diff --git a/docs/23.4.1/css/hybrid.css b/docs/23.4.1/css/hybrid.css new file mode 100644 index 000000000..29735a189 --- /dev/null +++ b/docs/23.4.1/css/hybrid.css @@ -0,0 +1,102 @@ +/* + +vim-hybrid theme by w0ng (https://github.com/w0ng/vim-hybrid) + +*/ + +/*background color*/ +.hljs { + display: block; + overflow-x: auto; + padding: 0.5em; + background: #1d1f21; +} + +/*selection color*/ +.hljs::selection, +.hljs span::selection { + background: #373b41; +} + +.hljs::-moz-selection, +.hljs span::-moz-selection { + background: #373b41; +} + +/*foreground color*/ +.hljs { + color: #c5c8c6; +} + +/*color: fg_yellow*/ +.hljs-title, +.hljs-name { + color: #f0c674; +} + +/*color: fg_comment*/ +.hljs-comment, +.hljs-meta, +.hljs-meta .hljs-keyword { + color: #707880; +} + +/*color: fg_red*/ +.hljs-number, +.hljs-symbol, +.hljs-literal, +.hljs-deletion, +.hljs-link { + color: #cc6666 +} + +/*color: fg_green*/ +.hljs-string, +.hljs-doctag, +.hljs-addition, +.hljs-regexp, +.hljs-selector-attr, +.hljs-selector-pseudo { + color: #b5bd68; +} + +/*color: fg_purple*/ +.hljs-attribute, +.hljs-code, +.hljs-selector-id { + color: #b294bb; +} + +/*color: fg_blue*/ +.hljs-keyword, +.hljs-selector-tag, +.hljs-bullet, +.hljs-tag { + color: #81a2be; +} + +/*color: fg_aqua*/ +.hljs-subst, +.hljs-variable, +.hljs-template-tag, +.hljs-template-variable { + color: #8abeb7; +} + +/*color: fg_orange*/ +.hljs-type, +.hljs-built_in, +.hljs-builtin-name, +.hljs-quote, +.hljs-section, +.hljs-selector-class { + color: #de935f; +} + +.hljs-emphasis { + font-style: italic; +} + +.hljs-strong { + font-weight: bold; +} diff --git a/docs/23.4.1/css/nucleus.css b/docs/23.4.1/css/nucleus.css new file mode 100644 index 000000000..1897fc5d6 --- /dev/null +++ b/docs/23.4.1/css/nucleus.css @@ -0,0 +1,615 @@ +*, *::before, *::after { + -webkit-box-sizing: border-box; + -moz-box-sizing: border-box; + box-sizing: border-box; } + +@-webkit-viewport { + width: device-width; } +@-moz-viewport { + width: device-width; } +@-ms-viewport { + width: device-width; } +@-o-viewport { + width: device-width; } +@viewport { + width: device-width; } +html { + font-size: 100%; + -ms-text-size-adjust: 100%; + -webkit-text-size-adjust: 100%; } + +body { + margin: 0; } + +article, +aside, +details, +figcaption, +figure, +footer, +header, +hgroup, +main, +nav, +section, +summary { + display: block; } + +audio, +canvas, +progress, +video { + display: inline-block; + vertical-align: baseline; } + +audio:not([controls]) { + display: none; + height: 0; } + +[hidden], +template { + display: none; } + +a { + background: transparent; + text-decoration: none; } + +a:active, +a:hover { + outline: 0; } + +abbr[title] { + border-bottom: 1px dotted; } + +b, +strong { + font-weight: bold; } + +dfn { + font-style: 
italic; } + +mark { + background: #FFFF27; + color: #333; } + +sub, +sup { + font-size: 0.8rem; + line-height: 0; + position: relative; + vertical-align: baseline; } + +sup { + top: -0.5em; } + +sub { + bottom: -0.25em; } + +img { + border: 0; + max-width: 100%; } + +svg:not(:root) { + overflow: hidden; } + +figure { + margin: 1em 40px; } + +hr { + height: 0; } + +pre { + overflow: auto; } + +button, +input, +optgroup, +select, +textarea { + color: inherit; + font: inherit; + margin: 0; } + +button { + overflow: visible; } + +button, +select { + text-transform: none; } + +button, +html input[type="button"], +input[type="reset"], +input[type="submit"] { + -webkit-appearance: button; + cursor: pointer; } + +button[disabled], +html input[disabled] { + cursor: default; } + +button::-moz-focus-inner, +input::-moz-focus-inner { + border: 0; + padding: 0; } + +input { + line-height: normal; } + +input[type="checkbox"], +input[type="radio"] { + padding: 0; } + +input[type="number"]::-webkit-inner-spin-button, +input[type="number"]::-webkit-outer-spin-button { + height: auto; } + +input[type="search"] { + -webkit-appearance: textfield; } + +input[type="search"]::-webkit-search-cancel-button, +input[type="search"]::-webkit-search-decoration { + -webkit-appearance: none; } + +legend { + border: 0; + padding: 0; } + +textarea { + overflow: auto; } + +optgroup { + font-weight: bold; } + +table { + border-collapse: collapse; + border-spacing: 0; + table-layout: fixed; + width: 100%; } + +tr, td, th { + vertical-align: middle; } + +th, td { + padding: 0.425rem 0; } + +th { + text-align: left; } + +.container { + width: 75em; + margin: 0 auto; + padding: 0; } + @media only all and (min-width: 60em) and (max-width: 74.938em) { + .container { + width: 60em; } } + @media only all and (min-width: 48em) and (max-width: 59.938em) { + .container { + width: 48em; } } + @media only all and (min-width: 30.063em) and (max-width: 47.938em) { + .container { + width: 30em; } } + @media only all and (max-width: 30em) { + .container { + width: 100%; } } + +.grid { + display: -webkit-box; + display: -moz-box; + display: box; + display: -webkit-flex; + display: -moz-flex; + display: -ms-flexbox; + display: flex; + -webkit-flex-flow: row; + -moz-flex-flow: row; + flex-flow: row; + list-style: none; + margin: 0; + padding: 0; } + @media only all and (max-width: 47.938em) { + .grid { + -webkit-flex-flow: row wrap; + -moz-flex-flow: row wrap; + flex-flow: row wrap; } } + +.block { + -webkit-box-flex: 1; + -moz-box-flex: 1; + box-flex: 1; + -webkit-flex: 1; + -moz-flex: 1; + -ms-flex: 1; + flex: 1; + min-width: 0; + min-height: 0; } + @media only all and (max-width: 47.938em) { + .block { + -webkit-box-flex: 0; + -moz-box-flex: 0; + box-flex: 0; + -webkit-flex: 0 100%; + -moz-flex: 0 100%; + -ms-flex: 0 100%; + flex: 0 100%; } } + +.content { + margin: 0.625rem; + padding: 0.938rem; } + +@media only all and (max-width: 47.938em) { + body [class*="size-"] { + -webkit-box-flex: 0; + -moz-box-flex: 0; + box-flex: 0; + -webkit-flex: 0 100%; + -moz-flex: 0 100%; + -ms-flex: 0 100%; + flex: 0 100%; } } + +.size-1-2 { + -webkit-box-flex: 0; + -moz-box-flex: 0; + box-flex: 0; + -webkit-flex: 0 50%; + -moz-flex: 0 50%; + -ms-flex: 0 50%; + flex: 0 50%; } + +.size-1-3 { + -webkit-box-flex: 0; + -moz-box-flex: 0; + box-flex: 0; + -webkit-flex: 0 33.33333%; + -moz-flex: 0 33.33333%; + -ms-flex: 0 33.33333%; + flex: 0 33.33333%; } + +.size-1-4 { + -webkit-box-flex: 0; + -moz-box-flex: 0; + box-flex: 0; + -webkit-flex: 0 25%; + -moz-flex: 0 
25%; + -ms-flex: 0 25%; + flex: 0 25%; } + +.size-1-5 { + -webkit-box-flex: 0; + -moz-box-flex: 0; + box-flex: 0; + -webkit-flex: 0 20%; + -moz-flex: 0 20%; + -ms-flex: 0 20%; + flex: 0 20%; } + +.size-1-6 { + -webkit-box-flex: 0; + -moz-box-flex: 0; + box-flex: 0; + -webkit-flex: 0 16.66667%; + -moz-flex: 0 16.66667%; + -ms-flex: 0 16.66667%; + flex: 0 16.66667%; } + +.size-1-7 { + -webkit-box-flex: 0; + -moz-box-flex: 0; + box-flex: 0; + -webkit-flex: 0 14.28571%; + -moz-flex: 0 14.28571%; + -ms-flex: 0 14.28571%; + flex: 0 14.28571%; } + +.size-1-8 { + -webkit-box-flex: 0; + -moz-box-flex: 0; + box-flex: 0; + -webkit-flex: 0 12.5%; + -moz-flex: 0 12.5%; + -ms-flex: 0 12.5%; + flex: 0 12.5%; } + +.size-1-9 { + -webkit-box-flex: 0; + -moz-box-flex: 0; + box-flex: 0; + -webkit-flex: 0 11.11111%; + -moz-flex: 0 11.11111%; + -ms-flex: 0 11.11111%; + flex: 0 11.11111%; } + +.size-1-10 { + -webkit-box-flex: 0; + -moz-box-flex: 0; + box-flex: 0; + -webkit-flex: 0 10%; + -moz-flex: 0 10%; + -ms-flex: 0 10%; + flex: 0 10%; } + +.size-1-11 { + -webkit-box-flex: 0; + -moz-box-flex: 0; + box-flex: 0; + -webkit-flex: 0 9.09091%; + -moz-flex: 0 9.09091%; + -ms-flex: 0 9.09091%; + flex: 0 9.09091%; } + +.size-1-12 { + -webkit-box-flex: 0; + -moz-box-flex: 0; + box-flex: 0; + -webkit-flex: 0 8.33333%; + -moz-flex: 0 8.33333%; + -ms-flex: 0 8.33333%; + flex: 0 8.33333%; } + +@media only all and (min-width: 48em) and (max-width: 59.938em) { + .size-tablet-1-2 { + -webkit-box-flex: 0; + -moz-box-flex: 0; + box-flex: 0; + -webkit-flex: 0 50%; + -moz-flex: 0 50%; + -ms-flex: 0 50%; + flex: 0 50%; } + + .size-tablet-1-3 { + -webkit-box-flex: 0; + -moz-box-flex: 0; + box-flex: 0; + -webkit-flex: 0 33.33333%; + -moz-flex: 0 33.33333%; + -ms-flex: 0 33.33333%; + flex: 0 33.33333%; } + + .size-tablet-1-4 { + -webkit-box-flex: 0; + -moz-box-flex: 0; + box-flex: 0; + -webkit-flex: 0 25%; + -moz-flex: 0 25%; + -ms-flex: 0 25%; + flex: 0 25%; } + + .size-tablet-1-5 { + -webkit-box-flex: 0; + -moz-box-flex: 0; + box-flex: 0; + -webkit-flex: 0 20%; + -moz-flex: 0 20%; + -ms-flex: 0 20%; + flex: 0 20%; } + + .size-tablet-1-6 { + -webkit-box-flex: 0; + -moz-box-flex: 0; + box-flex: 0; + -webkit-flex: 0 16.66667%; + -moz-flex: 0 16.66667%; + -ms-flex: 0 16.66667%; + flex: 0 16.66667%; } + + .size-tablet-1-7 { + -webkit-box-flex: 0; + -moz-box-flex: 0; + box-flex: 0; + -webkit-flex: 0 14.28571%; + -moz-flex: 0 14.28571%; + -ms-flex: 0 14.28571%; + flex: 0 14.28571%; } + + .size-tablet-1-8 { + -webkit-box-flex: 0; + -moz-box-flex: 0; + box-flex: 0; + -webkit-flex: 0 12.5%; + -moz-flex: 0 12.5%; + -ms-flex: 0 12.5%; + flex: 0 12.5%; } + + .size-tablet-1-9 { + -webkit-box-flex: 0; + -moz-box-flex: 0; + box-flex: 0; + -webkit-flex: 0 11.11111%; + -moz-flex: 0 11.11111%; + -ms-flex: 0 11.11111%; + flex: 0 11.11111%; } + + .size-tablet-1-10 { + -webkit-box-flex: 0; + -moz-box-flex: 0; + box-flex: 0; + -webkit-flex: 0 10%; + -moz-flex: 0 10%; + -ms-flex: 0 10%; + flex: 0 10%; } + + .size-tablet-1-11 { + -webkit-box-flex: 0; + -moz-box-flex: 0; + box-flex: 0; + -webkit-flex: 0 9.09091%; + -moz-flex: 0 9.09091%; + -ms-flex: 0 9.09091%; + flex: 0 9.09091%; } + + .size-tablet-1-12 { + -webkit-box-flex: 0; + -moz-box-flex: 0; + box-flex: 0; + -webkit-flex: 0 8.33333%; + -moz-flex: 0 8.33333%; + -ms-flex: 0 8.33333%; + flex: 0 8.33333%; } } +@media only all and (max-width: 47.938em) { + @supports not (flex-wrap: wrap) { + .grid { + display: block; + -webkit-box-lines: inherit; + -moz-box-lines: inherit; + box-lines: inherit; + 
-webkit-flex-wrap: inherit; + -moz-flex-wrap: inherit; + -ms-flex-wrap: inherit; + flex-wrap: inherit; } + + .block { + display: block; + -webkit-box-flex: inherit; + -moz-box-flex: inherit; + box-flex: inherit; + -webkit-flex: inherit; + -moz-flex: inherit; + -ms-flex: inherit; + flex: inherit; } } } +.first-block { + -webkit-box-ordinal-group: 0; + -webkit-order: -1; + -ms-flex-order: -1; + order: -1; } + +.last-block { + -webkit-box-ordinal-group: 2; + -webkit-order: 1; + -ms-flex-order: 1; + order: 1; } + +.fixed-blocks { + -webkit-flex-flow: row wrap; + -moz-flex-flow: row wrap; + flex-flow: row wrap; } + .fixed-blocks .block { + -webkit-box-flex: inherit; + -moz-box-flex: inherit; + box-flex: inherit; + -webkit-flex: inherit; + -moz-flex: inherit; + -ms-flex: inherit; + flex: inherit; + width: 25%; } + @media only all and (min-width: 60em) and (max-width: 74.938em) { + .fixed-blocks .block { + width: 33.33333%; } } + @media only all and (min-width: 48em) and (max-width: 59.938em) { + .fixed-blocks .block { + width: 50%; } } + @media only all and (max-width: 47.938em) { + .fixed-blocks .block { + width: 100%; } } + +body { + font-size: 1.05rem; + line-height: 1.7; } + +h1, h2, h3, h4, h5, h6 { + margin: 0.85rem 0 1.7rem 0; + text-rendering: optimizeLegibility; } + +h1 { + font-size: 3.25rem; } + +h2 { + font-size: 2.55rem; } + +h3 { + font-size: 2.15rem; } + +h4 { + font-size: 1.8rem; } + +h5 { + font-size: 1.4rem; } + +h6 { + font-size: 0.9rem; } + +p { + margin: 1.7rem 0; } + +ul, ol { + margin-top: 1.7rem; + margin-bottom: 1.7rem; } + ul ul, ul ol, ol ul, ol ol { + margin-top: 0; + margin-bottom: 0; } + +blockquote { + margin: 1.7rem 0; + padding-left: 0.85rem; } + +cite { + display: block; + font-size: 0.925rem; } + cite:before { + content: "\2014 \0020"; } + +pre { + margin: 1.7rem 0; + padding: 0.938rem; } + +code { + vertical-align: bottom; } + +small { + font-size: 0.925rem; } + +hr { + border-left: none; + border-right: none; + border-top: none; + margin: 1.7rem 0; } + +fieldset { + border: 0; + padding: 0.938rem; + margin: 0 0 1.7rem 0; } + +input, +label, +select { + display: block; } + +label { + margin-bottom: 0.425rem; } + label.required:after { + content: "*"; } + label abbr { + display: none; } + +textarea, input[type="email"], input[type="number"], input[type="password"], input[type="search"], input[type="tel"], input[type="text"], input[type="url"], input[type="color"], input[type="date"], input[type="datetime"], input[type="datetime-local"], input[type="month"], input[type="time"], input[type="week"], select[multiple=multiple] { + -webkit-transition: border-color; + -moz-transition: border-color; + transition: border-color; + border-radius: 0.1875rem; + margin-bottom: 0.85rem; + padding: 0.425rem 0.425rem; + width: 100%; } + textarea:focus, input[type="email"]:focus, input[type="number"]:focus, input[type="password"]:focus, input[type="search"]:focus, input[type="tel"]:focus, input[type="text"]:focus, input[type="url"]:focus, input[type="color"]:focus, input[type="date"]:focus, input[type="datetime"]:focus, input[type="datetime-local"]:focus, input[type="month"]:focus, input[type="time"]:focus, input[type="week"]:focus, select[multiple=multiple]:focus { + outline: none; } + +textarea { + resize: vertical; } + +input[type="checkbox"], input[type="radio"] { + display: inline; + margin-right: 0.425rem; } + +input[type="file"] { + width: 100%; } + +select { + width: auto; + max-width: 100%; + margin-bottom: 1.7rem; } + +button, +input[type="submit"] { + cursor: pointer; 
+ user-select: none; + vertical-align: middle; + white-space: nowrap; + border: inherit; } diff --git a/docs/23.4.1/css/perfect-scrollbar.min.css b/docs/23.4.1/css/perfect-scrollbar.min.css new file mode 100644 index 000000000..ebd2cb43b --- /dev/null +++ b/docs/23.4.1/css/perfect-scrollbar.min.css @@ -0,0 +1,2 @@ +/* perfect-scrollbar v0.6.13 */ +.ps-container{-ms-touch-action:auto;touch-action:auto;overflow:hidden !important;-ms-overflow-style:none}@supports (-ms-overflow-style: none){.ps-container{overflow:auto !important}}@media screen and (-ms-high-contrast: active), (-ms-high-contrast: none){.ps-container{overflow:auto !important}}.ps-container.ps-active-x>.ps-scrollbar-x-rail,.ps-container.ps-active-y>.ps-scrollbar-y-rail{display:block;background-color:transparent}.ps-container.ps-in-scrolling.ps-x>.ps-scrollbar-x-rail{background-color:#eee;opacity:.9}.ps-container.ps-in-scrolling.ps-x>.ps-scrollbar-x-rail>.ps-scrollbar-x{background-color:#999;height:11px}.ps-container.ps-in-scrolling.ps-y>.ps-scrollbar-y-rail{background-color:#eee;opacity:.9}.ps-container.ps-in-scrolling.ps-y>.ps-scrollbar-y-rail>.ps-scrollbar-y{background-color:#999;width:11px}.ps-container>.ps-scrollbar-x-rail{display:none;position:absolute;opacity:0;-webkit-transition:background-color .2s linear, opacity .2s linear;-o-transition:background-color .2s linear, opacity .2s linear;-moz-transition:background-color .2s linear, opacity .2s linear;transition:background-color .2s linear, opacity .2s linear;bottom:0px;height:15px}.ps-container>.ps-scrollbar-x-rail>.ps-scrollbar-x{position:absolute;background-color:#aaa;-webkit-border-radius:6px;-moz-border-radius:6px;border-radius:6px;-webkit-transition:background-color .2s linear, height .2s linear, width .2s ease-in-out, -webkit-border-radius .2s ease-in-out;transition:background-color .2s linear, height .2s linear, width .2s ease-in-out, -webkit-border-radius .2s ease-in-out;-o-transition:background-color .2s linear, height .2s linear, width .2s ease-in-out, border-radius .2s ease-in-out;-moz-transition:background-color .2s linear, height .2s linear, width .2s ease-in-out, border-radius .2s ease-in-out, -moz-border-radius .2s ease-in-out;transition:background-color .2s linear, height .2s linear, width .2s ease-in-out, border-radius .2s ease-in-out;transition:background-color .2s linear, height .2s linear, width .2s ease-in-out, border-radius .2s ease-in-out, -webkit-border-radius .2s ease-in-out, -moz-border-radius .2s ease-in-out;bottom:2px;height:6px}.ps-container>.ps-scrollbar-x-rail:hover>.ps-scrollbar-x,.ps-container>.ps-scrollbar-x-rail:active>.ps-scrollbar-x{height:11px}.ps-container>.ps-scrollbar-y-rail{display:none;position:absolute;opacity:0;-webkit-transition:background-color .2s linear, opacity .2s linear;-o-transition:background-color .2s linear, opacity .2s linear;-moz-transition:background-color .2s linear, opacity .2s linear;transition:background-color .2s linear, opacity .2s linear;right:0;width:15px}.ps-container>.ps-scrollbar-y-rail>.ps-scrollbar-y{position:absolute;background-color:#aaa;-webkit-border-radius:6px;-moz-border-radius:6px;border-radius:6px;-webkit-transition:background-color .2s linear, height .2s linear, width .2s ease-in-out, -webkit-border-radius .2s ease-in-out;transition:background-color .2s linear, height .2s linear, width .2s ease-in-out, -webkit-border-radius .2s ease-in-out;-o-transition:background-color .2s linear, height .2s linear, width .2s ease-in-out, border-radius .2s ease-in-out;-moz-transition:background-color .2s 
linear, height .2s linear, width .2s ease-in-out, border-radius .2s ease-in-out, -moz-border-radius .2s ease-in-out;transition:background-color .2s linear, height .2s linear, width .2s ease-in-out, border-radius .2s ease-in-out;transition:background-color .2s linear, height .2s linear, width .2s ease-in-out, border-radius .2s ease-in-out, -webkit-border-radius .2s ease-in-out, -moz-border-radius .2s ease-in-out;right:2px;width:6px}.ps-container>.ps-scrollbar-y-rail:hover>.ps-scrollbar-y,.ps-container>.ps-scrollbar-y-rail:active>.ps-scrollbar-y{width:11px}.ps-container:hover.ps-in-scrolling.ps-x>.ps-scrollbar-x-rail{background-color:#eee;opacity:.9}.ps-container:hover.ps-in-scrolling.ps-x>.ps-scrollbar-x-rail>.ps-scrollbar-x{background-color:#999;height:11px}.ps-container:hover.ps-in-scrolling.ps-y>.ps-scrollbar-y-rail{background-color:#eee;opacity:.9}.ps-container:hover.ps-in-scrolling.ps-y>.ps-scrollbar-y-rail>.ps-scrollbar-y{background-color:#999;width:11px}.ps-container:hover>.ps-scrollbar-x-rail,.ps-container:hover>.ps-scrollbar-y-rail{opacity:.6}.ps-container:hover>.ps-scrollbar-x-rail:hover{background-color:#eee;opacity:.9}.ps-container:hover>.ps-scrollbar-x-rail:hover>.ps-scrollbar-x{background-color:#999}.ps-container:hover>.ps-scrollbar-y-rail:hover{background-color:#eee;opacity:.9}.ps-container:hover>.ps-scrollbar-y-rail:hover>.ps-scrollbar-y{background-color:#999} diff --git a/docs/23.4.1/css/tags.css b/docs/23.4.1/css/tags.css new file mode 100644 index 000000000..495d2f9f7 --- /dev/null +++ b/docs/23.4.1/css/tags.css @@ -0,0 +1,49 @@ +/* Tags */ + +#head-tags{ + margin-left:1em; + margin-top:1em; +} + +#body .tags a.tag-link { + display: inline-block; + line-height: 2em; + font-size: 0.8em; + position: relative; + margin: 0 16px 8px 0; + padding: 0 10px 0 12px; + background: #8451a1; + + -webkit-border-bottom-right-radius: 3px; + border-bottom-right-radius: 3px; + -webkit-border-top-right-radius: 3px; + border-top-right-radius: 3px; + + -webkit-box-shadow: 0 1px 2px rgba(0,0,0,0.2); + box-shadow: 0 1px 2px rgba(0,0,0,0.2); + color: #fff; +} + +#body .tags a.tag-link:before { + content: ""; + position: absolute; + top:0; + left: -1em; + width: 0; + height: 0; + border-color: transparent #8451a1 transparent transparent; + border-style: solid; + border-width: 1em 1em 1em 0; +} + +#body .tags a.tag-link:after { + content: ""; + position: absolute; + top: 10px; + left: 1px; + width: 5px; + height: 5px; + -webkit-border-radius: 50%; + border-radius: 100%; + background: #fff; +} diff --git a/docs/23.4.1/css/theme-blue.css b/docs/23.4.1/css/theme-blue.css new file mode 100644 index 000000000..9771ae5e3 --- /dev/null +++ b/docs/23.4.1/css/theme-blue.css @@ -0,0 +1,111 @@ + +:root{ + + --MAIN-TEXT-color:#323232; /* Color of text by default */ + --MAIN-TITLES-TEXT-color: #5e5e5e; /* Color of titles h2-h3-h4-h5 */ + --MAIN-LINK-color:#1C90F3; /* Color of links */ + --MAIN-LINK-HOVER-color:#167ad0; /* Color of hovered links */ + --MAIN-ANCHOR-color: #1C90F3; /* color of anchors on titles */ + + --MENU-HEADER-BG-color:#1C90F3; /* Background color of menu header */ + --MENU-HEADER-BORDER-color:#33a1ff; /*Color of menu header border */ + + --MENU-SEARCH-BG-color:#167ad0; /* Search field background color (by default borders + icons) */ + --MENU-SEARCH-BOX-color: #33a1ff; /* Override search field border color */ + --MENU-SEARCH-BOX-ICONS-color: #a1d2fd; /* Override search field icons color */ + + --MENU-SECTIONS-ACTIVE-BG-color:#20272b; /* Background color of the active section and its childs 
*/ + --MENU-SECTIONS-BG-color:#252c31; /* Background color of other sections */ + --MENU-SECTIONS-LINK-color: #ccc; /* Color of links in menu */ + --MENU-SECTIONS-LINK-HOVER-color: #e6e6e6; /* Color of links in menu, when hovered */ + --MENU-SECTION-ACTIVE-CATEGORY-color: #777; /* Color of active category text */ + --MENU-SECTION-ACTIVE-CATEGORY-BG-color: #fff; /* Color of background for the active category (only) */ + + --MENU-VISITED-color: #33a1ff; /* Color of 'page visited' icons in menu */ + --MENU-SECTION-HR-color: #20272b; /* Color of
separator in menu */ + +} + +body { + color: var(--MAIN-TEXT-color) !important; +} + +textarea:focus, input[type="email"]:focus, input[type="number"]:focus, input[type="password"]:focus, input[type="search"]:focus, input[type="tel"]:focus, input[type="text"]:focus, input[type="url"]:focus, input[type="color"]:focus, input[type="date"]:focus, input[type="datetime"]:focus, input[type="datetime-local"]:focus, input[type="month"]:focus, input[type="time"]:focus, input[type="week"]:focus, select[multiple=multiple]:focus { + border-color: none; + box-shadow: none; +} + +h2, h3, h4, h5 { + color: var(--MAIN-TITLES-TEXT-color) !important; +} + +a { + color: var(--MAIN-LINK-color); +} + +.anchor { + color: var(--MAIN-ANCHOR-color); +} + +a:hover { + color: var(--MAIN-LINK-HOVER-color); +} + +#sidebar ul li.visited > a .read-icon { + color: var(--MENU-VISITED-color); +} + +#body a.highlight:after { + display: block; + content: ""; + height: 1px; + width: 0%; + -webkit-transition: width 0.5s ease; + -moz-transition: width 0.5s ease; + -ms-transition: width 0.5s ease; + transition: width 0.5s ease; + background-color: var(--MAIN-LINK-HOVER-color); +} +#sidebar { + background-color: var(--MENU-SECTIONS-BG-color); +} +#sidebar #header-wrapper { + background: var(--MENU-HEADER-BG-color); + color: var(--MENU-SEARCH-BOX-color); + border-color: var(--MENU-HEADER-BORDER-color); +} +#sidebar .searchbox { + border-color: var(--MENU-SEARCH-BOX-color); + background: var(--MENU-SEARCH-BG-color); +} +#sidebar ul.topics > li.parent, #sidebar ul.topics > li.active { + background: var(--MENU-SECTIONS-ACTIVE-BG-color); +} +#sidebar .searchbox * { + color: var(--MENU-SEARCH-BOX-ICONS-color); +} + +#sidebar a { + color: var(--MENU-SECTIONS-LINK-color); +} + +#sidebar a:hover { + color: var(--MENU-SECTIONS-LINK-HOVER-color); +} + +#sidebar ul li.active > a { + background: var(--MENU-SECTION-ACTIVE-CATEGORY-BG-color); + color: var(--MENU-SECTION-ACTIVE-CATEGORY-color) !important; +} + +#sidebar hr { + border-color: var(--MENU-SECTION-HR-color); +} + +#body .tags a.tag-link { + background-color: var(--MENU-HEADER-BG-color); +} + +#body .tags a.tag-link:before { + border-right-color: var(--MENU-HEADER-BG-color); +} \ No newline at end of file diff --git a/docs/23.4.1/css/theme-green.css b/docs/23.4.1/css/theme-green.css new file mode 100644 index 000000000..3b0b1f721 --- /dev/null +++ b/docs/23.4.1/css/theme-green.css @@ -0,0 +1,111 @@ + +:root{ + + --MAIN-TEXT-color:#323232; /* Color of text by default */ + --MAIN-TITLES-TEXT-color: #5e5e5e; /* Color of titles h2-h3-h4-h5 */ + --MAIN-LINK-color:#599a3e; /* Color of links */ + --MAIN-LINK-HOVER-color:#3f6d2c; /* Color of hovered links */ + --MAIN-ANCHOR-color: #599a3e; /* color of anchors on titles */ + + --MENU-HEADER-BG-color:#74b559; /* Background color of menu header */ + --MENU-HEADER-BORDER-color:#9cd484; /*Color of menu header border */ + + --MENU-SEARCH-BG-color:#599a3e; /* Search field background color (by default borders + icons) */ + --MENU-SEARCH-BOX-color: #84c767; /* Override search field border color */ + --MENU-SEARCH-BOX-ICONS-color: #c7f7c4; /* Override search field icons color */ + + --MENU-SECTIONS-ACTIVE-BG-color:#1b211c; /* Background color of the active section and its childs */ + --MENU-SECTIONS-BG-color:#222723; /* Background color of other sections */ + --MENU-SECTIONS-LINK-color: #ccc; /* Color of links in menu */ + --MENU-SECTIONS-LINK-HOVER-color: #e6e6e6; /* Color of links in menu, when hovered */ + --MENU-SECTION-ACTIVE-CATEGORY-color: #777; 
/* Color of active category text */ + --MENU-SECTION-ACTIVE-CATEGORY-BG-color: #fff; /* Color of background for the active category (only) */ + + --MENU-VISITED-color: #599a3e; /* Color of 'page visited' icons in menu */ + --MENU-SECTION-HR-color: #18211c; /* Color of
separator in menu */ + +} + +body { + color: var(--MAIN-TEXT-color) !important; +} + +textarea:focus, input[type="email"]:focus, input[type="number"]:focus, input[type="password"]:focus, input[type="search"]:focus, input[type="tel"]:focus, input[type="text"]:focus, input[type="url"]:focus, input[type="color"]:focus, input[type="date"]:focus, input[type="datetime"]:focus, input[type="datetime-local"]:focus, input[type="month"]:focus, input[type="time"]:focus, input[type="week"]:focus, select[multiple=multiple]:focus { + border-color: none; + box-shadow: none; +} + +h2, h3, h4, h5 { + color: var(--MAIN-TITLES-TEXT-color) !important; +} + +a { + color: var(--MAIN-LINK-color); +} + +.anchor { + color: var(--MAIN-ANCHOR-color); +} + +a:hover { + color: var(--MAIN-LINK-HOVER-color); +} + +#sidebar ul li.visited > a .read-icon { + color: var(--MENU-VISITED-color); +} + +#body a.highlight:after { + display: block; + content: ""; + height: 1px; + width: 0%; + -webkit-transition: width 0.5s ease; + -moz-transition: width 0.5s ease; + -ms-transition: width 0.5s ease; + transition: width 0.5s ease; + background-color: var(--MAIN-LINK-HOVER-color); +} +#sidebar { + background-color: var(--MENU-SECTIONS-BG-color); +} +#sidebar #header-wrapper { + background: var(--MENU-HEADER-BG-color); + color: var(--MENU-SEARCH-BOX-color); + border-color: var(--MENU-HEADER-BORDER-color); +} +#sidebar .searchbox { + border-color: var(--MENU-SEARCH-BOX-color); + background: var(--MENU-SEARCH-BG-color); +} +#sidebar ul.topics > li.parent, #sidebar ul.topics > li.active { + background: var(--MENU-SECTIONS-ACTIVE-BG-color); +} +#sidebar .searchbox * { + color: var(--MENU-SEARCH-BOX-ICONS-color); +} + +#sidebar a { + color: var(--MENU-SECTIONS-LINK-color); +} + +#sidebar a:hover { + color: var(--MENU-SECTIONS-LINK-HOVER-color); +} + +#sidebar ul li.active > a { + background: var(--MENU-SECTION-ACTIVE-CATEGORY-BG-color); + color: var(--MENU-SECTION-ACTIVE-CATEGORY-color) !important; +} + +#sidebar hr { + border-color: var(--MENU-SECTION-HR-color); +} + +#body .tags a.tag-link { + background-color: var(--MENU-HEADER-BG-color); +} + +#body .tags a.tag-link:before { + border-right-color: var(--MENU-HEADER-BG-color); +} \ No newline at end of file diff --git a/docs/23.4.1/css/theme-red.css b/docs/23.4.1/css/theme-red.css new file mode 100644 index 000000000..36c9278e5 --- /dev/null +++ b/docs/23.4.1/css/theme-red.css @@ -0,0 +1,111 @@ + +:root{ + + --MAIN-TEXT-color:#323232; /* Color of text by default */ + --MAIN-TITLES-TEXT-color: #5e5e5e; /* Color of titles h2-h3-h4-h5 */ + --MAIN-LINK-color:#f31c1c; /* Color of links */ + --MAIN-LINK-HOVER-color:#d01616; /* Color of hovered links */ + --MAIN-ANCHOR-color: #f31c1c; /* color of anchors on titles */ + + --MENU-HEADER-BG-color:#dc1010; /* Background color of menu header */ + --MENU-HEADER-BORDER-color:#e23131; /*Color of menu header border */ + + --MENU-SEARCH-BG-color:#b90000; /* Search field background color (by default borders + icons) */ + --MENU-SEARCH-BOX-color: #ef2020; /* Override search field border color */ + --MENU-SEARCH-BOX-ICONS-color: #fda1a1; /* Override search field icons color */ + + --MENU-SECTIONS-ACTIVE-BG-color:#2b2020; /* Background color of the active section and its childs */ + --MENU-SECTIONS-BG-color:#312525; /* Background color of other sections */ + --MENU-SECTIONS-LINK-color: #ccc; /* Color of links in menu */ + --MENU-SECTIONS-LINK-HOVER-color: #e6e6e6; /* Color of links in menu, when hovered */ + --MENU-SECTION-ACTIVE-CATEGORY-color: #777; /* 
Color of active category text */ + --MENU-SECTION-ACTIVE-CATEGORY-BG-color: #fff; /* Color of background for the active category (only) */ + + --MENU-VISITED-color: #ff3333; /* Color of 'page visited' icons in menu */ + --MENU-SECTION-HR-color: #2b2020; /* Color of
separator in menu */ + +} + +body { + color: var(--MAIN-TEXT-color) !important; +} + +textarea:focus, input[type="email"]:focus, input[type="number"]:focus, input[type="password"]:focus, input[type="search"]:focus, input[type="tel"]:focus, input[type="text"]:focus, input[type="url"]:focus, input[type="color"]:focus, input[type="date"]:focus, input[type="datetime"]:focus, input[type="datetime-local"]:focus, input[type="month"]:focus, input[type="time"]:focus, input[type="week"]:focus, select[multiple=multiple]:focus { + border-color: none; + box-shadow: none; +} + +h2, h3, h4, h5 { + color: var(--MAIN-TITLES-TEXT-color) !important; +} + +a { + color: var(--MAIN-LINK-color); +} + +.anchor { + color: var(--MAIN-ANCHOR-color); +} + +a:hover { + color: var(--MAIN-LINK-HOVER-color); +} + +#sidebar ul li.visited > a .read-icon { + color: var(--MENU-VISITED-color); +} + +#body a.highlight:after { + display: block; + content: ""; + height: 1px; + width: 0%; + -webkit-transition: width 0.5s ease; + -moz-transition: width 0.5s ease; + -ms-transition: width 0.5s ease; + transition: width 0.5s ease; + background-color: var(--MAIN-LINK-HOVER-color); +} +#sidebar { + background-color: var(--MENU-SECTIONS-BG-color); +} +#sidebar #header-wrapper { + background: var(--MENU-HEADER-BG-color); + color: var(--MENU-SEARCH-BOX-color); + border-color: var(--MENU-HEADER-BORDER-color); +} +#sidebar .searchbox { + border-color: var(--MENU-SEARCH-BOX-color); + background: var(--MENU-SEARCH-BG-color); +} +#sidebar ul.topics > li.parent, #sidebar ul.topics > li.active { + background: var(--MENU-SECTIONS-ACTIVE-BG-color); +} +#sidebar .searchbox * { + color: var(--MENU-SEARCH-BOX-ICONS-color); +} + +#sidebar a { + color: var(--MENU-SECTIONS-LINK-color); +} + +#sidebar a:hover { + color: var(--MENU-SECTIONS-LINK-HOVER-color); +} + +#sidebar ul li.active > a { + background: var(--MENU-SECTION-ACTIVE-CATEGORY-BG-color); + color: var(--MENU-SECTION-ACTIVE-CATEGORY-color) !important; +} + +#sidebar hr { + border-color: var(--MENU-SECTION-HR-color); +} + +#body .tags a.tag-link { + background-color: var(--MENU-HEADER-BG-color); +} + +#body .tags a.tag-link:before { + border-right-color: var(--MENU-HEADER-BG-color); +} \ No newline at end of file diff --git a/docs/23.4.1/css/theme.css b/docs/23.4.1/css/theme.css new file mode 100644 index 000000000..9b4550457 --- /dev/null +++ b/docs/23.4.1/css/theme.css @@ -0,0 +1,1141 @@ +@charset "UTF-8"; + +/* Tags */ +@import "tags.css"; + +#top-github-link, #body #breadcrumbs { + position: relative; + top: 50%; + -webkit-transform: translateY(-50%); + -moz-transform: translateY(-50%); + -o-transform: translateY(-50%); + -ms-transform: translateY(-50%); + transform: translateY(-50%); +} +.button, .button-secondary { + display: inline-block; + padding: 7px 12px; +} +.button:active, .button-secondary:active { + margin: 2px 0 -2px 0; +} +@font-face { + font-family: 'Novacento Sans Wide'; + src: url("../fonts/Novecentosanswide-UltraLight-webfont.eot"); + src: url("../fonts/Novecentosanswide-UltraLight-webfont.eot?#iefix") format("embedded-opentype"), url("../fonts/Novecentosanswide-UltraLight-webfont.woff2") format("woff2"), url("../fonts/Novecentosanswide-UltraLight-webfont.woff") format("woff"), url("../fonts/Novecentosanswide-UltraLight-webfont.ttf") format("truetype"), url("../fonts/Novecentosanswide-UltraLight-webfont.svg#novecento_sans_wideultralight") format("svg"); + font-style: normal; + font-weight: 200; +} +@font-face { + font-family: 'Work Sans'; + font-style: normal; + 
font-weight: 300; + src: url("../fonts/Work_Sans_300.eot?#iefix") format("embedded-opentype"), url("../fonts/Work_Sans_300.woff") format("woff"), url("../fonts/Work_Sans_300.woff2") format("woff2"), url("../fonts/Work_Sans_300.svg#WorkSans") format("svg"), url("../fonts/Work_Sans_300.ttf") format("truetype"); +} +@font-face { + font-family: 'Work Sans'; + font-style: normal; + font-weight: 500; + src: url("../fonts/Work_Sans_500.eot?#iefix") format("embedded-opentype"), url("../fonts/Work_Sans_500.woff") format("woff"), url("../fonts/Work_Sans_500.woff2") format("woff2"), url("../fonts/Work_Sans_500.svg#WorkSans") format("svg"), url("../fonts/Work_Sans_500.ttf") format("truetype"); +} +body { + background: #fff; + color: #777; +} +body #chapter h1 { + font-size: 3.5rem; +} +@media only all and (min-width: 48em) and (max-width: 59.938em) { + body #chapter h1 { + font-size: 3rem; + } +} +@media only all and (max-width: 47.938em) { + body #chapter h1 { + font-size: 2rem; + } +} +a { + color: #00bdf3; +} +a:hover { + color: #0082a7; +} +pre { + position: relative; + color: #ffffff; +} +.bg { + background: #fff; + border: 1px solid #eaeaea; +} +b, strong, label, th { + font-weight: 600; +} +.default-animation, #header #logo-svg, #header #logo-svg path, #sidebar, #sidebar ul, #body, #body .padding, #body .nav { + -webkit-transition: all 0.5s ease; + -moz-transition: all 0.5s ease; + transition: all 0.5s ease; +} +#grav-logo { + max-width: 60%; +} +#grav-logo path { + fill: #fff !important; +} +#sidebar { + font-weight: 300 !important; +} +fieldset { + border: 1px solid #ddd; +} +textarea, input[type="email"], input[type="number"], input[type="password"], input[type="search"], input[type="tel"], input[type="text"], input[type="url"], input[type="color"], input[type="date"], input[type="datetime"], input[type="datetime-local"], input[type="month"], input[type="time"], input[type="week"], select[multiple=multiple] { + background-color: white; + border: 1px solid #ddd; + box-shadow: inset 0 1px 3px rgba(0, 0, 0, 0.06); +} +textarea:hover, input[type="email"]:hover, input[type="number"]:hover, input[type="password"]:hover, input[type="search"]:hover, input[type="tel"]:hover, input[type="text"]:hover, input[type="url"]:hover, input[type="color"]:hover, input[type="date"]:hover, input[type="datetime"]:hover, input[type="datetime-local"]:hover, input[type="month"]:hover, input[type="time"]:hover, input[type="week"]:hover, select[multiple=multiple]:hover { + border-color: #c4c4c4; +} +textarea:focus, input[type="email"]:focus, input[type="number"]:focus, input[type="password"]:focus, input[type="search"]:focus, input[type="tel"]:focus, input[type="text"]:focus, input[type="url"]:focus, input[type="color"]:focus, input[type="date"]:focus, input[type="datetime"]:focus, input[type="datetime-local"]:focus, input[type="month"]:focus, input[type="time"]:focus, input[type="week"]:focus, select[multiple=multiple]:focus { + border-color: #00bdf3; + box-shadow: inset 0 1px 3px rgba(0,0,0,.06),0 0 5px rgba(0,169,218,.7) +} +#header-wrapper { + background: #8451a1; + color: #fff; + text-align: center; + border-bottom: 4px solid #9c6fb6; + padding: 1rem; +} +#header a { + display: inline-block; +} +#header #logo-svg { + width: 8rem; + height: 2rem; +} +#header #logo-svg path { + fill: #fff; +} +.searchbox { + margin-top: 1rem; + position: relative; + border: 1px solid #915eae; + background: #764890; + border-radius: 4px; +} +.searchbox label { + color: rgba(255, 255, 255, 0.8); + position: absolute; + left: 10px; + 
top: 3px; +} +.searchbox span { + color: rgba(255, 255, 255, 0.6); + position: absolute; + right: 10px; + top: 3px; + cursor: pointer; +} +.searchbox span:hover { + color: rgba(255, 255, 255, 0.9); +} +.searchbox input { + display: inline-block; + color: #fff; + width: 100%; + height: 30px; + background: transparent; + border: 0; + padding: 0 25px 0 30px; + margin: 0; + font-weight: 300; +} +.searchbox input::-webkit-input-placeholder { + color: rgba(255, 255, 255, 0.6); +} +.searchbox input::-moz-placeholder { + color: rgba(255, 255, 255, 0.6); +} +.searchbox input:-moz-placeholder { + color: rgba(255, 255, 255, 0.6); +} +.searchbox input:-ms-input-placeholder { + color: rgba(255, 255, 255, 0.6); +} +#sidebar-toggle-span { + display: none; +} +@media only all and (max-width: 47.938em) { + #sidebar-toggle-span { + display: inline; + } +} +#sidebar { + background-color: #322A38; + position: fixed; + top: 0; + width: 300px; + bottom: 0; + left: 0; + font-weight: 400; + font-size: 15px; +} +#sidebar a { + color: #ccc; +} +#sidebar a:hover { + color: #e6e6e6; +} +#sidebar a.subtitle { + color: rgba(204, 204, 204, 0.6); +} +#sidebar hr { + border-bottom: 1px solid #2a232f; +} +#sidebar a.padding { + padding: 0 1rem; +} +#sidebar h5 { + margin: 2rem 0 0; + position: relative; + line-height: 2; +} +#sidebar h5 a { + display: block; + margin-left: 0; + margin-right: 0; + padding-left: 1rem; + padding-right: 1rem; +} +#sidebar h5 i { + color: rgba(204, 204, 204, 0.6); + position: absolute; + right: 0.6rem; + top: 0.7rem; + font-size: 80%; +} +#sidebar h5.parent a { + background: #201b24; + color: #d9d9d9 !important; +} +#sidebar h5.active a { + background: #fff; + color: #777 !important; +} +#sidebar h5.active i { + color: #777 !important; +} +#sidebar h5 + ul.topics { + display: none; + margin-top: 0; +} +#sidebar h5.parent + ul.topics, #sidebar h5.active + ul.topics { + display: block; +} +#sidebar ul { + list-style: none; + padding: 0; + margin: 0; +} +#sidebar ul.searched a { + color: #999999; +} +#sidebar ul.searched .search-match a { + color: #e6e6e6; +} +#sidebar ul.searched .search-match a:hover { + color: white; +} +#sidebar ul.topics { + margin: 0 1rem; +} +#sidebar ul.topics.searched ul { + display: block; +} +#sidebar ul.topics ul { + display: none; + padding-bottom: 1rem; +} +#sidebar ul.topics ul ul { + padding-bottom: 0; +} +#sidebar ul.topics li.parent ul, #sidebar ul.topics > li.active ul { + display: block; +} +#sidebar ul.topics > li > a { + line-height: 2rem; + font-size: 1.1rem; +} +#sidebar ul.topics > li > a b { + opacity: 0.5; + font-weight: normal; +} +#sidebar ul.topics > li > a .fa { + margin-top: 9px; +} +#sidebar ul.topics > li.parent, #sidebar ul.topics > li.active { + background: #251f29; + margin-left: -1rem; + margin-right: -1rem; + padding-left: 1rem; + padding-right: 1rem; +} +#sidebar ul li.active > a { + background: #fff; + color: #777 !important; + margin-left: -1rem; + margin-right: -1rem; + padding-left: 1rem; + padding-right: 1rem; +} +#sidebar ul li { + padding: 0; +} +#sidebar ul li.visited + span { + margin-right: 16px; +} +#sidebar ul li a { + display: block; + padding: 2px 0; +} +#sidebar ul li a span { + text-overflow: ellipsis; + overflow: hidden; + white-space: nowrap; + display: block; +} +#sidebar ul li > a { + padding: 4px 0; +} +#sidebar ul li.visited > a .read-icon { + color: #9c6fb6; + display: inline; +} +#sidebar ul li li { + padding-left: 1rem; + text-indent: 0.2rem; +} +#main { + background: #f7f7f7; + margin: 0 0 1.563rem 0; +} +#body { + 
position: relative; + margin-left: 300px; + min-height: 100%; +} +#body img, #body .video-container { + margin: 3rem auto; + display: block; + text-align: center; +} +#body img.border, #body .video-container.border { + border: 2px solid #e6e6e6 !important; + padding: 2px; +} +#body img.shadow, #body .video-container.shadow { + box-shadow: 0 10px 30px rgba(0, 0, 0, 0.1); +} +#body img.inline { + display: inline !important; + margin: 0 !important; + vertical-align: bottom; +} +#body .bordered { + border: 1px solid #ccc; +} +#body .padding { + padding: 3rem 6rem; +} +@media only all and (max-width: 59.938em) { + #body .padding { + position: static; + padding: 15px 3rem; + } +} +@media only all and (max-width: 47.938em) { + #body .padding { + padding: 5px 1rem; + } +} +#body h1 + hr { + margin-top: -1.7rem; + margin-bottom: 3rem; +} +@media only all and (max-width: 59.938em) { + #body #navigation { + position: static; + margin-right: 0 !important; + width: 100%; + display: table; + } +} +#body .nav { + position: fixed; + top: 0; + bottom: 0; + width: 4rem; + font-size: 50px; + height: 100%; + cursor: pointer; + display: table; + text-align: center; +} +#body .nav > i { + display: table-cell; + vertical-align: middle; + text-align: center; +} +@media only all and (max-width: 59.938em) { + #body .nav { + display: table-cell; + position: static; + top: auto; + width: 50%; + text-align: center; + height: 100px; + line-height: 100px; + padding-top: 0; + } + #body .nav > i { + display: inline-block; + } +} +#body .nav:hover { + background: #F6F6F6; +} +#body .nav.nav-pref { + left: 0; +} +#body .nav.nav-next { + right: 0; +} +#body-inner { + margin-bottom: 5rem; +} +#chapter { + display: flex; + align-items: center; + justify-content: center; + height: 100%; + padding: 2rem 0; +} +#chapter #body-inner { + padding-bottom: 3rem; + max-width: 80%; +} +#chapter h3 { + font-family: "Work Sans", "Helvetica", "Tahoma", "Geneva", "Arial", sans-serif; + font-weight: 300; + text-align: center; +} +#chapter h1 { + font-size: 5rem; + border-bottom: 4px solid #F0F2F4; +} +#chapter p { + text-align: center; + font-size: 1.2rem; +} +#footer { + padding: 3rem 1rem; + color: #b3b3b3; + font-size: 13px; +} +#footer p { + margin: 0; +} +body { + font-family: "Work Sans", "Helvetica", "Tahoma", "Geneva", "Arial", sans-serif; + font-weight: 300; + line-height: 1.6; + font-size: 18px !important; +} +h2, h3, h4, h5, h6 { + font-family: "Work Sans", "Helvetica", "Tahoma", "Geneva", "Arial", sans-serif; + text-rendering: optimizeLegibility; + color: #5e5e5e; + font-weight: 400; + letter-spacing: -1px; +} +h1 { + font-family: "Novacento Sans Wide", "Helvetica", "Tahoma", "Geneva", "Arial", sans-serif; + text-align: center; + text-transform: uppercase; + color: #222; + font-weight: 200; +} +blockquote { + border-left: 10px solid #F0F2F4; +} +blockquote p { + font-size: 1.1rem; + color: #999; +} +blockquote cite { + display: block; + text-align: right; + color: #666; + font-size: 1.2rem; +} +div.notices { + margin: 2rem 0; + position: relative; +} +div.notices p { + padding: 15px; + display: block; + font-size: 1rem; + margin-top: 0rem; + margin-bottom: 0rem; + color: #666; +} +div.notices p:first-child:before { + position: absolute; + top: 2px; + color: #fff; + font-family: "Font Awesome 5 Free"; + font-weight: 900; + content: "\f06a"; + left: 10px; +} +div.notices p:first-child:after { + position: absolute; + top: 2px; + color: #fff; + left: 2rem; +} +div.notices.info p { + border-top: 30px solid #F0B37E; + background: 
#FFF2DB; +} +div.notices.info p:first-child:after { + content: 'Info'; +} +div.notices.warning p { + border-top: 30px solid rgba(217, 83, 79, 0.8); + background: #FAE2E2; +} +div.notices.warning p:first-child:after { + content: 'Warning'; +} +div.notices.note p { + border-top: 30px solid #6AB0DE; + background: #E7F2FA; +} +div.notices.note p:first-child:after { + content: 'Note'; +} +div.notices.tip p { + border-top: 30px solid rgba(92, 184, 92, 0.8); + background: #E6F9E6; +} +div.notices.tip p:first-child:after { + content: 'Tip'; +} + +/* attachments shortcode */ + +section.attachments { + margin: 2rem 0; + position: relative; +} + +section.attachments label { + font-weight: 400; + padding-left: 0.5em; + padding-top: 0.2em; + padding-bottom: 0.2em; + margin: 0; +} + +section.attachments .attachments-files { + padding: 15px; + display: block; + font-size: 1rem; + margin-top: 0rem; + margin-bottom: 0rem; + color: #666; +} + +section.attachments.orange label { + color: #fff; + background: #F0B37E; +} + +section.attachments.orange .attachments-files { + background: #FFF2DB; +} + +section.attachments.green label { + color: #fff; + background: rgba(92, 184, 92, 0.8); +} + +section.attachments.green .attachments-files { + background: #E6F9E6; +} + +section.attachments.blue label { + color: #fff; + background: #6AB0DE; +} + +section.attachments.blue .attachments-files { + background: #E7F2FA; +} + +section.attachments.grey label { + color: #fff; + background: #505d65; +} + +section.attachments.grey .attachments-files { + background: #f4f4f4; +} + +/* Children shortcode */ + +/* Children shortcode */ +.children p { + font-size: small; + margin-top: 0px; + padding-top: 0px; + margin-bottom: 0px; + padding-bottom: 0px; +} +.children-li p { + font-size: small; + font-style: italic; + +} +.children-h2 p, .children-h3 p { + font-size: small; + margin-top: 0px; + padding-top: 0px; + margin-bottom: 0px; + padding-bottom: 0px; +} +.children h3,.children h2 { + margin-bottom: 0px; + margin-top: 5px; +} + +code, kbd, pre, samp { + font-family: "Consolas", menlo, monospace; + font-size: 92%; +} +code { + border-radius: 2px; + white-space: nowrap; + color: #5e5e5e; + background: #FFF7DD; + border: 1px solid #fbf0cb; + padding: 0px 2px; +} +code + .copy-to-clipboard { + margin-left: -1px; + border-left: 0 !important; + font-size: inherit !important; + vertical-align: middle; + height: 21px; + top: 0; +} +pre { + padding: 1rem; + margin: 2rem 0; + background: #282c34; + border: 0; + border-radius: 2px; + line-height: 1.15; +} +pre code { + color: whitesmoke; + background: inherit; + white-space: inherit; + border: 0; + padding: 0; + margin: 0; + font-size: 15px; +} +hr { + border-bottom: 4px solid #F0F2F4; +} +.page-title { + margin-top: -25px; + padding: 25px; + float: left; + clear: both; + background: #9c6fb6; + color: #fff; +} +#body a.anchor-link { + color: #ccc; +} +#body a.anchor-link:hover { + color: #9c6fb6; +} +#body-inner .tabs-wrapper.ui-theme-badges { + background: #1d1f21; +} +#body-inner .tabs-wrapper.ui-theme-badges .tabs-nav li { + font-size: 0.9rem; + text-transform: uppercase; +} +#body-inner .tabs-wrapper.ui-theme-badges .tabs-nav li a { + background: #35393c; +} +#body-inner .tabs-wrapper.ui-theme-badges .tabs-nav li.current a { + background: #4d5257; +} +#body-inner pre { + white-space: pre-wrap; +} +.tabs-wrapper pre { + margin: 1rem 0; + border: 0; + padding: 0; + background: inherit; +} +table { + border: 1px solid #eaeaea; + table-layout: auto; +} +th { + background: #f7f7f7; + 
padding: 0.5rem; +} +td { + padding: 0.5rem; + border: 1px solid #eaeaea; +} +.button { + background: #9c6fb6; + color: #fff; + box-shadow: 0 3px 0 #00a5d4; +} +.button:hover { + background: #00a5d4; + box-shadow: 0 3px 0 #008db6; + color: #fff; +} +.button:active { + box-shadow: 0 1px 0 #008db6; +} +.button-secondary { + background: #F8B450; + color: #fff; + box-shadow: 0 3px 0 #f7a733; +} +.button-secondary:hover { + background: #f7a733; + box-shadow: 0 3px 0 #f69b15; + color: #fff; +} +.button-secondary:active { + box-shadow: 0 1px 0 #f69b15; +} +.bullets { + margin: 1.7rem 0; + margin-left: -0.85rem; + margin-right: -0.85rem; + overflow: auto; +} +.bullet { + float: left; + padding: 0 0.85rem; +} +.two-column-bullet { + width: 50%; +} +@media only all and (max-width: 47.938em) { + .two-column-bullet { + width: 100%; + } +} +.three-column-bullet { + width: 33.33333%; +} +@media only all and (max-width: 47.938em) { + .three-column-bullet { + width: 100%; + } +} +.four-column-bullet { + width: 25%; +} +@media only all and (max-width: 47.938em) { + .four-column-bullet { + width: 100%; + } +} +.bullet-icon { + float: left; + background: #9c6fb6; + padding: 0.875rem; + width: 3.5rem; + height: 3.5rem; + border-radius: 50%; + color: #fff; + font-size: 1.75rem; + text-align: center; +} +.bullet-icon-1 { + background: #9c6fb6; +} +.bullet-icon-2 { + background: #00f3d8; +} +.bullet-icon-3 { + background: #e6f300; +} +.bullet-content { + margin-left: 4.55rem; +} +.tooltipped { + position: relative; +} +.tooltipped:after { + position: absolute; + z-index: 1000000; + display: none; + padding: 5px 8px; + font: normal normal 11px/1.5 "Work Sans", "Helvetica", "Tahoma", "Geneva", "Arial", sans-serif; + color: #fff; + text-align: center; + text-decoration: none; + text-shadow: none; + text-transform: none; + letter-spacing: normal; + word-wrap: break-word; + white-space: pre; + pointer-events: none; + content: attr(aria-label); + background: rgba(0, 0, 0, 0.8); + border-radius: 3px; + -webkit-font-smoothing: subpixel-antialiased; +} +.tooltipped:before { + position: absolute; + z-index: 1000001; + display: none; + width: 0; + height: 0; + color: rgba(0, 0, 0, 0.8); + pointer-events: none; + content: ""; + border: 5px solid transparent; +} +.tooltipped:hover:before, .tooltipped:hover:after, .tooltipped:active:before, .tooltipped:active:after, .tooltipped:focus:before, .tooltipped:focus:after { + display: inline-block; + text-decoration: none; +} +.tooltipped-s:after, .tooltipped-se:after, .tooltipped-sw:after { + top: 100%; + right: 50%; + margin-top: 5px; +} +.tooltipped-s:before, .tooltipped-se:before, .tooltipped-sw:before { + top: auto; + right: 50%; + bottom: -5px; + margin-right: -5px; + border-bottom-color: rgba(0, 0, 0, 0.8); +} +.tooltipped-se:after { + right: auto; + left: 50%; + margin-left: -15px; +} +.tooltipped-sw:after { + margin-right: -15px; +} +.tooltipped-n:after, .tooltipped-ne:after, .tooltipped-nw:after { + right: 50%; + bottom: 100%; + margin-bottom: 5px; +} +.tooltipped-n:before, .tooltipped-ne:before, .tooltipped-nw:before { + top: -5px; + right: 50%; + bottom: auto; + margin-right: -5px; + border-top-color: rgba(0, 0, 0, 0.8); +} +.tooltipped-ne:after { + right: auto; + left: 50%; + margin-left: -15px; +} +.tooltipped-nw:after { + margin-right: -15px; +} +.tooltipped-s:after, .tooltipped-n:after { + transform: translateX(50%); +} +.tooltipped-w:after { + right: 100%; + bottom: 50%; + margin-right: 5px; + transform: translateY(50%); +} +.tooltipped-w:before { + top: 50%; + 
bottom: 50%; + left: -5px; + margin-top: -5px; + border-left-color: rgba(0, 0, 0, 0.8); +} +.tooltipped-e:after { + bottom: 50%; + left: 100%; + margin-left: 5px; + transform: translateY(50%); +} +.tooltipped-e:before { + top: 50%; + right: -5px; + bottom: 50%; + margin-top: -5px; + border-right-color: rgba(0, 0, 0, 0.8); +} +.highlightable { + padding: 1rem 0 1rem; + overflow: auto; + position: relative; +} +.hljs::selection, .hljs span::selection { + background: #b7b7b7; +} +.lightbox-active #body { + overflow: visible; +} +.lightbox-active #body .padding { + overflow: visible; +} +#github-contrib i { + vertical-align: middle; +} +.featherlight img { + margin: 0 !important; +} +.lifecycle #body-inner ul { + list-style: none; + margin: 0; + padding: 2rem 0 0; + position: relative; +} +.lifecycle #body-inner ol { + margin: 1rem 0 1rem 0; + padding: 2rem; + position: relative; +} +.lifecycle #body-inner ol li { + margin-left: 1rem; +} +.lifecycle #body-inner ol strong, .lifecycle #body-inner ol label, .lifecycle #body-inner ol th { + text-decoration: underline; +} +.lifecycle #body-inner ol ol { + margin-left: -1rem; +} +.lifecycle #body-inner h3[class*='level'] { + font-size: 20px; + position: absolute; + margin: 0; + padding: 4px 10px; + right: 0; + z-index: 1000; + color: #fff; + background: #1ABC9C; +} +.lifecycle #body-inner ol h3 { + margin-top: 1rem !important; + right: 2rem !important; +} +.lifecycle #body-inner .level-1 + ol { + background: #f6fefc; + border: 4px solid #1ABC9C; + color: #16A085; +} +.lifecycle #body-inner .level-1 + ol h3 { + background: #2ECC71; +} +.lifecycle #body-inner .level-2 + ol { + background: #f7fdf9; + border: 4px solid #2ECC71; + color: #27AE60; +} +.lifecycle #body-inner .level-2 + ol h3 { + background: #3498DB; +} +.lifecycle #body-inner .level-3 + ol { + background: #f3f9fd; + border: 4px solid #3498DB; + color: #2980B9; +} +.lifecycle #body-inner .level-3 + ol h3 { + background: #34495E; +} +.lifecycle #body-inner .level-4 + ol { + background: #e4eaf0; + border: 4px solid #34495E; + color: #2C3E50; +} +.lifecycle #body-inner .level-4 + ol h3 { + background: #34495E; +} +#top-bar { + background: #F6F6F6; + border-radius: 2px; + padding: 0 1rem; + height: 0; + min-height: 3rem; +} +#top-github-link { + position: relative; + z-index: 1; + float: right; + display: block; +} +#body #breadcrumbs { + height: auto; + margin-bottom: 0; + padding-left: 0; + line-height: 1.4; + overflow: hidden; + white-space: nowrap; + text-overflow: ellipsis; + width: 70%; + display: inline-block; + float: left; +} +#body #breadcrumbs span { + padding: 0 0.1rem; +} +@media only all and (max-width: 59.938em) { + #sidebar { + width: 230px; + } + #body { + margin-left: 230px; + } +} +@media only all and (max-width: 47.938em) { + #sidebar { + width: 230px; + left: -230px; + } + #body { + margin-left: 0; + width: 100%; + } + .sidebar-hidden { + overflow: hidden; + } + .sidebar-hidden #sidebar { + left: 0; + } + .sidebar-hidden #body { + margin-left: 230px; + overflow: hidden; + } + .sidebar-hidden #overlay { + position: absolute; + left: 0; + right: 0; + top: 0; + bottom: 0; + z-index: 10; + background: rgba(255, 255, 255, 0.5); + cursor: pointer; + } +} +.copy-to-clipboard { + background-image: url(../images/clippy.svg); + background-position: 50% 50%; + background-size: 16px 16px; + background-repeat: no-repeat; + width: 27px; + height: 1.45rem; + top: -1px; + display: inline-block; + vertical-align: middle; + position: relative; + color: #5e5e5e; + background-color: #FFF7DD; + 
margin-left: -.2rem; + cursor: pointer; + border-radius: 0 2px 2px 0; + margin-bottom: 1px; +} +.copy-to-clipboard:hover { + background-color: #E8E2CD; +} +pre .copy-to-clipboard { + position: absolute; + right: 4px; + top: 4px; + background-color: #949bab; + color: #ccc; + border-radius: 2px; +} +pre .copy-to-clipboard:hover { + background-color: #656c72; + color: #fff; +} +.parent-element { + -webkit-transform-style: preserve-3d; + -moz-transform-style: preserve-3d; + transform-style: preserve-3d; +} + +#sidebar ul.topics > li > a .read-icon { + margin-top: 9px; +} + +#sidebar ul { + list-style: none; + padding: 0; + margin: 0; +} + +#sidebar #shortcuts li { + padding: 2px 0; + list-style: none; +} + +#sidebar ul li .read-icon { + display: none; + float: right; + font-size: 13px; + min-width: 16px; + margin: 4px 0 0 0; + text-align: right; +} +#sidebar ul li.visited > a .read-icon { + color: #00bdf3; + display: inline; +} + +#sidebar #shortcuts h3 { + font-family: "Novacento Sans Wide", "Helvetica", "Tahoma", "Geneva", "Arial", sans-serif; + color: white ; + margin-top:1rem; + padding-left: 1rem; +} +#homelinks { + background-color: #9c6fb6; + color: #fff; + padding: 7px 0; + border-bottom: 4px solid #9c6fb6; +} +#searchResults { + text-align: left; +} + +option { + color: initial; +}
[New web font files added under docs/23.4.1/fonts/: Inconsolata (eot, svg, ttf, woff); Novecentosanswide-Normal-webfont, Novecentosanswide-UltraLight-webfont, Work_Sans_200, Work_Sans_300, and Work_Sans_500 (eot, svg, ttf, woff, woff2 each). Binary content and SVG glyph data omitted.]
diff --git a/docs/23.4.1/idm-products/enterprise-deployments/enterprise-deployment-automation/index.html b/docs/23.4.1/idm-products/enterprise-deployments/enterprise-deployment-automation/index.html new file mode 100644 index 000000000..0e4313bee --- /dev/null +++ b/docs/23.4.1/idm-products/enterprise-deployments/enterprise-deployment-automation/index.html @@ -0,0 +1,3954 @@
b. Enterprise Deployment Guide Automation Scripts :: Oracle Fusion Middleware on Kubernetes

b. Enterprise Deployment Guide Automation Scripts

Enterprise Deployment Automation

+

The Enterprise Deployment Automation scripts allow you to deploy the entire Oracle Identity and Access Management suite in a production environment. You can use the scripts to:

• Automate the creation of a Kubernetes cluster on Oracle Cloud Infrastructure (OCI), ready for the deployment of Oracle Identity Management products.
• Automate the deployment of Oracle Identity Management products on any compliant Kubernetes cluster.
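A minimal sketch of obtaining the scripts, assuming access to the public oracle/fmw-kubernetes GitHub repository (the repository name is an assumption inferred from the /fmw-kubernetes/ documentation path; the exact location of the automation scripts inside the repository is not shown here):

    # Clone the samples repository that hosts the deployment artifacts
    $ git clone https://github.com/oracle/fmw-kubernetes.git
    $ cd fmw-kubernetes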
+ + + + + + + + + + + + + + + + + + diff --git a/docs/23.4.1/idm-products/enterprise-deployments/enterprise-deployment-automation/index.xml b/docs/23.4.1/idm-products/enterprise-deployments/enterprise-deployment-automation/index.xml new file mode 100644 index 000000000..1e8083a4c --- /dev/null +++ b/docs/23.4.1/idm-products/enterprise-deployments/enterprise-deployment-automation/index.xml @@ -0,0 +1,14 @@ + + + + b. Enterprise Deployment Guide Automation Scripts on Oracle Fusion Middleware on Kubernetes + /fmw-kubernetes/23.4.1/idm-products/enterprise-deployments/enterprise-deployment-automation/ + Recent content in b. Enterprise Deployment Guide Automation Scripts on Oracle Fusion Middleware on Kubernetes + Hugo -- gohugo.io + en-us + + + + + + \ No newline at end of file diff --git a/docs/23.4.1/idm-products/enterprise-deployments/enterprise-deployment-guide/index.html b/docs/23.4.1/idm-products/enterprise-deployments/enterprise-deployment-guide/index.html new file mode 100644 index 000000000..a720af542 --- /dev/null +++ b/docs/23.4.1/idm-products/enterprise-deployments/enterprise-deployment-guide/index.html @@ -0,0 +1,3960 @@ + + + + + + + + + + + + a. Enterprise Deployment Guide :: Oracle Fusion Middleware on Kubernetes + + + + + + + + + + + + + + + + + + + + + + + + + +

a. Enterprise Deployment Guide

Enterprise Deployment Guide

+

The Enterprise Deployment Guide for Oracle Identity and Access Management in a Kubernetes Cluster is a step-by-step guide that describes how to deploy the entire Oracle Identity and Access Management Suite in a production environment. It incorporates best practices learned over many years to ensure that your Identity and Access Management deployment maintains the highest levels of availability and security.

+

It includes:

+
• Preparing your on-premises Kubernetes cluster, or Oracle Cloud Infrastructure Container Engine for Kubernetes (OCI OKE), for an Identity Management (IDM) deployment.
• Deploying and configuring Oracle Unified Directory (OUD), and seeding the data needed by other IDM products.
• Deploying and configuring an ingress controller.
• Deploying and configuring the WebLogic Kubernetes Operator.
• Deploying and configuring Oracle Access Management (OAM) and integrating it with OUD.
• Deploying and configuring Oracle Identity Governance (OIG) and integrating it with OUD and OAM.
• Deploying and configuring Oracle Identity Role Intelligence (OIRI) and integrating it with OIG.
• Deploying and configuring Oracle Advanced Authentication (OAA) and Oracle Adaptive Risk Management (OARM) and integrating them with OAM.
• Deploying and configuring monitoring and centralised logging, and configuring IDM to send monitoring and logging information to it.

Additionally, as described in Enterprise Deployment Automation, all of the above can be automated using open source scripts.

+ + + + + + + + + + + + + + + + + + diff --git a/docs/23.4.1/idm-products/enterprise-deployments/enterprise-deployment-guide/index.xml b/docs/23.4.1/idm-products/enterprise-deployments/enterprise-deployment-guide/index.xml new file mode 100644 index 000000000..24f0f5487 --- /dev/null +++ b/docs/23.4.1/idm-products/enterprise-deployments/enterprise-deployment-guide/index.xml @@ -0,0 +1,14 @@ + + + + a. Enterprise Deployment Guide on Oracle Fusion Middleware on Kubernetes + /fmw-kubernetes/23.4.1/idm-products/enterprise-deployments/enterprise-deployment-guide/ + Recent content in a. Enterprise Deployment Guide on Oracle Fusion Middleware on Kubernetes + Hugo -- gohugo.io + en-us + + + + + + \ No newline at end of file diff --git a/docs/23.4.1/idm-products/enterprise-deployments/index.html b/docs/23.4.1/idm-products/enterprise-deployments/index.html new file mode 100644 index 000000000..26dd975c6 --- /dev/null +++ b/docs/23.4.1/idm-products/enterprise-deployments/index.html @@ -0,0 +1,4002 @@ + + + + + + + + + + + + Enterprise Deployments :: Oracle Fusion Middleware on Kubernetes + + + + + + + + + + + + + + + + + + + + + + + + + +

Enterprise Deployments

Enterprise Deployments of Oracle Identity Management

+

The entire Oracle Identity and Access Management Suite can be deployed in a production environment. See the following sections:

• a. Enterprise Deployment Guide
• b. Enterprise Deployment Guide Automation Scripts
+ + + + + + + + + + + + + + + + + + diff --git a/docs/23.4.1/idm-products/enterprise-deployments/index.xml b/docs/23.4.1/idm-products/enterprise-deployments/index.xml new file mode 100644 index 000000000..ef10fe4c3 --- /dev/null +++ b/docs/23.4.1/idm-products/enterprise-deployments/index.xml @@ -0,0 +1,14 @@ + + + + Enterprise Deployments on Oracle Fusion Middleware on Kubernetes + /fmw-kubernetes/23.4.1/idm-products/enterprise-deployments/ + Recent content in Enterprise Deployments on Oracle Fusion Middleware on Kubernetes + Hugo -- gohugo.io + en-us + + + + + + \ No newline at end of file diff --git a/docs/23.4.1/idm-products/index.html b/docs/23.4.1/idm-products/index.html new file mode 100644 index 000000000..d460ef6b6 --- /dev/null +++ b/docs/23.4.1/idm-products/index.html @@ -0,0 +1,4103 @@ + + + + + + + + + + + + Oracle Identity Management on Kubernetes :: Oracle Fusion Middleware on Kubernetes + + + + + + + + + + + + + + + + + + + + + + + + + +

Oracle Identity Management on Kubernetes

Oracle Fusion Middleware on Kubernetes

+

Oracle supports the deployment of the following Oracle Identity Management products on Kubernetes. Click on the appropriate document link below to get started on configuring the product.

+

Please note the following:

+
• The individual product guides below for Oracle Access Management, Oracle Identity Governance, Oracle Unified Directory, and Oracle Unified Directory Services Manager are for configuring that product on a Kubernetes cluster where no other Oracle Identity Management products will be deployed. For example, if you are deploying Oracle Access Management (OAM) only, you can follow the Oracle Access Management guide. If you are deploying multiple Oracle Identity Management products on the same Kubernetes cluster, you must follow the Enterprise Deployment Guide outlined in Enterprise Deployments. Note that you can also follow the Enterprise Deployment Guide even if you are installing only one product, such as OAM.
• The individual product guides do not explain how to configure a Kubernetes cluster, because the products can be deployed on any compliant Kubernetes vendor. If you need to understand how to configure a Kubernetes cluster ready for an Oracle Identity Management deployment, follow the Enterprise Deployment Guide in Enterprise Deployments.
• The Enterprise Deployment Automation section also contains details on automation scripts that can:
    • Automate the creation of a Kubernetes cluster on Oracle Cloud Infrastructure (OCI), ready for the deployment of Oracle Identity Management products.
    • Automate the deployment of Oracle Identity Management products on any compliant Kubernetes cluster.
• Enterprise Deployments: The complete Oracle Identity Management suite can be deployed in a production environment.
• Oracle Access Management: The WebLogic Kubernetes Operator supports deployment of Oracle Access Management (OAM).
• Oracle Identity Governance: The WebLogic Kubernetes Operator supports deployment of Oracle Identity Governance (OIG).
• Oracle Internet Directory: Oracle Internet Directory provides a comprehensive Directory Solution for robust Identity Management.
• Oracle Unified Directory: Oracle Unified Directory provides a comprehensive Directory Solution for robust Identity Management.
• Oracle Unified Directory Services Manager: Oracle Unified Directory Services Manager provides an interface for managing instances of Oracle Unified Directory.
+ + + + + + + + + + + + + + + + + + diff --git a/docs/23.4.1/idm-products/index.xml b/docs/23.4.1/idm-products/index.xml new file mode 100644 index 000000000..283858173 --- /dev/null +++ b/docs/23.4.1/idm-products/index.xml @@ -0,0 +1,15 @@ + + + + Oracle Identity Management on Kubernetes on Oracle Fusion Middleware on Kubernetes + /fmw-kubernetes/23.4.1/idm-products/ + Recent content in Oracle Identity Management on Kubernetes on Oracle Fusion Middleware on Kubernetes + Hugo -- gohugo.io + en-us + Thu, 18 Apr 2019 06:46:23 -0500 + + + + + + \ No newline at end of file diff --git a/docs/23.4.1/idm-products/oam/configure-ingress/index.html b/docs/23.4.1/idm-products/oam/configure-ingress/index.html new file mode 100644 index 000000000..e5b6b1c65 --- /dev/null +++ b/docs/23.4.1/idm-products/oam/configure-ingress/index.html @@ -0,0 +1,4280 @@ + + + + + + + + + + + + Configure an Ingress for an OAM domain :: Oracle Fusion Middleware on Kubernetes + + + + + + + + + + + + + + + + + + + + + + + + + +

Configure an Ingress for an OAM domain

Setting up an ingress for NGINX for the OAM Domain

+

The instructions below explain how to set up NGINX as an ingress for the OAM domain with SSL termination.

+

Note: All the steps below should be performed on the master node.

+
  1. Generate a SSL Certificate
  2. Install NGINX
  3. Create an Ingress for the Domain
  4. Verify that you can access the domain URL

Generate a SSL Certificate

+
    +
  1. +

    Generate a private key and certificate signing request (CSR) using a tool of your choice. Send the CSR to your certificate authority (CA) to generate the certificate.

    +

    If you want to use a certificate for testing purposes, you can generate a self-signed certificate using openssl:

    +
    $ mkdir <workdir>/ssl
    +$ cd <workdir>/ssl
    +$ openssl req -x509 -nodes -days 365 -newkey rsa:2048 -keyout tls.key -out tls.crt -subj "/CN=<nginx-hostname>"
    +

    For example:

    +
    $ mkdir /scratch/OAMK8S/ssl
    +$ cd /scratch/OAMK8S/ssl
    +$ openssl req -x509 -nodes -days 365 -newkey rsa:2048 -keyout tls.key -out tls.crt -subj "/CN=masternode.example.com"
    +

    Note: The CN should match the host.domain of the master node in order to prevent hostname problems during certificate verification. (A quick way to confirm the CN and the created SSL secret is sketched after these steps.)

    +

    The output will look similar to the following:

    +
    Generating a 2048 bit RSA private key
    +..........................................+++
    +.......................................................................................................+++
    +writing new private key to 'tls.key'
    +-----
    +
  2. +
  3. +

    Create a secret for SSL by running the following command:

    +
    $ kubectl -n oamns create secret tls <domain_uid>-tls-cert --key <workdir>/tls.key --cert <workdir>/tls.crt
    +

    For example:

    +
    $ kubectl -n oamns create secret tls accessdomain-tls-cert --key /scratch/OAMK8S/ssl/tls.key --cert /scratch/OAMK8S/ssl/tls.crt
    +

    The output will look similar to the following:

    +
    secret/accessdomain-tls-cert created
    +
  4. +
+
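Before moving on to the NGINX install, it can be worth sanity checking the certificate and the secret created above. A minimal sketch, assuming the example file location /scratch/OAMK8S/ssl and the names oamns and accessdomain-tls-cert used in this section:

    # Confirm the certificate subject (CN) and validity dates
    $ openssl x509 -in /scratch/OAMK8S/ssl/tls.crt -noout -subject -dates

    # Confirm the secret exists and is of type kubernetes.io/tls
    $ kubectl -n oamns get secret accessdomain-tls-cert -o jsonpath='{.type}'

The subject should show CN=masternode.example.com (or your own master node hostname) and the jsonpath query should print kubernetes.io/tls.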

Install NGINX

+

Use helm to install NGINX.

+
    +
  1. +

    Add the helm chart repository for NGINX using the following command:

    +
    $ helm repo add stable https://kubernetes.github.io/ingress-nginx
    +

    The output will look similar to the following:

    +
    "stable" has been added to your repositories
    +
  2. +
  3. +

    Update the repository using the following command:

    +
    $ helm repo update
    +

    The output will look similar to the following:

    +
    Hang tight while we grab the latest from your chart repositories...
    +...Successfully got an update from the "stable" chart repository
    +Update Complete. ⎈ Happy Helming!⎈
    +
  4. +
+
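Optionally, before installing the chart in the next section, you can confirm that the ingress-nginx chart is now visible under the stable repository alias added above; a minimal check:

    $ helm search repo stable/ingress-nginx

The output should list the ingress-nginx chart together with the chart and app versions available from the repository.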
Install NGINX using helm
+

If you can connect directly to the master node IP address from a browser, then install NGINX with the --set controller.service.type=NodePort parameter.

+

If you are using a Managed Service for your Kubernetes cluster, for example Oracle Kubernetes Engine (OKE) on Oracle Cloud Infrastructure (OCI), and connect from a browser to the Load Balancer IP address, then use the --set controller.service.type=LoadBalancer parameter. This instructs the Managed Service to set up a load balancer to direct traffic to the NGINX ingress.
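The following sketch shows how you might find the address to use in each case, using the example oamns namespace (the controller service name matches the one created by the helm install in the next step):

    # NodePort: list the node addresses to use in browser URLs
    $ kubectl get nodes -o wide

    # LoadBalancer: watch the controller service until an external IP is assigned
    $ kubectl --namespace oamns get services -o wide -w nginx-ingress-ingress-nginx-controller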

+
    +
  1. +

    To install NGINX, use one of the following helm commands depending on whether you are using NodePort or LoadBalancer (a quick post-install check is sketched after these steps):

    +

    a) Using NodePort

    +
    $ helm install nginx-ingress -n <domain_namespace> --set controller.extraArgs.default-ssl-certificate=<domain_namespace>/<ssl_secret> --set controller.service.type=NodePort --set controller.admissionWebhooks.enabled=false stable/ingress-nginx
    +

    For example:

    +
    $ helm install nginx-ingress -n oamns --set controller.extraArgs.default-ssl-certificate=oamns/accessdomain-tls-cert --set controller.service.type=NodePort --set controller.admissionWebhooks.enabled=false stable/ingress-nginx
    +

    The output will look similar to the following:

    +
    NAME: nginx-ingress
    +LAST DEPLOYED: <DATE>
    +
    +NAMESPACE: oamns
    +STATUS: deployed
    +REVISION: 1
    +TEST SUITE: None
    +NOTES:
    +The nginx-ingress controller has been installed.
    +Get the application URL by running these commands:
    +  export HTTP_NODE_PORT=$(kubectl --namespace oamns get services -o jsonpath="{.spec.ports[0].nodePort}" nginx-ingress-controller)
    +  export HTTPS_NODE_PORT=$(kubectl --namespace oamns get services -o jsonpath="{.spec.ports[1].nodePort}" nginx-ingress-controller)
    +  export NODE_IP=$(kubectl --namespace oamns get nodes -o jsonpath="{.items[0].status.addresses[1].address}")
    +
    +  echo "Visit http://$NODE_IP:$HTTP_NODE_PORT to access your application via HTTP."
    +  echo "Visit https://$NODE_IP:$HTTPS_NODE_PORT to access your application via HTTPS."
    +
    +An example Ingress that makes use of the controller:
    +
    +  apiVersion: networking.k8s.io/v1
    +  kind: Ingress
    +  metadata:
    +    annotations:
    +      kubernetes.io/ingress.class: nginx
    +    name: example
    +    namespace: foo
    +  spec:
    +    ingressClassName: example-class
    +    rules:
    +       - host: www.example.com
    +        http:
    +          paths:
    +            - path: /
    +              pathType: Prefix
    +              backend:
    +                service:
    +                  name: exampleService
    +                  port: 80
    +    # This section is only required if TLS is to be enabled for the Ingress
    +    tls:
    +      - hosts:
    +        - www.example.com
    +        secretName: example-tls
    +
    +
    +If TLS is enabled for the Ingress, a Secret containing the certificate and key must also be provided:
    +
    +  apiVersion: v1
    +  kind: Secret
    +  metadata:
    +    name: example-tls
    +    namespace: foo
    +  data:
    +    tls.crt: <base64 encoded cert>
    +    tls.key: <base64 encoded key>
    +  type: kubernetes.io/tls
    +

    b) Using LoadBalancer

    +
    $ helm install nginx-ingress -n oamns --set controller.extraArgs.default-ssl-certificate=oamns/accessdomain-tls-cert  --set controller.service.type=LoadBalancer --set controller.admissionWebhooks.enabled=false stable/ingress-nginx
    +

    The output will look similar to the following:

    +
    $ helm install nginx-ingress -n oamns --set controller.extraArgs.default-ssl-certificate=oamns/accessdomain-tls-cert  --set controller.service.type=LoadBalancer --set controller.admissionWebhooks.enabled=false stable/ingress-nginx
    +   
    +NAME: nginx-ingress
    +LAST DEPLOYED: <DATE>
    +NAMESPACE: nginxssl
    +STATUS: deployed
    +REVISION: 1
    +TEST SUITE: None
    +NOTES:
    +The ingress-nginx controller has been installed.
    +It may take a few minutes for the LoadBalancer IP to be available.
    +You can watch the status by running 'kubectl --namespace oamns get services -o wide -w nginx-ingress-ingress-nginx-controller'
    +
    +An example Ingress that makes use of the controller:
    +
    +  apiVersion: networking.k8s.io/v1
    +  kind: Ingress
    +  metadata:
    +    annotations:
    +      kubernetes.io/ingress.class: nginx
    +    name: example
    +    namespace: foo
    +  spec:
    +    ingressClassName: example-class
    +    rules:
    +       - host: www.example.com
    +        http:
    +          paths:
    +            - path: /
    +              pathType: Prefix
    +              backend:
    +                service:
    +                  name: exampleService
    +                  port: 80
    +    # This section is only required if TLS is to be enabled for the Ingress
    +    tls:
    +      - hosts:
    +        - www.example.com
    +        secretName: example-tls
    +
    +
    +If TLS is enabled for the Ingress, a Secret containing the certificate and key must also be provided:
    +
    +  apiVersion: v1
    +  kind: Secret
    +  metadata:
    +    name: example-tls
    +    namespace: foo
    +  data:
    +    tls.crt: <base64 encoded cert>
    +    tls.key: <base64 encoded key>
    +  type: kubernetes.io/tls
    +
  2. +
+
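Before creating the ingress resource, it can help to confirm the controller deployed cleanly; a minimal check, assuming the oamns namespace and the nginx-ingress release name used above:

    $ kubectl -n oamns get pods,svc | grep nginx-ingress

The controller pod should be in the Running state and the controller service should show type NodePort or LoadBalancer, depending on the option you chose.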

Create an Ingress for the Domain

+
    +
  1. +

    Navigate to the following directory:

    +
    $ cd $WORKDIR/kubernetes/charts/ingress-per-domain
    +
  2. +
  3. +

    Edit the values.yaml and change the domainUID: parameter to match your domainUID, for example domainUID: accessdomain. The file should look as follows:

    +
    # Load balancer type.  Supported values are: NGINX
    +type: NGINX
    +
    +# Type of Configuration Supported Values are : SSL and NONSSL
    +sslType: SSL
    +
    +# domainType. Supported values are: oam
    +domainType: oam
    +
    +
    +#WLS domain as backend to the load balancer
    +wlsDomain:
    +  domainUID: accessdomain
    +  adminServerName: AdminServer
    +  adminServerPort: 7001
    +  adminServerSSLPort:
    +  oamClusterName: oam_cluster
    +  oamManagedServerPort: 14100
    +  oamManagedServerSSLPort:
    +  policyClusterName: policy_cluster
    +  policyManagedServerPort: 15100
    +  policyManagedServerSSLPort:
    +	 
    +# Host  specific values
    +hostName:
    +  enabled: false
    +  admin: 
    +  runtime: 
    +
  4. +
  5. +

    Run the following helm command to install the ingress:

    +
    $ cd $WORKDIR
    +$ helm install oam-nginx kubernetes/charts/ingress-per-domain --namespace <domain_namespace> --values kubernetes/charts/ingress-per-domain/values.yaml
    +

    For example:

    +
    $ cd $WORKDIR
    +$ helm install oam-nginx kubernetes/charts/ingress-per-domain --namespace oamns --values kubernetes/charts/ingress-per-domain/values.yaml
    +

    The output will look similar to the following:

    +
    NAME: oam-nginx
    +LAST DEPLOYED: <DATE>
    +NAMESPACE: oamns
    +STATUS: deployed
    +REVISION: 1
    +TEST SUITE: None
    +
  6. +
  7. +

    Run the following command to confirm the ingress was created successfully:

    +
    $ kubectl get ing -n <domain_namespace>
    +

    For example:

    +
    $ kubectl get ing -n oamns
    +

    The output will look similar to the following:

    +
    NAME                 CLASS    HOSTS   ADDRESS          PORTS   AGE
    +accessdomain-nginx   <none>   *                        80      5s
    +
  8. +
  9. +

    Find the node port of NGINX using the following command:

    +
    $ kubectl --namespace <domain_namespace> get services -o jsonpath="{.spec.ports[1].nodePort}" nginx-ingress-ingress-nginx-controller
    +

    For example:

    +
    $ kubectl --namespace oamns get services -o jsonpath="{.spec.ports[1].nodePort}" nginx-ingress-ingress-nginx-controller
    +

    The output will look similar to the following:

    +
    31051
    +
  10. +
  11. +

    Run the following command to check the ingress:

    +
    $ kubectl describe ing <domainUID>-nginx -n <domain_namespace>
    +

    For example:

    +
    $ kubectl describe ing accessdomain-nginx -n oamns
    +

    The output will look similar to the following:

    +
    Name:             accessdomain-nginx
    +Namespace:        oamns
    +Address:          10.106.70.55
    +Ingress Class:    <none>
    +Default backend:  default-http-backend:80 (<error: endpoints "default-http-backend" not found>)
    +Rules:
    +  Host        Path  Backends
    +  ----        ----  --------
    +  *
    +              /console                        accessdomain-adminserver:7001 (10.244.1.18:7001)
    +              /consolehelp                    accessdomain-adminserver:7001 (10.244.1.18:7001)
    +              /rreg/rreg                      accessdomain-adminserver:7001 (10.244.1.18:7001)
    +              /em                             accessdomain-adminserver:7001 (10.244.1.18:7001)
    +              /oamconsole                     accessdomain-adminserver:7001 (10.244.1.18:7001)
    +              /dms                            accessdomain-adminserver:7001 (10.244.1.18:7001)
    +              /oam/services/rest              accessdomain-adminserver:7001 (10.244.1.18:7001)
    +              /iam/admin/config               accessdomain-adminserver:7001 (10.244.1.18:7001)
    +              /iam/admin/diag                 accessdomain-adminserver:7001 (10.244.1.18:7001)
    +              /iam/access                     accessdomain-cluster-oam-cluster:14100 (10.244.1.20:14100,10.244.2.13:14100)
    +              /oam/admin/api                  accessdomain-adminserver:7001 (10.244.1.18:7001)
    +              /oam/services/rest/access/api   accessdomain-cluster-oam-cluster:14100 (10.244.1.20:14100,10.244.2.13:14100)
    +              /access                         accessdomain-cluster-policy-cluster:15100 (10.244.1.19:15100,10.244.2.12:15100)
    +              /                               accessdomain-cluster-oam-cluster:14100 (10.244.1.20:14100,10.244.2.13:14100)
    +Annotations:  kubernetes.io/ingress.class: nginx
    +              meta.helm.sh/release-name: oam-nginx
    +              meta.helm.sh/release-namespace: oamns
    +              nginx.ingress.kubernetes.io/configuration-snippet:
    +                more_clear_input_headers "WL-Proxy-Client-IP" "WL-Proxy-SSL";
    +                more_set_input_headers "X-Forwarded-Proto: https";
    +                more_set_input_headers "WL-Proxy-SSL: true";
    +              nginx.ingress.kubernetes.io/enable-access-log: false
    +              nginx.ingress.kubernetes.io/ingress.allow-http: false
    +              nginx.ingress.kubernetes.io/proxy-buffer-size: 2000k
    +Events:
    +  Type    Reason  Age                From                      Message
    +  ----    ------  ----               ----                      -------
    +  Normal  Sync    14m (x2 over 15m)  nginx-ingress-controller  Scheduled for sync
    +
  12. +
  13. +

    To confirm that the new ingress is successfully routing to the domain’s server pods, run the following command to send a request to the URL for the ‘WebLogic ReadyApp framework’:

    +
    $ curl -v -k https://${MASTERNODE-HOSTNAME}:${MASTERNODE-PORT}/weblogic/ready
    +

    For example:

    +

    a) For NodePort

    +
    $ curl -v -k https://masternode.example.com:31051/weblogic/ready
    +

    b) For LoadBalancer:

    +
    $ curl -v -k https://loadbalancer.example.com/weblogic/ready
    +

    The output will look similar to the following:

    +
    *   Trying 12.345.67.89...
    +* Connected to 12.345.67.89 (12.345.67.89) port 31051 (#0)
    +* Initializing NSS with certpath: sql:/etc/pki/nssdb
    +* skipping SSL peer certificate verification
    +* SSL connection using TLS_ECDHE_RSA_WITH_AES_128_GCM_SHA256
    +* Server certificate:
    +*       subject: CN=masternode.example.com
    +*       start date: <DATE>
    +*       expire date: <DATE>
    +*       common name: masternode.example.com
    +*       issuer: CN=masternode.example.com
    +> GET /weblogic/ready HTTP/1.1
    +> User-Agent: curl/7.29.0
    +> Host: masternode.example.com:31051
    +> Accept: */*
    +>
    +< HTTP/1.1 200 OK
    +< Date: Mon, 12 Jul 2021 15:06:12 GMT
    +< Content-Length: 0
    +< Connection: keep-alive
    +< Strict-Transport-Security: max-age=15724800; includeSubDomains
    +<
    +* Connection #0 to host 12.345.67.89 left intact
    +
  14. +
+

Verify that you can access the domain URL

+

After setting up the NGINX ingress, verify that the domain applications are accessible through the NGINX ingress port (for example 31051) as described in Validate Domain URLs.
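For example, a quick spot check of one of the console URLs routed by the ingress, using a path from the ingress rules shown earlier and the example hostname and node port used in this section:

    $ curl -k -s -o /dev/null -w "%{http_code}\n" https://masternode.example.com:31051/oamconsole

Once the OAM servers are running, this should print an HTTP status code (for example 200 or a redirect) rather than failing to connect.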

+ + + + + + + + + + + + + + + + + + diff --git a/docs/23.4.1/idm-products/oam/configure-ingress/index.xml b/docs/23.4.1/idm-products/oam/configure-ingress/index.xml new file mode 100644 index 000000000..363603206 --- /dev/null +++ b/docs/23.4.1/idm-products/oam/configure-ingress/index.xml @@ -0,0 +1,14 @@ + + + + Configure an Ingress for an OAM domain on Oracle Fusion Middleware on Kubernetes + /fmw-kubernetes/23.4.1/idm-products/oam/configure-ingress/ + Recent content in Configure an Ingress for an OAM domain on Oracle Fusion Middleware on Kubernetes + Hugo -- gohugo.io + en-us + + + + + + \ No newline at end of file diff --git a/docs/23.4.1/idm-products/oam/create-oam-domains/index.html b/docs/23.4.1/idm-products/oam/create-oam-domains/index.html new file mode 100644 index 000000000..5f321524c --- /dev/null +++ b/docs/23.4.1/idm-products/oam/create-oam-domains/index.html @@ -0,0 +1,4746 @@ + + + + + + + + + + + + Create OAM domains :: Oracle Fusion Middleware on Kubernetes + + + + + + + + + + + + + + + + + + + + + + + + + +

Create OAM domains
  1. Introduction
  2. Prerequisites
  3. Prepare the create domain script
  4. Run the create domain script
  5. Set the OAM server memory parameters
  6. Initializing the domain
  7. Verify the results
     a. Verify the domain, pods and services
     b. Verify the domain
     c. Verify the pods

Introduction

+

The OAM deployment scripts demonstrate the creation of an OAM domain home on an existing Kubernetes persistent volume (PV) and persistent volume claim (PVC). The scripts also generate the domain YAML file, which can then be used to start the Kubernetes artifacts of the corresponding domain.
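Because the domain home is created on an existing PV and PVC, it can be useful to confirm they are present and bound before running the scripts; a minimal check, assuming the oamns namespace and the accessdomain-domain-pvc claim used in the example inputs later on this page:

    $ kubectl get pv
    $ kubectl -n oamns get pvc accessdomain-domain-pvc

The persistent volume claim should report a STATUS of Bound.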

+

Prerequisites

+

Before you begin, perform the following steps:

+
    +
  1. Review the Domain resource documentation.
  2. Ensure that you have executed all the preliminary steps documented in Prepare your environment.
  3. Ensure that the database is up and running.
+

Prepare the create domain script

+

The sample scripts for Oracle Access Management domain deployment are available at $WORKDIR/kubernetes/create-access-domain.

+
    +
  1. +

    Make a copy of the create-domain-inputs.yaml file:

    +
    $ cd $WORKDIR/kubernetes/create-access-domain/domain-home-on-pv
    +$ cp create-domain-inputs.yaml create-domain-inputs.yaml.orig   
    +
  2. +
  3. +

    Edit the create-domain-inputs.yaml and modify the following parameters (a quick check that the referenced Kubernetes resources exist is sketched after these steps). Save the file when complete:

    +
    domainUID: <domain_uid>
    +domainHome: /u01/oracle/user_projects/domains/<domain_uid>
    +image: <image_name>:<tag>
    +imagePullSecretName: <container_registry_secret>
    +weblogicCredentialsSecretName: <kubernetes_domain_secret>
    +logHome: /u01/oracle/user_projects/domains/logs/<domain_uid>
    +namespace: <domain_namespace>
    +persistentVolumeClaimName: <pvc_name>
    +rcuSchemaPrefix: <rcu_prefix>
    +rcuDatabaseURL: <rcu_db_host>:<rcu_db_port>/<rcu_db_service_name>
    +rcuCredentialsSecret: <kubernetes_rcu_secret>   
    +

    For example:

    +
    domainUID: accessdomain
    +domainHome: /u01/oracle/user_projects/domains/accessdomain
    +image: container-registry.oracle.com/middleware/oam_cpu:12.2.1.4-jdk8-ol7-<October`23>
    +imagePullSecretName: orclcred
    +weblogicCredentialsSecretName: accessdomain-credentials
    +logHome: /u01/oracle/user_projects/domains/logs/accessdomain
    +namespace: oamns
    +persistentVolumeClaimName: accessdomain-domain-pvc
    +rcuSchemaPrefix: OAMK8S
    +rcuDatabaseURL: mydatabasehost.example.com:1521/orcl.example.com
    +rcuCredentialsSecret: accessdomain-rcu-credentials
    +
  4. +
+
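Before running the create domain script, you can verify that the Kubernetes resources referenced in create-domain-inputs.yaml already exist; a sketch using the example names shown above:

    # Domain credentials, RCU credentials, and container registry secret
    $ kubectl -n oamns get secret accessdomain-credentials accessdomain-rcu-credentials orclcred

    # Persistent volume claim for the domain home
    $ kubectl -n oamns get pvc accessdomain-domain-pvc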

A full list of the parameters in the create-domain-inputs.yaml file is shown below:

+ + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + +
Parameter | Definition | Default
adminPort | Port number for the Administration Server inside the Kubernetes cluster. | 7001
adminNodePort | Port number of the Administration Server outside the Kubernetes cluster. | 30701
adminServerName | Name of the Administration Server. | AdminServer
clusterName | Name of the WebLogic cluster instance to generate for the domain. By default the cluster name is oam_cluster for the OAM domain. | oam_cluster
configuredManagedServerCount | Number of Managed Server instances to generate for the domain. | 5
createDomainFilesDir | Directory on the host machine to locate all the files to create a WebLogic domain, including the script that is specified in the createDomainScriptName property. By default, this directory is set to the relative path wlst, and the create script will use the built-in WLST offline scripts in the wlst directory to create the WebLogic domain. It can also be set to the relative path wdt, and then the built-in WDT scripts will be used instead. An absolute path is also supported to point to an arbitrary directory in the file system. The built-in scripts can be replaced by the user-provided scripts or model files as long as those files are in the specified directory. Files in this directory are put into a Kubernetes config map, which in turn is mounted to the createDomainScriptsMountPath, so that the Kubernetes pod can use the scripts and supporting files to create a domain home. | wlst
createDomainScriptsMountPath | Mount path where the create domain scripts are located inside a pod. The create-domain.sh script creates a Kubernetes job to run the script (specified in the createDomainScriptName property) in a Kubernetes pod to create a domain home. Files in the createDomainFilesDir directory are mounted to this location in the pod, so that the Kubernetes pod can use the scripts and supporting files to create a domain home. | /u01/weblogic
createDomainScriptName | Script that the create domain script uses to create a WebLogic domain. The create-domain.sh script creates a Kubernetes job to run this script to create a domain home. The script is located in the in-pod directory that is specified in the createDomainScriptsMountPath property. If you need to provide your own scripts to create the domain home, instead of using the built-in scripts, you must use this property to set the name of the script that you want the create domain job to run. | create-domain-job.sh
domainHome | Home directory of the OAM domain. If not specified, the value is derived from the domainUID as /shared/domains/<domainUID>. | /u01/oracle/user_projects/domains/accessdomain
domainPVMountPath | Mount path of the domain persistent volume. | /u01/oracle/user_projects/domains
domainUID | Unique ID that will be used to identify this particular domain. Used as the name of the generated WebLogic domain as well as the name of the Kubernetes domain resource. This ID must be unique across all domains in a Kubernetes cluster. This ID cannot contain any character that is not valid in a Kubernetes service name. | accessdomain
domainType | Type of the domain. Mandatory input for OAM domains. You must provide one of the supported domain type values: oam (deploys an OAM domain) | oam
exposeAdminNodePort | Boolean indicating if the Administration Server is exposed outside of the Kubernetes cluster. | false
exposeAdminT3Channel | Boolean indicating if the T3 administrative channel is exposed outside the Kubernetes cluster. | true
image | OAM container image. The operator requires OAM 12.2.1.4. Refer to Obtain the OAM container image for details on how to obtain or create the image. | oracle/oam:12.2.1.4.0
imagePullPolicy | WebLogic container image pull policy. Legal values are IfNotPresent, Always, or Never | IfNotPresent
imagePullSecretName | Name of the Kubernetes secret to access the container registry to pull the OAM container image. The presence of the secret will be validated when this parameter is specified. |
includeServerOutInPodLog | Boolean indicating whether to include the server .out to the pod's stdout. | true
initialManagedServerReplicas | Number of Managed Servers to initially start for the domain. | 2
javaOptions | Java options for starting the Administration Server and Managed Servers. A Java option can have references to one or more of the following pre-defined variables to obtain WebLogic domain information: $(DOMAIN_NAME), $(DOMAIN_HOME), $(ADMIN_NAME), $(ADMIN_PORT), and $(SERVER_NAME). | -Dweblogic.StdoutDebugEnabled=false
logHome | The in-pod location for the domain log, server logs, server out, and Node Manager log files. If not specified, the value is derived from the domainUID as /shared/logs/<domainUID>. | /u01/oracle/user_projects/domains/logs/accessdomain
managedServerNameBase | Base string used to generate Managed Server names. | oam_server
managedServerPort | Port number for each Managed Server. | 8001
namespace | Kubernetes namespace in which to create the domain. | accessns
persistentVolumeClaimName | Name of the persistent volume claim created to host the domain home. If not specified, the value is derived from the domainUID as <domainUID>-weblogic-sample-pvc. | accessdomain-domain-pvc
productionModeEnabled | Boolean indicating if production mode is enabled for the domain. | true
serverStartPolicy | Determines which WebLogic Server instances will be started. Legal values are Never, IfNeeded, AdminOnly. | IfNeeded
t3ChannelPort | Port for the T3 channel of the NetworkAccessPoint. | 30012
t3PublicAddress | Public address for the T3 channel. This should be set to the public address of the Kubernetes cluster. This would typically be a load balancer address. For development environments only: In a single server (all-in-one) Kubernetes deployment, this may be set to the address of the master, or at the very least, it must be set to the address of one of the worker nodes. | If not provided, the script will attempt to set it to the IP address of the Kubernetes cluster
weblogicCredentialsSecretName | Name of the Kubernetes secret for the Administration Server's user name and password. If not specified, then the value is derived from the domainUID as <domainUID>-weblogic-credentials. | accessdomain-domain-credentials
weblogicImagePullSecretName | Name of the Kubernetes secret for the container registry, used to pull the WebLogic Server image. |
serverPodCpuRequest, serverPodMemoryRequest, serverPodCpuCLimit, serverPodMemoryLimit | The maximum amount of compute resources allowed, and minimum amount of compute resources required, for each server pod. Please refer to the Kubernetes documentation on Managing Compute Resources for Containers for details. | Resource requests and resource limits are not specified.
rcuSchemaPrefix | The schema prefix to use in the database, for example OAM1. You may wish to make this the same as the domainUID in order to simplify matching domains to their RCU schemas. | OAM1
rcuDatabaseURL | The database URL. | oracle-db.default.svc.cluster.local:1521/devpdb.k8s
rcuCredentialsSecret | The Kubernetes secret containing the database credentials. | accessdomain-rcu-credentials
datasourceType | Type of JDBC datasource applicable for the OAM domain. Legal values are agl and generic. Choose agl for Active GridLink datasource and generic for Generic datasource. For enterprise deployments, Oracle recommends that you use GridLink data sources to connect to Oracle RAC databases. See the Enterprise Deployment Guide for further details. | generic
+
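For orientation, each parameter above corresponds to a property of the same name in the create-domain-inputs.yaml file. Below is a minimal, illustrative sketch of how a few of these properties might appear in that file, using the same values as the example run shown later in this section (it is not a complete inputs file):

    # Illustrative excerpt of create-domain-inputs.yaml (values taken from the example run below)
    domainUID: accessdomain
    domainType: oam
    domainHome: /u01/oracle/user_projects/domains/accessdomain
    namespace: oamns
    weblogicCredentialsSecretName: accessdomain-credentials
    persistentVolumeClaimName: accessdomain-domain-pvc
    initialManagedServerReplicas: 2
    rcuSchemaPrefix: OAMK8S
    rcuDatabaseURL: mydatabasehost.example.com:1521/orcl.example.com
    rcuCredentialsSecret: accessdomain-rcu-credentials
    datasourceType: generic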

Note that the names of the Kubernetes resources in the generated YAML files may be formed with the value of some of the properties specified in the create-domain-inputs.yaml file. Those properties include the adminServerName, clusterName and managedServerNameBase. If those values contain any characters that are invalid in a Kubernetes service name, those characters are converted to valid values in the generated YAML files. For example, an uppercase letter is converted to a lowercase letter and an underscore ("_") is converted to a hyphen ("-").

+

The sample demonstrates how to create an OAM domain home and associated Kubernetes resources for a domain that has one cluster only. In addition, the sample provides the capability for users to supply their own scripts to create the domain home for other use cases. The generated domain YAML file could also be modified to cover more use cases.

+

Run the create domain script

+
    +
  1. +

    Run the create domain script, specifying your inputs file and an output directory to store the generated artifacts:

    +
    $ cd $WORKDIR/kubernetes/create-access-domain/domain-home-on-pv
    +$ ./create-domain.sh -i create-domain-inputs.yaml -o /<path to output-directory>
    +

    For example:

    +
    $ cd $WORKDIR/kubernetes/create-access-domain/domain-home-on-pv
    +$ ./create-domain.sh -i create-domain-inputs.yaml -o output
    +

    The output will look similar to the following:

    +
    Input parameters being used
    +export version="create-weblogic-sample-domain-inputs-v1"
    +export adminPort="7001"
    +export adminServerName="AdminServer"
    +export domainUID="accessdomain"
    +export domainType="oam"
    +export domainHome="/u01/oracle/user_projects/domains/accessdomain"
    +export serverStartPolicy="IfNeeded"
    +export clusterName="oam_cluster"
    +export configuredManagedServerCount="5"
    +export initialManagedServerReplicas="2"
    +export managedServerNameBase="oam_server"
    +export managedServerPort="14100"
    +export image="container-registry.oracle.com/middleware/oam_cpu:12.2.1.4-jdk8-ol7-<October`23>"
    +export imagePullPolicy="IfNotPresent"
    +export imagePullSecretName="orclcred"
    +export productionModeEnabled="true"
    +export weblogicCredentialsSecretName="accessdomain-credentials"
    +export includeServerOutInPodLog="true"
    +export logHome="/u01/oracle/user_projects/domains/logs/accessdomain"
    +export httpAccessLogInLogHome="true"
    +export t3ChannelPort="30012"
    +export exposeAdminT3Channel="false"
    +export adminNodePort="30701"
    +export exposeAdminNodePort="false"
    +export namespace="oamns"
    +javaOptions=-Dweblogic.StdoutDebugEnabled=false
    +export persistentVolumeClaimName="accessdomain-domain-pvc"
    +export domainPVMountPath="/u01/oracle/user_projects/domains"
    +export createDomainScriptsMountPath="/u01/weblogic"
    +export createDomainScriptName="create-domain-job.sh"
    +export createDomainFilesDir="wlst"
    +export rcuSchemaPrefix="OAMK8S"
    +export rcuDatabaseURL="mydatabasehost.example.com:1521/orcl.example.com"
    +export rcuCredentialsSecret="accessdomain-rcu-credentials"
    +export datasourceType="generic"
    +
    +validateWlsDomainName called with accessdomain
    +createFiles - valuesInputFile is create-domain-inputs.yaml
    +createDomainScriptName is create-domain-job.sh
    +Generating output/weblogic-domains/accessdomain/create-domain-job.yaml
    +Generating output/weblogic-domains/accessdomain/delete-domain-job.yaml
    +Generating output/weblogic-domains/accessdomain/domain.yaml
    +Checking to see if the secret accessdomain-credentials exists in namespace oamns
    +configmap/accessdomain-create-oam-infra-domain-job-cm created
    +Checking the configmap accessdomain-create-oam-infra-domain-job-cm was created
    +configmap/accessdomain-create-oam-infra-domain-job-cm labeled
    +Checking if object type job with name accessdomain-create-oam-infra-domain-job exists
    +No resources found in oamns namespace.
    +Creating the domain by creating the job output/weblogic-domains/accessdomain/create-domain-job.yaml
    +job.batch/accessdomain-create-oam-infra-domain-job created
    +Waiting for the job to complete...
    +status on iteration 1 of 20
    +pod accessdomain-create-oam-infra-domain-job-6tgw4 status is Running
    +status on iteration 2 of 20
    +pod accessdomain-create-oam-infra-domain-job-6tgw4 status is Running
    +status on iteration 3 of 20
    +pod accessdomain-create-oam-infra-domain-job-6tgw4 status is Running
    +status on iteration 4 of 20
    +pod accessdomain-create-oam-infra-domain-job-6tgw4 status is Running
    +status on iteration 5 of 20
    +pod accessdomain-create-oam-infra-domain-job-6tgw4 status is Running
    +status on iteration 6 of 20
    +pod accessdomain-create-oam-infra-domain-job-6tgw4 status is Completed
    +
    +Domain accessdomain was created and will be started by the WebLogic Kubernetes Operator
    +
    +The following files were generated:
    +  output/weblogic-domains/accessdomain/create-domain-inputs.yaml
    +  output/weblogic-domains/accessdomain/create-domain-job.yaml
    +  output/weblogic-domains/accessdomain/domain.yaml
    +

    Note: If the domain creation fails, refer to the Troubleshooting section. A quick way to check the status of the domain creation job is sketched after this list.

    +

    The command creates a domain.yaml file required for domain creation.

    +
  2. +
+
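If the create domain job does not reach the Completed state within the iterations shown above, a quick way to inspect it is sketched below (a suggestion only; adjust the job name to your domainUID and the namespace to your environment):

    # Check the job pod status and, if needed, its log output and events
    $ kubectl get pods -n oamns | grep create-oam-infra-domain-job
    $ kubectl logs job/accessdomain-create-oam-infra-domain-job -n oamns
    $ kubectl describe job accessdomain-create-oam-infra-domain-job -n oamns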

Set the OAM server memory parameters

+

By default, the Java memory parameters assigned to the oam_server cluster are very small. The minimum recommended values are -Xms4096m -Xmx8192m. However, Oracle recommends that you set these to -Xms8192m -Xmx8192m in a production environment.

+
    +
  1. +

    Navigate to the /output/weblogic-domains/<domain_uid> directory:

    +
    $ cd $WORKDIR/kubernetes/create-access-domain/domain-home-on-pv/output/weblogic-domains/<domain_uid>
    +

    For example:

    +
    $ cd $WORKDIR/kubernetes/create-access-domain/domain-home-on-pv/output/weblogic-domains/accessdomain
    +
  2. +
  3. +

    Edit the domain.yaml file and, inside the section with name: accessdomain-oam-cluster, add the memory settings as shown below:

    +
      serverPod:
    +    env:
    +    - name: USER_MEM_ARGS
    +      value: "-XX:+UseContainerSupport -Djava.security.egd=file:/dev/./urandom -Xms8192m -Xmx8192m"
    +    resources:
    +      limits:
    +        cpu: "2"
    +        memory: "8Gi"
    +      requests:
    +        cpu: "1000m"
    +        memory: "4Gi"
    +

    For example:

    +
    apiVersion: weblogic.oracle/v1
    +kind: Cluster
    +metadata:
    +  name: accessdomain-oam-cluster
    +  namespace: oamns
    +spec:
    +  clusterName: oam_cluster
    +  serverService:
    +    precreateService: true
    +  serverPod:
    +    env:
    +    - name: USER_MEM_ARGS
    +      value: "-XX:+UseContainerSupport -Djava.security.egd=file:/dev/./urandom -Xms8192m -Xmx8192m"
    +    resources:
    +      limits:
    +        cpu: "2"
    +        memory: "8Gi"
    +      requests:
    +        cpu: "1000m"
    +        memory: "4Gi"
    +  replicas: 1
    +
    +

    Note: The above CPU and memory values are for development environments only. For Enterprise Deployments, please review the performance recommendations and sizing requirements in Enterprise Deployment Guide for Oracle Identity and Access Management in a Kubernetes Cluster.

    +

    Note: Limits and requests for CPU resources are measured in CPU units. One CPU in Kubernetes is equivalent to 1 vCPU/Core for cloud providers, and 1 hyperthread on bare-metal Intel processors. An “m” suffix in a CPU attribute indicates ‘milli-CPU’, so 500m is 50% of a CPU. Memory can be expressed in various units, where one Mi is one IEC unit mega-byte (1024^2 bytes), and one Gi is one IEC unit giga-byte (1024^3 bytes). For example, the requests above (cpu: "1000m", memory: "4Gi") ask for one full CPU and 4 x 1024^3 bytes of memory for each server pod. For more information, see Resource Management for Pods and Containers, Assign Memory Resources to Containers and Pods, and Assign CPU Resources to Containers and Pods.

    +

    Note: The parameters above are also utilized by the Kubernetes Horizontal Pod Autoscaler (HPA). For more details on HPA, see Kubernetes Horizontal Pod Autoscaler.

    +

    Note: If required, you can also set the same resources and limits for the accessdomain-policy-cluster.

    +
  4. +
  5. +

    In the domain.yaml file, locate the section starting with adminServer:. Under the env: tag, add the following CLASSPATH entry. This is required for running the idmconfigtool from the Administration Server.

    +
    - name: CLASSPATH
    +  value: "/u01/oracle/wlserver/server/lib/weblogic.jar"
    +

    For example:

    +
    # adminServer is used to configure the desired behavior for starting the administration server.
    +adminServer:
    +  # adminService:
    +  #   channels:
    +  # The Admin Server's NodePort
    +  #    - channelName: default
    +  #      nodePort: 30701
    +  # Uncomment to export the T3Channel as a service
    +  #    - channelName: T3Channel
    +  serverPod:
    +    # an (optional) list of environment variable to be set on the admin servers
    +    env:
    +    - name: USER_MEM_ARGS
    +      value: "-Djava.security.egd=file:/dev/./urandom -Xms512m -Xmx1024m "
    +    - name: CLASSPATH
    +      value: "/u01/oracle/wlserver/server/lib/weblogic.jar"
    +
  6. +
  7. +

    If required, you can add the optional parameter maxClusterConcurrentStartup to the spec section of the domain.yaml file. This parameter specifies the number of Managed Servers to be started in sequence per cluster. For example, if you updated initialManagedServerReplicas to 4 in create-domain-inputs.yaml and only had 2 nodes, then setting maxClusterConcurrentStartup: 1 will start one Managed Server at a time on each node, rather than starting them all at once. This can be useful to take the strain off individual nodes at startup. Below is an example with the parameter added:

    +
    apiVersion: "weblogic.oracle/v9"
    +kind: Domain
    +metadata:
    +  name: accessdomain
    +  namespace: oamns
    +  labels:
    +    weblogic.domainUID: accessdomain
    +spec:
    +  # The WebLogic Domain Home
    +  domainHome: /u01/oracle/user_projects/domains/accessdomain
    +  maxClusterConcurrentStartup: 1
    +
    +  # The domain home source type
    +  # Set to PersistentVolume for domain-in-pv, Image for domain-in-image, or FromModel for model-in-image
    +  domainHomeSourceType: PersistentVolume
    +  ....
    +
  8. +
  9. +

    Save the changes to domain.yaml. You can optionally validate the edited file before applying it, as sketched after this list.

    +
  10. +
+
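Before applying the edited domain.yaml in the next section, you can optionally confirm that it still parses as valid YAML. A sketch, assuming kubectl 1.18 or later (a client-side dry run creates nothing in the cluster):

    $ cd $WORKDIR/kubernetes/create-access-domain/domain-home-on-pv/output/weblogic-domains/accessdomain
    # Validate the edited resources without creating them
    $ kubectl apply --dry-run=client -f domain.yaml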

Initializing the domain

+
    +
  1. +

    Create the Kubernetes resource using the following command:

    +
    $ kubectl apply -f $WORKDIR/kubernetes/create-access-domain/domain-home-on-pv/output/weblogic-domains/<domain_uid>/domain.yaml
    +

    For example:

    +
    $ kubectl apply -f $WORKDIR/kubernetes/create-access-domain/domain-home-on-pv/output/weblogic-domains/accessdomain/domain.yaml
    +

    The output will look similar to the following:

    +
    domain.weblogic.oracle/accessdomain created
    +cluster.weblogic.oracle/accessdomain-oam-cluster created
    +cluster.weblogic.oracle/accessdomain-policy-cluster created
    +
  2. +
+
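The WebLogic Kubernetes Operator will now start the Administration Server and the configured Managed Servers. If you want to follow the startup interactively before running the verification commands below, one option is to watch the pods (press Ctrl+C to stop watching):

    # Watch pod state changes in the domain namespace
    $ kubectl get pods -n oamns -w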

Verify the results

+

Verify the domain, pods and services

+
    +
  1. +

    Verify that the domain, server pods and services are created and in the READY state with a status of 1/1, by running the following command:

    +
    $ kubectl get all,domains -n <domain_namespace>
    +

    For example:

    +
    $ kubectl get all,domains -n oamns
    +

    The output will look similar to the following:

    +
    NAME                                                 READY   STATUS      RESTARTS   AGE
    +pod/accessdomain-adminserver                         1/1     Running     0          11m
    +pod/accessdomain-create-oam-infra-domain-job-7c9r9   0/1     Completed   0          18m
    +pod/accessdomain-oam-policy-mgr1                     1/1     Running     0          3m31s
    +pod/accessdomain-oam-server1                         1/1     Running     0          3m31s
    +pod/helper                                           1/1     Running     0          33m
    +
    +NAME                                          TYPE        CLUSTER-IP      EXTERNAL-IP   PORT(S)     AGE
    +service/accessdomain-adminserver              ClusterIP   None            <none>        7001/TCP    11m
    +service/accessdomain-cluster-oam-cluster      ClusterIP   10.101.59.154   <none>        14100/TCP   3m31s
    +service/accessdomain-cluster-policy-cluster   ClusterIP   10.98.236.51    <none>        15100/TCP   3m31s
    +service/accessdomain-oam-policy-mgr1          ClusterIP   None            <none>        15100/TCP   3m31s
    +service/accessdomain-oam-policy-mgr2          ClusterIP   10.104.92.12    <none>        15100/TCP   3m31s
    +service/accessdomain-oam-policy-mgr3          ClusterIP   10.96.244.37    <none>        15100/TCP   3m31s
    +service/accessdomain-oam-policy-mgr4          ClusterIP   10.105.201.23   <none>        15100/TCP   3m31s
    +service/accessdomain-oam-policy-mgr5          ClusterIP   10.110.12.227   <none>        15100/TCP   3m31s
    +service/accessdomain-oam-server1              ClusterIP   None            <none>        14100/TCP   3m31s
    +service/accessdomain-oam-server2              ClusterIP   10.96.137.33    <none>        14100/TCP   3m31s
    +service/accessdomain-oam-server3              ClusterIP   10.103.178.35   <none>        14100/TCP   3m31s
    +service/accessdomain-oam-server4              ClusterIP   10.97.254.78    <none>        14100/TCP   3m31s
    +service/accessdomain-oam-server5              ClusterIP   10.105.65.104   <none>        14100/TCP   3m31s
    +
    +NAME                                                 COMPLETIONS   DURATION   AGE
    +job.batch/accessdomain-create-oam-infra-domain-job   1/1           2m6s       18m
    +
    +NAME                                  AGE
    +domain.weblogic.oracle/accessdomain   12m
    +   
    +NAME                                                  AGE
    +cluster.weblogic.oracle/accessdomain-oam-cluster      11m
    +cluster.weblogic.oracle/accessdomain-policy-cluster   11m
    +

    Note: It will take several minutes before all the services listed above are shown. When a pod shows 0/1 in the READY column, the pod has started but the OAM server associated with it is still starting. While the pods are starting you can check the startup status in the pod logs, by running the following command:

    +
    $ kubectl logs accessdomain-adminserver -n oamns
    +$ kubectl logs accessdomain-oam-policy-mgr1 -n oamns
    +$ kubectl logs accessdomain-oam-server1 -n oamns
    +etc..
    +

    The default domain created by the script has the following characteristics:

    +
      +
    • An Administration Server named AdminServer listening on port 7001.
    • +
    • A configured OAM cluster named oam_cluster of size 5.
    • +
    • A configured Policy Manager cluster named policy_cluster of size 5.
    • +
    • One started OAM Managed Server, named oam_server1, listening on port 14100.
    • +
    • One started Policy Manager Managed Server, named oam_policy_mgr1, listening on port 15100.
    • +
    • Log files that are located in <persistent_volume>/logs/<domainUID>.
    • +
    +
  2. +
+

Verify the domain

+
    +
  1. +

    Run the following command to describe the domain:

    +
    $ kubectl describe domain <domain_uid> -n <domain_namespace>
    +

    For example:

    +
    $ kubectl describe domain accessdomain -n oamns
    +

    The output will look similar to the following:

    +
    Name:         accessdomain
    +Namespace:    oamns
    +Labels:       weblogic.domainUID=accessdomain
    +Annotations:  <none>
    +API Version:  weblogic.oracle/v9
    +Kind:         Domain
    +Metadata:
    +  Creation Timestamp:  <DATE>
    +  Generation:          1
    +  Managed Fields:
    +    API Version:  weblogic.oracle/v9
    +    Fields Type:  FieldsV1
    +    fieldsV1:
    +      f:metadata:
    +        f:annotations:
    +          .:
    +          f:kubectl.kubernetes.io/last-applied-configuration:
    +        f:labels:
    +          .:
    +          f:weblogic.domainUID:
    +      f:spec:
    +        .:
    +        f:adminServer:
    +          .:
    +          f:adminChannelPortForwardingEnabled:
    +          f:serverPod:
    +            .:
    +            f:env:
    +          f:serverStartPolicy:
    +        f:clusters:
    +        f:dataHome:
    +        f:domainHome:
    +        f:domainHomeSourceType:
    +        f:failureRetryIntervalSeconds:
    +        f:failureRetryLimitMinutes:
    +        f:httpAccessLogInLogHome:
    +        f:image:
    +        f:imagePullPolicy:
    +        f:imagePullSecrets:
    +        f:includeServerOutInPodLog:
    +        f:logHome:
    +        f:logHomeEnabled:
    +        f:logHomeLayout:
    +        f:maxClusterConcurrentShutdown:
    +        f:maxClusterConcurrentStartup:
    +        f:maxClusterUnavailable:
    +        f:replicas:
    +        f:serverPod:
    +          .:
    +          f:env:
    +          f:volumeMounts:
    +          f:volumes:
    +        f:serverStartPolicy:
    +        f:webLogicCredentialsSecret:
    +          .:
    +          f:name:
    +    Manager:      kubectl-client-side-apply
    +    Operation:    Update
    +    Time:         <DATE>
    +    API Version:  weblogic.oracle/v9
    +    Fields Type:  FieldsV1
    +    fieldsV1:
    +      f:status:
    +        .:
    +        f:clusters:
    +        f:conditions:
    +        f:observedGeneration:
    +        f:servers:
    +        f:startTime:
    +    Manager:         Kubernetes Java Client
    +    Operation:       Update
    +    Subresource:     status
    +    Time:            <DATE>
    +  Resource Version:  2074089
    +  UID:               e194d483-7383-4359-adb9-bf97de36518b
    +Spec:
    +  Admin Server:
    +    Admin Channel Port Forwarding Enabled:  true
    +    Server Pod:
    +      Env:
    +        Name:             USER_MEM_ARGS
    +        Value:            -Djava.security.egd=file:/dev/./urandom -Xms512m -Xmx1024m
    +        Name:             CLASSPATH
    +        Value:            /u01/oracle/wlserver/server/lib/weblogic.jar
    +    Server Start Policy:  IfNeeded
    +  Clusters:
    +    Name:                          accessdomain-oam-cluster
    +    Name:                          accessdomain-policy-cluster
    +  Data Home:
    +  Domain Home:                     /u01/oracle/user_projects/domains/accessdomain
    +  Domain Home Source Type:         PersistentVolume
    +  Failure Retry Interval Seconds:  120
    +  Failure Retry Limit Minutes:     1440
    +  Http Access Log In Log Home:     true
    +  Image:                           container-registry.oracle.com/middleware/oam_cpu:12.2.1.4-jdk8-ol7-<October'23>
    +  Image Pull Policy:               IfNotPresent
    +  Image Pull Secrets:
    +    Name:                           orclcred
    +  Include Server Out In Pod Log:    true
    +  Log Home:                         /u01/oracle/user_projects/domains/logs/accessdomain
    +  Log Home Enabled:                 true
    +  Log Home Layout:                  ByServers
    +  Max Cluster Concurrent Shutdown:  1
    +  Max Cluster Concurrent Startup:   0
    +  Max Cluster Unavailable:          1
    +  Replicas:                         1
    +  Server Pod:
    +    Env:
    +      Name:   JAVA_OPTIONS
    +      Value:  -Dweblogic.StdoutDebugEnabled=false
    +      Name:   USER_MEM_ARGS
    +      Value:  -Djava.security.egd=file:/dev/./urandom -Xms256m -Xmx1024m
    +    Volume Mounts:
    +      Mount Path:  /u01/oracle/user_projects/domains
    +      Name:        weblogic-domain-storage-volume
    +    Volumes:
    +      Name:  weblogic-domain-storage-volume
    +      Persistent Volume Claim:
    +        Claim Name:     accessdomain-domain-pvc
    +  Server Start Policy:  IfNeeded
    +  Web Logic Credentials Secret:
    +    Name:  accessdomain-credentials
    +Status:
    +  Clusters:
    +    Cluster Name:  oam_cluster
    +    Conditions:
    +      Last Transition Time:  <DATE>
    +      Status:                True
    +      Type:                  Available
    +      Last Transition Time:  <DATE>
    +      Status:                True
    +      Type:                  Completed
    +    Label Selector:          weblogic.domainUID=accessdomain,weblogic.clusterName=oam_cluster
    +    Maximum Replicas:        5
    +    Minimum Replicas:        0
    +    Observed Generation:     1
    +    Ready Replicas:          1
    +    Replicas:                1
    +    Replicas Goal:           1
    +    Cluster Name:            policy_cluster
    +    Conditions:
    +      Last Transition Time:  <DATE>
    +      Status:                True
    +      Type:                  Available
    +      Last Transition Time:  <DATE>
    +      Status:                True
    +      Type:                  Completed
    +    Label Selector:          weblogic.domainUID=accessdomain,weblogic.clusterName=policy_cluster
    +    Maximum Replicas:        5
    +    Minimum Replicas:        0
    +    Observed Generation:     1
    +    Ready Replicas:          1
    +    Replicas:                1
    +    Replicas Goal:           1
    +  Conditions:
    +    Last Transition Time:  <DATE>
    +    Status:                True
    +    Type:                  Available
    +    Last Transition Time:  <DATE>
    +    Status:                True
    +    Type:                  Completed
    +  Observed Generation:     1
    +  Servers:
    +    Health:
    +      Activation Time:  <DATE>
    +      Overall Health:   ok
    +      Subsystems:
    +        Subsystem Name:  ServerRuntime
    +        Symptoms:
    +    Node Name:     worker-node2
    +    Pod Phase:     Running
    +    Pod Ready:     True
    +    Server Name:   AdminServer
    +    State:         RUNNING
    +    State Goal:    RUNNING
    +    Cluster Name:  oam_cluster
    +    Health:
    +      Activation Time:  <DATE>
    +      Overall Health:   ok
    +      Subsystems:
    +        Subsystem Name:  ServerRuntime
    +       Symptoms:
    +    Node Name:     worker-node1
    +    Pod Phase:     Running
    +    Pod Ready:     True
    +    Server Name:   oam_server1
    +    State:         RUNNING
    +    State Goal:    RUNNING
    +    Cluster Name:  oam_cluster
    +    Server Name:   oam_server2
    +    State:         SHUTDOWN
    +    State Goal:    SHUTDOWN
    +    Cluster Name:  oam_cluster
    +    Server Name:   oam_server3
    +    State:         SHUTDOWN
    +    State Goal:    SHUTDOWN
    +    Cluster Name:  oam_cluster
    +    Server Name:   oam_server4
    +    State:         SHUTDOWN
    +    State Goal:    SHUTDOWN
    +    Cluster Name:  oam_cluster
    +    Server Name:   oam_server5
    +    State:         SHUTDOWN
    +    State Goal:    SHUTDOWN
    +    Cluster Name:  policy_cluster
    +    Health:
    +      Activation Time:  <DATE>
    +      Overall Health:   ok
    +      Subsystems:
    +        Subsystem Name:  ServerRuntime
    +        Symptoms:
    +    Node Name:     worker-node1
    +    Pod Phase:     Running
    +    Pod Ready:     True
    +    Server Name:   oam_policy_mgr1
    +    State:         RUNNING
    +    State Goal:    RUNNING
    +    Cluster Name:  policy_cluster
    +    Server Name:   oam_policy_mgr2
    +    State:         SHUTDOWN
    +    State Goal:    SHUTDOWN
    +    Cluster Name:  policy_cluster
    +    Server Name:   oam_policy_mgr3
    +    State:         SHUTDOWN
    +    State Goal:    SHUTDOWN
    +    Cluster Name:  policy_cluster
    +    Server Name:   oam_policy_mgr4
    +    State:         SHUTDOWN
    +    State Goal:    SHUTDOWN
    +    Cluster Name:  policy_cluster
    +    Server Name:   oam_policy_mgr5
    +    State:         SHUTDOWN
    +    State Goal:    SHUTDOWN
    +  Start Time:      <DATE>
    +Events:
    +  Type    Reason     Age    From               Message
    +  ----    ------     ----   ----               -------
    +  Normal  Created    15m    weblogic.operator  Domain accessdomain was created.
    +  Normal  Available  2m56s  weblogic.operator  Domain accessdomain is available: a sufficient number of its servers have reached the ready state.
    +  Normal  Completed  2m56s  weblogic.operator  Domain accessdomain is complete because all of the following are true: there is no failure detected, there are no pending server shutdowns, and all servers expected to be running are ready and at their target image, auxiliary images, restart version, and introspect version.
    +

    In the Status section of the output, the available servers and clusters are listed. A shorter way to pull out just the domain status conditions is sketched after this list.

    +
  2. +
+
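As noted above, if you only need the condition summary rather than the full describe output, one option is a jsonpath query against the domain resource (a sketch; the .status.conditions path corresponds to the Conditions block in the Status section shown above):

    # Print just the domain status conditions
    $ kubectl get domain accessdomain -n oamns -o jsonpath='{.status.conditions}'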

Verify the pods

+
    +
  1. +

    Run the following command to see the pods running the servers and which nodes they are running on:

    +
    $ kubectl get pods -n <domain_namespace> -o wide
    +

    For example:

    +
    $ kubectl get pods -n oamns -o wide
    +

    The output will look similar to the following:

    +
    NAME                                            READY   STATUS      RESTARTS   AGE     IP            NODE             NOMINATED NODE   READINESS GATES
    +accessdomain-adminserver                         1/1     Running     0          18m   10.244.6.63   10.250.42.252   <none>           <none>
    +accessdomain-create-oam-infra-domain-job-7c9r9   0/1     Completed   0          25m   10.244.6.61   10.250.42.252   <none>           <none>
    +accessdomain-oam-policy-mgr1                     1/1     Running     0          10m   10.244.5.13   10.250.42.255   <none>           <none>
    +accessdomain-oam-server1                         1/1     Running     0          10m   10.244.5.12   10.250.42.255   <none>           <none>
    +helper                                           1/1     Running     0          40m   10.244.6.60   10.250.42.252   <none>           <none>
    +

    You are now ready to configure an Ingress to direct traffic for your OAM domain as per Configure an Ingress for an OAM domain.

    +
  2. +
+ + + + + + + + + + + + + + + + + + diff --git a/docs/23.4.1/idm-products/oam/create-oam-domains/index.xml b/docs/23.4.1/idm-products/oam/create-oam-domains/index.xml new file mode 100644 index 000000000..b7aed141f --- /dev/null +++ b/docs/23.4.1/idm-products/oam/create-oam-domains/index.xml @@ -0,0 +1,14 @@ + + + + Create OAM domains on Oracle Fusion Middleware on Kubernetes + /fmw-kubernetes/23.4.1/idm-products/oam/create-oam-domains/ + Recent content in Create OAM domains on Oracle Fusion Middleware on Kubernetes + Hugo -- gohugo.io + en-us + + + + + + \ No newline at end of file diff --git a/docs/23.4.1/idm-products/oam/create-or-update-image/index.html b/docs/23.4.1/idm-products/oam/create-or-update-image/index.html new file mode 100644 index 000000000..20706c8de --- /dev/null +++ b/docs/23.4.1/idm-products/oam/create-or-update-image/index.html @@ -0,0 +1,4240 @@ + + + + + + + + + + + + Create or update an image :: Oracle Fusion Middleware on Kubernetes + + + + + + + + + + + + + + + + + + + + + + + + + +
+
+
+ +
+
+
+ +
+ +
+ +
+ +

+ + Create or update an image +


As described in Prepare Your Environment you can create your own OAM container image. If you have access to My Oracle Support (MOS) and need to build a new image with an interim or one-off patch, it is recommended to use the WebLogic Image Tool to build an Oracle Access Management image for production deployments.

+

Create or update an Oracle Access Management image using the WebLogic Image Tool

+

Using the WebLogic Image Tool, you can create a new Oracle Access Management image with PSUs and interim patches or update an existing image with one or more interim patches.

+
+

Recommendations:

+
    +
  • Use create for creating a new Oracle Access Management image containing the Oracle Access Management binaries, bundle patch and interim patches. This is the recommended approach if you have access to the OAM patches because it optimizes the size of the image.
  • +
  • Use update for patching an existing Oracle Access Management image with a single interim patch. Note that the patched image size may increase considerably due to additional image layers introduced by the patch application tool.
  • +
+
+

Create an image

+

Set up the WebLogic Image Tool

Prerequisites
+

Verify that your environment meets the following prerequisites (a quick check for each is sketched after the list):

+
    +
  • Docker client and daemon on the build machine, with minimum Docker version 18.03.1.ce.
  • +
  • Bash version 4.0 or later, to enable the command completion feature.
  • +
  • JAVA_HOME environment variable set to the appropriate JDK location, for example: /scratch/export/oracle/product/jdk
  • +
+
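A quick sketch for confirming these prerequisites on the build machine (the JDK path is only an example):

    $ docker version          # client and daemon versions, 18.03.1.ce or later
    $ bash --version          # 4.0 or later
    $ echo $JAVA_HOME         # should point to your JDK installation
    $ $JAVA_HOME/bin/java -version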
Set up the WebLogic Image Tool
+

To set up the WebLogic Image Tool:

+
    +
  1. +

    Create a working directory and change to it:

    +
    $ mkdir <workdir>
    +$ cd <workdir>
    +

    For example:

    +
    $ mkdir /scratch/imagetool-setup
    +$ cd /scratch/imagetool-setup
    +
  2. +
  3. +

    Download the latest version of the WebLogic Image Tool from the releases page.

    +
    $ wget https://github.com/oracle/weblogic-image-tool/releases/download/release-X.X.X/imagetool.zip
    +

    where X.X.X is the latest release referenced on the releases page.

    +
  4. +
  5. +

    Unzip the release ZIP file in the imagetool-setup directory.

    +
    $ unzip imagetool.zip
    +
  6. +
  7. +

    Execute the following commands to set up the WebLogic Image Tool:

    +
    $ cd <workdir>/imagetool-setup/imagetool/bin
    +$ source setup.sh
    +

    For example:

    +
    $ cd /scratch/imagetool-setup/imagetool/bin
    +$ source setup.sh
    +
  8. +
+
Validate setup
+

To validate the setup of the WebLogic Image Tool:

+
    +
  1. +

    Enter the following command to retrieve the version of the WebLogic Image Tool:

    +
    $ imagetool --version
    +
  2. +
  3. +

    Enter imagetool then press the Tab key to display the available imagetool commands:

    +
    $ imagetool <TAB>
    +cache   create  help    rebase  update
    +
  4. +
+
WebLogic Image Tool build directory
+

The WebLogic Image Tool creates a temporary Docker context directory, prefixed by wlsimgbuilder_temp, every time the tool runs. Under normal circumstances, this context directory will be deleted. However, if the process is aborted or the tool is unable to remove the directory, it is safe for you to delete it manually. By default, the WebLogic Image Tool creates the Docker context directory under the user’s home directory. If you prefer to use a different directory for the temporary context, set the environment variable WLSIMG_BLDDIR:

+
$ export WLSIMG_BLDDIR="/path/to/build/dir"
+
WebLogic Image Tool cache
+

The WebLogic Image Tool maintains a local file cache store. This store is used to look up where the Java, WebLogic Server installers, and WebLogic Server patches reside in the local file system. By default, the cache store is located in the user’s $HOME/cache directory. Under this directory, the lookup information is stored in the .metadata file. All automatically downloaded patches also reside in this directory. You can change the default cache store location by setting the environment variable WLSIMG_CACHEDIR:

+
$ export WLSIMG_CACHEDIR="/path/to/cachedir"
+
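You can inspect what is currently registered in the cache, for example after running the addInstaller and addEntry commands used later in this section, with the cache listItems subcommand (run imagetool cache help to confirm the subcommands available in your version):

    # List the installers and patches registered in the WebLogic Image Tool cache
    $ imagetool cache listItems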
Set up additional build scripts
+

Creating an Oracle Access Management container image using the WebLogic Image Tool requires additional container scripts for Oracle Access Management domains.

+
    +
  1. +

    Clone the docker-images repository to set up those scripts. In these steps, this directory is DOCKER_REPO:

    +
    $ cd <workdir>/imagetool-setup
    +$ git clone https://github.com/oracle/docker-images.git
    +

    For example:

    +
    $ cd /scratch/imagetool-setup
    +$ git clone https://github.com/oracle/docker-images.git
    +
  2. +
+
+

Note: If you want to create a new image, continue with the following steps. Otherwise, to update an existing image, see Update an image.

+
+

Create an image

+

After setting up the WebLogic Image Tool, follow these steps to use the WebLogic Image Tool to create a new Oracle Access Management image.

+
Download the Oracle Access Management installation binaries and patches
+

You must download the required Oracle Access Management installation binaries and patches as listed below from the Oracle Software Delivery Cloud and save them in a directory of your choice.

+

The installation binaries and patches required are:

+
    +
  • +

    Oracle Identity and Access Management 12.2.1.4.0

    +
      +
    • fmw_12.2.1.4.0_idm.jar
    • +
    +
  • +
  • +

    Oracle Fusion Middleware 12c Infrastructure 12.2.1.4.0

    +
      +
    • fmw_12.2.1.4.0_infrastructure.jar
    • +
    +
  • +
  • +

    OAM and FMW Infrastructure Patches:

    +
      +
    • View document ID 2723908.1 on My Oracle Support. In the Container Image Download/Patch Details section, locate the Oracle Access Management (OAM) table. For the latest PSU click the README link in the Documentation column. In the README, locate the “Installed Software” section. All the patch numbers to be downloaded are listed here. Download all these individual patches from My Oracle Support.
    • +
    +
  • +
  • +

    Oracle JDK v8

    +
      +
    • jdk-8uXXX-linux-x64.tar.gz where XXX is the JDK version referenced in the README above.
    • +
    +
  • +
+
Update required build files
+

The following files in the code repository location <imagetool-setup-location>/docker-images/OracleAccessManagement/imagetool/12.2.1.4.0 are used for creating the image:

+
    +
  • additionalBuildCmds.txt
  • +
  • buildArgs
  • +
+
    +
  1. +

    Edit the <workdir>/imagetool-setup/docker-images/OracleAccessManagement/imagetool/12.2.1.4.0/buildArgs file and change %DOCKER_REPO%, %JDK_VERSION% and %BUILDTAG% appropriately.

    +

    For example:

    +
    create
    +--jdkVersion=8u301
    +--type oam
    +--version=12.2.1.4.0
    +--tag=oam-latestpsu:12.2.1.4.0
    +--pull
    +--installerResponseFile /scratch/imagetool-setup/docker-images/OracleFMWInfrastructure/dockerfiles/12.2.1.4/install.file,/scratch/imagetool-setup/docker-images/OracleAccessManagement/dockerfiles/12.2.1.4.0/install/iam.response
    +--additionalBuildCommands /scratch/imagetool-setup/docker-images/OracleAccessManagement/imagetool/12.2.1.4.0/additionalBuildCmds.txt
    +--additionalBuildFiles /scratch/imagetool-setup/docker-images/OracleAccessManagement/dockerfiles/12.2.1.4.0/container-scripts
    +
  2. +
  3. +

    Edit the <workdir>/imagetool-setup/docker-images/OracleFMWInfrastructure/dockerfiles/12.2.1.4/install.file and, under the GENERIC section, add the line INSTALL_TYPE="Fusion Middleware Infrastructure". For example:

    +
    [GENERIC]
    +INSTALL_TYPE="Fusion Middleware Infrastructure"
    +DECLINE_SECURITY_UPDATES=true
    +SECURITY_UPDATES_VIA_MYORACLESUPPORT=false
    +
  4. +
+
Create the image
+
    +
  1. +

    Add a JDK package to the WebLogic Image Tool cache. For example:

    +
    $ imagetool cache addInstaller --type jdk --version 8uXXX --path <download location>/jdk-8uXXX-linux-x64.tar.gz
    +

    where XXX is the JDK version downloaded.

    +
  2. +
  3. +

    Add the downloaded installation binaries to the WebLogic Image Tool cache. For example:

    +
    $ imagetool cache addInstaller --type fmw --version 12.2.1.4.0 --path <download location>/fmw_12.2.1.4.0_infrastructure.jar
    +
    +$ imagetool cache addInstaller --type OAM --version 12.2.1.4.0 --path <download location>/fmw_12.2.1.4.0_idm.jar
    +
  4. +
  5. +

    Add the downloaded OPatch patch to the WebLogic Image Tool cache. For example:

    +
    $ imagetool cache addEntry --key 28186730_13.9.4.2.8 --value <download location>/p28186730_139428_Generic.zip
    +
  6. +
  7. +

    Add the rest of the downloaded product patches to the WebLogic Image Tool cache:

    +
    $ imagetool cache addEntry --key <patch>_12.2.1.4.0 --value <download location>/p<patch>_122140_Generic.zip
    +

    For example:

    +
    $ imagetool cache addEntry --key 32971905_12.2.1.4.0 --value <download location>/p32971905_122140_Generic.zip
    +
    +$ imagetool cache addEntry --key 20812896_12.2.1.4.0 --value <download location>/p20812896_122140_Generic.zip
    +
    +$ imagetool cache addEntry --key 32880070_12.2.1.4.0 --value <download location>/p32880070_122140_Generic.zip
    +
    +$ imagetool cache addEntry --key 33059296_12.2.1.4.0 --value <download location>/p33059296_122140_Generic.zip
    +
    +$ imagetool cache addEntry --key 32905339_12.2.1.4.0 --value <download location>/p32905339_122140_Generic.zip
    +
    +$ imagetool cache addEntry --key 33084721_12.2.1.4.0 --value <download location>/p33084721_122140_Generic.zip
    +
    +$ imagetool cache addEntry --key 31544353_12.2.1.4.0 --value <download location>/p31544353_122140_Linux-x86-64.zip
    +
    +$ imagetool cache addEntry --key 32957281_12.2.1.4.0 --value <download location>/p32957281_122140_Generic.zip
    +
    +$ imagetool cache addEntry --key 33093748_12.2.1.4.0 --value <download location>/p33093748_122140_Generic.zip
    +
  8. +
  9. +

    Edit the <workdir>/imagetool-setup/docker-images/OracleAccessManagement/imagetool/12.2.1.4.0/buildArgs file and append the product patches and OPatch patch as follows:

    +
    --patches 32971905_12.2.1.4.0,20812896_12.2.1.4.0,32880070_12.2.1.4.0,33059296_12.2.1.4.0,32905339_12.2.1.4.0,33084721_12.2.1.4.0,31544353_12.2.1.4.0,32957281_12.2.1.4.0,33093748_12.2.1.4.0
    +--opatchBugNumber=28186730_13.9.4.2.8
    +

    An example buildArgs file is now as follows:

    +
    create
    +--jdkVersion=8u301
    +--type oam
    +--version=12.2.1.4.0
    +--tag=oam-latestpsu:12.2.1.4.0
    +--pull
    +--installerResponseFile /scratch/imagetool-setup/docker-images/OracleFMWInfrastructure/dockerfiles/12.2.1.4/install.file,/scratch/imagetool-setup/docker-images/OracleAccessManagement/dockerfiles/12.2.1.4.0/install/iam.response
    +--additionalBuildCommands /scratch/imagetool-setup/docker-images/OracleAccessManagement/imagetool/12.2.1.4.0/additionalBuildCmds.txt
    +--additionalBuildFiles /scratch/imagetool-setup/docker-images/OracleAccessManagement/dockerfiles/12.2.1.4.0/container-scripts
    +--patches 32971905_12.2.1.4.0,20812896_12.2.1.4.0,32880070_12.2.1.4.0,33059296_12.2.1.4.0,32905339_12.2.1.4.0,33084721_12.2.1.4.0,31544353_12.2.1.4.0,32957281_12.2.1.4.0,33093748_12.2.1.4.0
    +--opatchBugNumber=28186730_13.9.4.2.8
    +
    +

    Note: In the buildArgs file:

    +
      +
    • --jdkVersion value must match the --version value used in the imagetool cache addInstaller command for --type jdk.
    • +
    • --version value must match the --version value used in the imagetool cache addInstaller command for --type OAM.
    • +
    +
    +

    Refer to this page for the complete list of options available with the WebLogic Image Tool create command.

    +
  10. +
  11. +

    Create the Oracle Access Management image:

    +
    $ imagetool @<absolute path to buildargs file> --fromImage ghcr.io/oracle/oraclelinux:7-slim
    +
    +

    Note: Make sure that the absolute path to the buildargs file is prepended with a @ character, as shown in the example above.

    +
    +

    For example:

    +
    $ imagetool @<imagetool-setup-location>/docker-images/OracleAccessManagement/imagetool/12.2.1.4.0/buildArgs --fromImage ghcr.io/oracle/oraclelinux:7-slim
    +
  12. +
  13. +

    Check the created image using the docker images command:

    +
    $ docker images | grep oam
    +

    The output will look similar to the following:

    +
    oam-latestpsu                                       12.2.1.4.0                     ad732fc7c16b        About a minute ago   3.35GB
    +
  14. +
  15. +

    Run the following command to save the container image to a tar file (a matching command to load the tar file on another host is sketched after this list):

    +
    $ docker save -o <path>/<file>.tar <image>
    +

    For example:

    +
    $ docker save -o $WORKDIR/oam-latestpsu.tar oam-latestpsu:12.2.1.4.0
    +
  16. +
+
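As mentioned in the save step above, the tar file produced by docker save can be copied to another host and imported into its local image store. A sketch, assuming the same file name as the earlier example:

    # Run on the target host, in the directory containing the copied tar file
    $ docker load -i oam-latestpsu.tar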

Update an image

+

The steps below show how to update an existing Oracle Access Management image with an interim patch.

+

The container image to be patched must be loaded in the local docker images repository before attempting these steps.

+

In the examples below the image oracle/oam:12.2.1.4.0 is updated with an interim patch.

+
$ docker images
+
+REPOSITORY     TAG          IMAGE ID          CREATED             SIZE
+oracle/oam     12.2.1.4.0   b051804ba15f      3 months ago        3.34GB
+
    +
  1. +

    Set up the WebLogic Image Tool.

    +
  2. +
  3. +

    Download the required interim patch and the latest OPatch (28186730) from My Oracle Support and save them in a directory of your choice.

    +
  4. +
  5. +

    Add the OPatch patch to the WebLogic Image Tool cache, for example:

    +
    $ imagetool cache addEntry --key 28186730_13.9.4.2.8 --value <downloaded-patches-location>/p28186730_139428_Generic.zip
    +
  6. +
  7. +

    Execute the imagetool cache addEntry command for each patch to add the required patch(es) to the WebLogic Image Tool cache. For example, to add patch p32701831_12214210607_Generic.zip:

    +
    $ imagetool cache addEntry --key=32701831_12.2.1.4.210607 --value <downloaded-patches-location>/p32701831_12214210607_Generic.zip
    +
  8. +
  9. +

    Provide the following arguments to the WebLogic Image Tool update command:

    +
      +
    • --fromImage - Identify the image that needs to be updated. In the example below, the image to be updated is oracle/oam:12.2.1.4.0.
    • +
    • --patches - Multiple patches can be specified as a comma-separated list.
    • +
    • --tag - Specify the new tag to be applied for the image being built.
    • +
    +

    Refer here for the complete list of options available with the WebLogic Image Tool update command.

    +
    +

    Note: The WebLogic Image Tool cache should have the latest OPatch zip. The WebLogic Image Tool will update the OPatch if it is not already updated in the image.

    +
    +

    For example:

    +
    $ imagetool update --fromImage oracle/oam:12.2.1.4.0 --tag=oracle/oam-new:12.2.1.4.0 --patches=32701831_12.2.1.4.210607 --opatchBugNumber=28186730_13.9.4.2.8
    +
    +

    Note: If the command fails because the files in the image being upgraded are not owned by oracle:oracle, then add the parameter --chown <userid>:<groupid> to correspond with the values returned in the error.

    +
    +
  10. +
  11. +

    Check the built image using the docker images command:

    +
    $ docker images | grep oam
    +

    The output will look similar to the following:

    +
    REPOSITORY         TAG          IMAGE ID        CREATED             SIZE
    +oracle/oam-new     12.2.1.4.0   78ccd1ad67eb    5 minutes ago       3.8GB
    +oracle/oam         12.2.1.4.0   b051804ba15f    3 months ago        3.34GB
    +
  12. +
  13. +

    Run the following command to save the patched container image to a tar file:

    +
    $ docker save -o <path>/<file>.tar <image>
    +

    For example:

    +
    $ docker save -o $WORKDIR/oam-new.tar oracle/oam-new:12.2.1.4.0
    +
  14. +
+ + + + + + + + + + + + + + + + + + diff --git a/docs/23.4.1/idm-products/oam/create-or-update-image/index.xml b/docs/23.4.1/idm-products/oam/create-or-update-image/index.xml new file mode 100644 index 000000000..6b6ac8473 --- /dev/null +++ b/docs/23.4.1/idm-products/oam/create-or-update-image/index.xml @@ -0,0 +1,14 @@ + + + + Create or update an image on Oracle Fusion Middleware on Kubernetes + /fmw-kubernetes/23.4.1/idm-products/oam/create-or-update-image/ + Recent content in Create or update an image on Oracle Fusion Middleware on Kubernetes + Hugo -- gohugo.io + en-us + + + + + + \ No newline at end of file diff --git a/docs/23.4.1/idm-products/oam/index.html b/docs/23.4.1/idm-products/oam/index.html new file mode 100644 index 000000000..b4f0fc16f --- /dev/null +++ b/docs/23.4.1/idm-products/oam/index.html @@ -0,0 +1,4192 @@ + + + + + + + + + + + + Oracle Access Management :: Oracle Fusion Middleware on Kubernetes + + + + + + + + + + + + + + + + + + + + + + + + + +
+
+
+ +
+
+
+ +
+ +
+ +
+ +

+ + Oracle Access Management +


Oracle Access Management on Kubernetes

+

Oracle supports the deployment of Oracle Access Management on Kubernetes. See the following sections:

+ + + + + + + + + + + + + + + + + + diff --git a/docs/23.4.1/idm-products/oam/index.xml b/docs/23.4.1/idm-products/oam/index.xml new file mode 100644 index 000000000..747a5b8db --- /dev/null +++ b/docs/23.4.1/idm-products/oam/index.xml @@ -0,0 +1,14 @@ + + + + Oracle Access Management on Oracle Fusion Middleware on Kubernetes + /fmw-kubernetes/23.4.1/idm-products/oam/ + Recent content in Oracle Access Management on Oracle Fusion Middleware on Kubernetes + Hugo -- gohugo.io + en-us + + + + + + \ No newline at end of file diff --git a/docs/23.4.1/idm-products/oam/introduction/index.html b/docs/23.4.1/idm-products/oam/introduction/index.html new file mode 100644 index 000000000..367f8cc43 --- /dev/null +++ b/docs/23.4.1/idm-products/oam/introduction/index.html @@ -0,0 +1,3994 @@ + + + + + + + + + + + + Introduction :: Oracle Fusion Middleware on Kubernetes + + + + + + + + + + + + + + + + + + + + + + + + + +
+
+
+ +
+ +
+ +
+ +
+ +
+ +

+ + Introduction +


The WebLogic Kubernetes Operator supports deployment of Oracle Access Management (OAM).

+

In this release, OAM domains are supported using the “domain on a persistent volume” model only, where the domain home is located in a persistent volume (PV).

+

The WebLogic Kubernetes Operator has several key features to assist you with deploying and managing Oracle Access Management domains in a Kubernetes environment. You can:

+
    +
  • Create OAM instances in a Kubernetes persistent volume. This persistent volume can reside in an NFS file system or other Kubernetes volume types.
  • +
  • Start servers based on declarative startup parameters and desired states.
  • +
  • Expose the OAM Services through external access.
  • +
  • Scale OAM domains by starting and stopping Managed Servers on demand.
  • +
  • Publish operator and WebLogic Server logs into Elasticsearch and interact with them in Kibana.
  • +
  • Monitor the OAM instance using Prometheus and Grafana.
  • +
+

Current production release

+

The current production release for the Oracle Access Management domain deployment on Kubernetes is 23.4.1. This release uses the WebLogic Kubernetes Operator version 4.1.2.

+

For 4.0.X WebLogic Kubernetes Operator refer to Version 23.3.1

+

For 3.4.X WebLogic Kubernetes Operator refer to Version 23.1.1

+

Recent changes and known issues

+

See the Release Notes for recent changes and known issues for Oracle Access Management domain deployment on Kubernetes.

+

Limitations

+

See here for limitations in this release.

+

Getting started

+

This documentation explains how to configure OAM on a Kubernetes cluster where no other Oracle Identity Management products will be deployed. For detailed information about this type of deployment, start at Prerequisites and follow this documentation sequentially. Please note that this documentation does not explain how to configure a Kubernetes cluster, given that the product can be deployed on any compliant Kubernetes vendor.

+

If you are deploying multiple Oracle Identity Management products on the same Kubernetes cluster, then you must follow the Enterprise Deployment Guide outlined in Enterprise Deployments. Please note that you also have the option to follow the Enterprise Deployment Guide even if you are only installing OAM and no other Oracle Identity Management products.

+

Note: If you need to understand how to configure a Kubernetes cluster ready for an Oracle Access Management deployment, you should follow the Enterprise Deployment Guide referenced in Enterprise Deployments. The Enterprise Deployment Automation section also contains details on automation scripts that can:

+
    +
  • Automate the creation of a Kubernetes cluster on Oracle Cloud Infrastructure (OCI), ready for the deployment of Oracle Identity Management products.
  • +
  • Automate the deployment of Oracle Identity Management products on any compliant Kubernetes cluster.
  • +
+

Documentation for earlier releases

+

To view documentation for an earlier release, see:

+ + + + + + + + + + + + + + + + + + diff --git a/docs/23.4.1/idm-products/oam/introduction/index.xml b/docs/23.4.1/idm-products/oam/introduction/index.xml new file mode 100644 index 000000000..bec622dce --- /dev/null +++ b/docs/23.4.1/idm-products/oam/introduction/index.xml @@ -0,0 +1,14 @@ + + + + Introduction on Oracle Fusion Middleware on Kubernetes + /fmw-kubernetes/23.4.1/idm-products/oam/introduction/ + Recent content in Introduction on Oracle Fusion Middleware on Kubernetes + Hugo -- gohugo.io + en-us + + + + + + \ No newline at end of file diff --git a/docs/23.4.1/idm-products/oam/manage-oam-domains/delete-domain-home/index.html b/docs/23.4.1/idm-products/oam/manage-oam-domains/delete-domain-home/index.html new file mode 100644 index 000000000..a1416d087 --- /dev/null +++ b/docs/23.4.1/idm-products/oam/manage-oam-domains/delete-domain-home/index.html @@ -0,0 +1,4026 @@ + + + + + + + + + + + + f. Delete the OAM domain home :: Oracle Fusion Middleware on Kubernetes + + + + + + + + + + + + + + + + + + + + + + + + + +
+
+
+ +
+
+ + + + +
+
+ +
+
+ + +
+
+ +
+ +
+ +
+ +

+ + f. Delete the OAM domain home +


Sometimes in production, but most likely in testing environments, you might want to remove the domain home that is generated using the create-domain.sh script.

+
    +
  1. +

    Run the following command to delete the domain:

    +
    $ cd $WORKDIR/kubernetes/delete-domain
    +$ ./delete-weblogic-domain-resources.sh -d <domain_uid>
    +

    For example:

    +
    $ cd $WORKDIR/kubernetes/delete-domain
    +$ ./delete-weblogic-domain-resources.sh -d accessdomain
    +
  2. +
  3. +

    Drop the RCU schemas as follows:

    +
    $ kubectl exec -it helper -n <domain_namespace> -- /bin/bash
    +[oracle@helper ~]$
    +[oracle@helper ~]$ export CONNECTION_STRING=<db_host.domain>:<db_port>/<service_name>
    +[oracle@helper ~]$ export RCUPREFIX=<rcu_schema_prefix>
    +   
    +/u01/oracle/oracle_common/bin/rcu -silent -dropRepository -databaseType ORACLE -connectString $CONNECTION_STRING \
    +-dbUser sys -dbRole sysdba -selectDependentsForComponents true -schemaPrefix $RCUPREFIX \
    +-component MDS -component IAU -component IAU_APPEND -component IAU_VIEWER -component OPSS \
    +-component WLS -component STB -component OAM -f < /tmp/pwd.txt
    +

    For example:

    +
    $ kubectl exec -it helper -n oamns -- /bin/bash
    +[oracle@helper ~]$ export CONNECTION_STRING=mydatabasehost.example.com:1521/orcl.example.com
    +[oracle@helper ~]$ export RCUPREFIX=OAMK8S
    +/u01/oracle/oracle_common/bin/rcu -silent -dropRepository -databaseType ORACLE -connectString $CONNECTION_STRING \
    +-dbUser sys -dbRole sysdba -selectDependentsForComponents true -schemaPrefix $RCUPREFIX \
    +-component MDS -component IAU -component IAU_APPEND -component IAU_VIEWER -component OPSS \
    +-component WLS -component STB -component OAM -f < /tmp/pwd.txt
    +
  4. +
  5. +

    Delete the contents of the persistent volume, for example:

    +
    $ rm -rf <persistent_volume>/accessdomainpv/*
    +

    For example:

    +
    $ rm -rf /scratch/shared/accessdomainpv/*
    +
  6. +
  7. +

    Delete the WebLogic Kubernetes Operator, by running the following command:

    +
    $ helm delete weblogic-kubernetes-operator -n opns
    +
  8. +
  9. +

    Delete the label from the OAM namespace:

    +
    $ kubectl label namespaces <domain_namespace> weblogic-operator-
    +

    For example:

    +
    $ kubectl label namespaces oamns weblogic-operator-
    +
  10. +
  11. +

    Delete the service account for the operator:

    +
    $ kubectl delete serviceaccount <sample-kubernetes-operator-sa> -n <domain_namespace>
    +

    For example:

    +
    $ kubectl delete serviceaccount op-sa -n opns
    +
  12. +
  13. +

    Delete the operator namespace:

    +
    $ kubectl delete namespace <sample-kubernetes-operator-ns>
    +

    For example:

    +
    $ kubectl delete namespace opns
    +
  14. +
  15. +

    To delete NGINX:

    +
    $ helm delete oam-nginx -n <domain_namespace>
    +

    For example:

    +
    $ helm delete oam-nginx -n oamns
    +

    Then run:

    +
    $ helm delete nginx-ingress -n <domain_namespace>
    +

    For example:

    +
    $ helm delete nginx-ingress -n oamns
    +
  16. +
  17. +

    Delete the OAM namespace:

    +
    $ kubectl delete namespace <domain_namespace>
    +

    For example:

    +
    $ kubectl delete namespace oamns
    +
  18. +
+ + + + + + + + + + + + + + + + + + + diff --git a/docs/23.4.1/idm-products/oam/manage-oam-domains/domain-lifecycle/index.html b/docs/23.4.1/idm-products/oam/manage-oam-domains/domain-lifecycle/index.html new file mode 100644 index 000000000..c4a0ad55b --- /dev/null +++ b/docs/23.4.1/idm-products/oam/manage-oam-domains/domain-lifecycle/index.html @@ -0,0 +1,4315 @@ + + + + + + + + + + + + a. Domain Life Cycle :: Oracle Fusion Middleware on Kubernetes + + + + + + + + + + + + + + + + + + + + + + + + + +
+
+
+ +
+ +
+ +
+ +
+ +
+ +

+ + a. Domain Life Cycle +

    +
  1. View existing OAM servers
  2. +
  3. Starting/Scaling up OAM Managed servers
  4. +
  5. Stopping/Scaling down OAM Managed servers
  6. +
  7. Starting/Scaling up OAM Policy Managed servers
  8. +
  9. Stopping/Scaling down OAM Policy Managed servers
  10. +
  11. Stopping and starting the Administration Server and Managed Servers
  12. +
  13. Domain lifecycle sample scripts
  14. +
+

As OAM domains use the WebLogic Kubernetes Operator, domain lifecycle operations are managed using the WebLogic Kubernetes Operator itself.

+

This document shows the basic operations for starting, stopping and scaling servers in the OAM domain.

+

For more detailed information refer to Domain Life Cycle in the WebLogic Kubernetes Operator documentation.

+ +

Do not use the WebLogic Server Administration Console or Oracle Enterprise Manager Console to start or stop servers.

+
+ +

Note: The instructions below are for starting, stopping, or scaling servers manually. If you wish to use autoscaling, see Kubernetes Horizontal Pod Autoscaler. Please note, if you have enabled autoscaling, it is recommended to delete the autoscaler before running the commands below.

+
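A sketch for removing an existing autoscaler before scaling manually (the HPA name shown is a placeholder; use whatever name kubectl get hpa reports for your domain namespace):

    # List any autoscalers in the domain namespace, then delete the one targeting the OAM cluster
    $ kubectl get hpa -n oamns
    $ kubectl delete hpa accessdomain-oam-cluster-hpa -n oamns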

View existing OAM servers

+

The default OAM deployment starts the Administration Server (AdminServer), one OAM Managed Server (oam_server1) and one OAM Policy Manager server (oam_policy_mgr1).

+

The deployment also creates, but doesn’t start, four extra OAM Managed Servers (oam_server2 to oam_server5) and four more OAM Policy Manager servers (oam_policy_mgr2 to oam_policy_mgr5).

+

All these servers are visible in the WebLogic Server Console https://${MASTERNODE-HOSTNAME}:${MASTERNODE-PORT}/console by navigating to Domain Structure > oamcluster > Environment > Servers.

+

To view the running servers using kubectl, run the following command:

+
$ kubectl get pods -n <domain_namespace>
+

For example:

+
$ kubectl get pods -n oamns
+

The output should look similar to the following:

+
NAME                                                     READY   STATUS      RESTARTS   AGE
+accessdomain-adminserver                                 1/1     Running     0          3h29m
+accessdomain-create-oam-infra-domain-job-7c9r9           0/1     Completed   0          3h36m
+accessdomain-oam-policy-mgr1                             1/1     Running     0          3h21m
+accessdomain-oam-server1                                 1/1     Running     0          3h21m
+helper                                                   1/1     Running     0          3h51m
+nginx-ingress-ingress-nginx-controller-76fb7678f-k8rhq   1/1     Running     0          55m
+

Starting/Scaling up OAM Managed Servers

+

The number of OAM Managed Servers running is dependent on the replicas parameter configured for the oam-cluster. To start more OAM Managed Servers perform the following steps:

+
    +
  1. +

    Run the following kubectl command to edit the oam-cluster:

    +
    $ kubectl edit cluster accessdomain-oam-cluster -n <domain_namespace>
    +

    For example:

    +
    $ kubectl edit cluster accessdomain-oam-cluster -n oamns
    +

    Note: This opens an edit session for the oam-cluster where parameters can be changed using standard vi commands.

    +
  2. +
  3. +

    In the edit session, search for spec:, and then look for the replicas parameter under clusterName: oam_cluster. By default the replicas parameter is set to “1” hence one OAM Managed Server is started (oam_server1):

    +
    ...
    +spec:
    +  clusterName: oam_cluster
    +  replicas: 1
    +  serverPod:
    +    env:
    +    - name: USER_MEM_ARGS
    +      value: -XX:+UseContainerSupport -Djava.security.egd=file:/dev/./urandom -Xms8192m
    +        -Xmx8192m
    +...
    +
  4. +
  5. +

    To start more OAM Managed Servers, increase the replicas value as desired. In the example below, two more managed servers will be started by setting replicas to “3”:

    +
    ...
    +spec:
    +  clusterName: oam_cluster
    +  replicas: 3
    +  serverPod:
    +    env:
    +    - name: USER_MEM_ARGS
    +      value: -XX:+UseContainerSupport -Djava.security.egd=file:/dev/./urandom -Xms8192m
    +        -Xmx8192m
    +...
    +
  6. +
  7. +

    Save the file and exit (:wq!)

    +

    The output will look similar to the following:

    +
    cluster.weblogic.oracle/accessdomain-oam-cluster edited
    +
  8. +
  9. +

    Run the following kubectl command to view the pods:

    +
    $ kubectl get pods -n <domain_namespace>
    +

    For example:

    +
    $ kubectl get pods -n oamns
    +

    The output will look similar to the following:

    +
    NAME                                                     READY   STATUS      RESTARTS   AGE
    +accessdomain-adminserver                                 1/1     Running     0          3h33m
    +accessdomain-create-oam-infra-domain-job-7c9r9           0/1     Completed   0          3h40m
    +accessdomain-oam-policy-mgr1                             1/1     Running     0          3h25m
    +accessdomain-oam-server1                                 1/1     Running     0          3h25m
    +accessdomain-oam-server2                                 0/1     Running     0          3h25m
    +accessdomain-oam-server3                                 0/1     Pending     0          9s
    +helper                                                   1/1     Running     0          3h55m
    +nginx-ingress-ingress-nginx-controller-76fb7678f-k8rhq   1/1     Running     0          59m
    +

    Two new pods (accessdomain-oam-server2 and accessdomain-oam-server3) are started, but currently have a READY status of 0/1. This means oam_server2 and oam_server3 are not currently running but are in the process of starting. The servers will take several minutes to start so keep executing the command until READY shows 1/1:

    +
    NAME                                                     READY   STATUS      RESTARTS   AGE
    +accessdomain-adminserver                                 1/1     Running     0          3h37m
    +accessdomain-create-oam-infra-domain-job-7c9r9           0/1     Completed   0          3h43m
    +accessdomain-oam-policy-mgr1                             1/1     Running     0          3h29m
    +accessdomain-oam-server1                                 1/1     Running     0          3h29m
    +accessdomain-oam-server2                                 1/1     Running     0          3h29m
    +accessdomain-oam-server3                                 1/1     Running     0          3m45s
    +helper                                                   1/1     Running     0          3h59m
    +nginx-ingress-ingress-nginx-controller-76fb7678f-k8rhq   1/1     Running     0          63m
    +
    +

    Note: To check what is happening during server startup when READY is 0/1, run the following command to view the log of the pod that is starting:

    +
    $ kubectl logs <pod> -n <domain_namespace>
    +

    For example:

    +
    $ kubectl logs accessdomain-oam-server3 -n oamns
    +
  10. +
+
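As an alternative to the interactive edit session above, the replicas value can also be changed non-interactively, which is convenient for scripting. The following is a minimal sketch using kubectl patch; the same approach can be used to scale down, and for the accessdomain-policy-cluster. If the Cluster resource exposes the scale subresource (it does in recent operator releases, which is also what allows HPA to target it), kubectl scale can be used in the same way:

$ kubectl patch cluster accessdomain-oam-cluster -n oamns --type=merge -p '{"spec":{"replicas":3}}'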

Stopping/Scaling down OAM Managed Servers

+

As mentioned in the previous section, the number of OAM Managed Servers running is dependent on the replicas parameter configured for the cluster. To stop one or more OAM Managed Servers, perform the following:

+
    +
  1. +

    Run the following kubectl command to edit the oam-cluster:

    +
    $ kubectl edit cluster accessdomain-oam-cluster -n <domain_namespace>
    +

    For example:

    +
    $ kubectl edit cluster accessdomain-oam-cluster -n oamns
    +
  2. +
  3. +

    In the edit session, search for spec:, and then look for the replicas parameter under clusterName: oam_cluster. In the example below replicas is set to “3”, hence three OAM Managed Servers are started (oam_server1 to oam_server3):

    +
    ...
    +spec:
    +  clusterName: oam_cluster
    +  replicas: 3
    +  serverPod:
    +    env:
    +    - name: USER_MEM_ARGS
    +      value: -XX:+UseContainerSupport -Djava.security.egd=file:/dev/./urandom -Xms8192m
    +        -Xmx8192m
    +...
    +
  4. +
  5. +

    To stop OAM Managed Servers, decrease the replicas value as desired. In the example below, we will stop two managed servers by setting replicas to “1”:

    +
    spec:
    +  clusterName: oam_cluster
    +  replicas: 1
    +  serverPod:
    +    env:
    +    - name: USER_MEM_ARGS
    +      value: -XX:+UseContainerSupport -Djava.security.egd=file:/dev/./urandom -Xms8192m
    +        -Xmx8192m
    +...
    +
  6. +
  7. +

    Save the file and exit (:wq!)

    +
  8. +
  9. +

    Run the following kubectl command to view the pods:

    +
    $ kubectl get pods -n <domain_namespace>
    +

    For example:

    +
    $ kubectl get pods -n oamns
    +

    The output will look similar to the following:

    +
    NAME                                                     READY   STATUS        RESTARTS   AGE
    +accessdomain-adminserver                                 1/1     Running       0          3h45m
    +accessdomain-create-oam-infra-domain-job-7c9r9           0/1     Completed     0          3h51m
    +accessdomain-oam-policy-mgr1                             1/1     Running       0          3h37m
    +accessdomain-oam-server1                                 1/1     Running       0          3h37m
    +accessdomain-oam-server2                                 1/1     Running       0          3h37m
    +accessdomain-oam-server3                                 1/1     Terminating   0          11m
    +helper                                                   1/1     Running       0          4h6m
    +nginx-ingress-ingress-nginx-controller-76fb7678f-k8rhq   1/1     Running       0          71m
    +

    One pod now has a STATUS of Terminating (accessdomain-oam-server3). The server will take a minute or two to stop. Once terminated the other pod (accessdomain-oam-server2) will move to Terminating and then stop. Keep executing the command until the pods have disappeared:

    +
    NAME                                            READY   STATUS      RESTARTS   AGE
    +accessdomain-adminserver                                 1/1     Running     0          3h48m
    +accessdomain-create-oam-infra-domain-job-7c9r9           0/1     Completed   0          3h54m
    +accessdomain-oam-policy-mgr1                             1/1     Running     0          3h40m
    +accessdomain-oam-server1                                 1/1     Running     0          3h40m
    +helper                                                   1/1     Running     0          4h9m
    +nginx-ingress-ingress-nginx-controller-76fb7678f-k8rhq   1/1     Running     0          74m
    +
  10. +
+

Starting/Scaling up OAM Policy Managed Servers

+

The number of OAM Policy Managed Servers running is dependent on the replicas parameter configured for the policy-cluster. To start more OAM Policy Managed Servers perform the following steps:

+
    +
  1. +

    Run the following kubectl command to edit the policy-cluster:

    +
    $ kubectl edit cluster accessdomain-policy-cluster -n <domain_namespace>
    +

    For example:

    +
    $ kubectl edit cluster accessdomain-policy-cluster -n oamns
    +

    Note: This opens an edit session for the policy-cluster where parameters can be changed using standard vi commands.

    +
  2. +
  3. +

    In the edit session, search for spec:, and then look for the replicas parameter under clusterName: policy_cluster. By default the replicas parameter is set to “1” hence one OAM Policy Managed Server is started (oam_policy_mgr1):

    +
    ...
    +spec:
    +  clusterName: policy_cluster
    +  replicas: 1
    +  serverService:
    +    precreateService: true
    +...
    +
  4. +
  5. +

    To start more OAM Policy Managed Servers, increase the replicas value as desired. In the example below, two more managed servers will be started by setting replicas to “3”:

    +
    ...
    +spec:
    +  clusterName: policy_cluster
    +  replicas: 3
    +  serverService:
    +    precreateService: true
    +...
    +
  6. +
  7. +

    Save the file and exit (:wq!)

    +

    The output will look similar to the following:

    +
    cluster.weblogic.oracle/accessdomain-policy-cluster edited
    +

    After saving the changes two new pods will be started (accessdomain-oam-policy-mgr2 and accessdomain-oam-policy-mgr3). After a few minutes they will have a READY status of 1/1. In the example below accessdomain-oam-policy-mgr2 and accessdomain-oam-policy-mgr3 are started:

    +
    NAME                                                     READY   STATUS      RESTARTS   AGE
    +accessdomain-adminserver                                 1/1     Running     0          3h43m
    +accessdomain-create-oam-infra-domain-job-7c9r9           0/1     Completed   0          3h49m
    +accessdomain-oam-policy-mgr1                             1/1     Running     0          3h35m
    +accessdomain-oam-policy-mgr2                             1/1     Running     0          3h35m
    +accessdomain-oam-policy-mgr3                             1/1     Running     0          4m18s
    +accessdomain-oam-server1                                 1/1     Running     0          3h35m
    +helper                                                   1/1     Running     0          4h4m
    +nginx-ingress-ingress-nginx-controller-76fb7678f-k8rhq   1/1     Running     0          69m
    +
  8. +
+

Stopping/Scaling down OAM Policy Managed Servers

+

As mentioned in the previous section, the number of OAM Policy Managed Servers running is dependent on the replicas parameter configured for the cluster. To stop one or more OAM Policy Managed Servers, perform the following:

+
    +
  1. +

    Run the following kubectl command to edit the policy-cluster:

    +
    $ kubectl edit cluster accessdomain-policy-cluster -n <domain_namespace>
    +

    For example:

    +
    $ kubectl edit cluster accessdomain-policy-cluster -n oamns
    +
  2. +
  3. +

    In the edit session, search for spec:, and then look for the replicas parameter under clusterName: policy_cluster. To stop OAM Policy Managed Servers, decrease the replicas value as desired. In the example below, we will stop two managed servers by setting replicas to “1”:

    +
    ...
    +spec:
    +  clusterName: policy_cluster
    +  replicas: 1
    +  serverService:
    +    precreateService: true
    +...
    +

    After saving the changes one pod will move to a STATUS of Terminating (accessdomain-oam-policy-mgr3).

    +
    NAME                                            READY   STATUS        RESTARTS   AGE
    +accessdomain-adminserver                                 1/1     Running       0          3h49m
    +accessdomain-create-oam-infra-domain-job-7c9r9           0/1     Completed     0          3h55m
    +accessdomain-oam-policy-mgr1                             1/1     Running       0          3h41m
    +accessdomain-oam-policy-mgr2                             1/1     Running       0          3h41m
    +accessdomain-oam-policy-mgr3                             1/1     Terminating   0          10m
    +accessdomain-oam-server1                                 1/1     Running       0          3h41m
    +helper                                                   1/1     Running       0          4h11m
    +nginx-ingress-ingress-nginx-controller-76fb7678f-k8rhq   1/1     Running       0          75m
    +

    The pods will take a minute or two to stop, so keep executing the command until the pods have disappeared:

    +
    NAME                                                     READY   STATUS      RESTARTS   AGE
    +accessdomain-adminserver                                 1/1     Running     0          3h50m
    +accessdomain-create-oam-infra-domain-job-7c9r9           0/1     Completed   0          3h57m
    +accessdomain-oam-policy-mgr1                             1/1     Running     0          3h42m
    +accessdomain-oam-server1                                 1/1     Running     0          3h42m
    +helper                                                   1/1     Running     0          4h12m
    +nginx-ingress-ingress-nginx-controller-76fb7678f-k8rhq   1/1     Running     0          76m
    +
  4. +
+

Stopping and Starting the Administration Server and Managed Servers

+

To stop all the OAM Managed Servers and the Administration Server in one operation:

+
    +
  1. +

    Run the following kubectl command to edit the domain:

    +
    $ kubectl edit domain <domain_uid> -n <domain_namespace>
    +

    For example:

    +
    $ kubectl edit domain accessdomain -n oamns
    +
  2. +
  3. +

    In the edit session, search for serverStartPolicy: IfNeeded under the domain spec:

    +
    ...
    +   volumeMounts:
    +   - mountPath: /u01/oracle/user_projects/domains
    +     name: weblogic-domain-storage-volume
    +   volumes:
    +   - name: weblogic-domain-storage-volume
    +     persistentVolumeClaim:
    +       claimName: accessdomain-domain-pvc
    + serverStartPolicy: IfNeeded
    + webLogicCredentialsSecret:
    + name: accessdomain-credentials
    +...
    +
  4. +
  5. +

    Change serverStartPolicy: IfNeeded to Never as follows:

    +
    ...
    +   volumeMounts:
    +   - mountPath: /u01/oracle/user_projects/domains
    +     name: weblogic-domain-storage-volume
    +   volumes:
    +   - name: weblogic-domain-storage-volume
    +     persistentVolumeClaim:
    +       claimName: accessdomain-domain-pvc
    + serverStartPolicy: Never
    + webLogicCredentialsSecret:
    + name: accessdomain-credentials
    +...
    +
  6. +
  7. +

    Save the file and exit (:wq!).

    +
  8. +
  9. +

    Run the following kubectl command to view the pods:

    +
    $ kubectl get pods -n <domain_namespace>
    +

    For example:

    +
    $ kubectl get pods -n oamns
    +

    The output will look similar to the following:

    +
    NAME                                                     READY   STATUS        RESTARTS   AGE
    +accessdomain-adminserver                                 1/1     Terminating   0          3h52m
    +accessdomain-create-oam-infra-domain-job-7c9r9           0/1     Completed     0          3h59m
    +accessdomain-oam-policy-mgr1                             1/1     Terminating   0          3h44m
    +accessdomain-oam-server1                                 1/1     Terminating   0          3h44m
    +helper                                                   1/1     Running       0          4h14m
    +nginx-ingress-ingress-nginx-controller-76fb7678f-k8rhq   1/1     Running       0          78m
    +

    The Administration Server pod and Managed Server pods will move to a STATUS of Terminating. After a few minutes, run the command again and the pods should have disappeared:

    +
    NAME                                                     READY   STATUS      RESTARTS   AGE
    +accessdomain-create-oam-infra-domain-job-7c9r9           0/1     Completed   0          4h
    +helper                                                   1/1     Running     0          4h15m
    +nginx-ingress-ingress-nginx-controller-76fb7678f-k8rhq   1/1     Running     0          80m
    +
  10. +
  11. +

    To start the Administration Server and Managed Servers up again, repeat the previous steps but change serverStartPolicy: Never to IfNeeded as follows:

    +
    ...
    +   volumeMounts:
    +   - mountPath: /u01/oracle/user_projects/domains
    +     name: weblogic-domain-storage-volume
    +   volumes:
    +   - name: weblogic-domain-storage-volume
    +     persistentVolumeClaim:
    +       claimName: accessdomain-domain-pvc
    + serverStartPolicy: IfNeeded
    + webLogicCredentialsSecret:
    + name: accessdomain-credentials
    +...
    +
  12. +
  13. +

    Run the following kubectl command to view the pods:

    +
    $ kubectl get pods -n <domain_namespace>
    +

    For example:

    +
    $ kubectl get pods -n oamns
    +

    The output will look similar to the following:

    +
    NAME                                                     READY   STATUS      RESTARTS   AGE
    +accessdomain-create-oam-infra-domain-job-7c9r9           0/1     Completed   0          4h1m
    +accessdomain-introspector-jwqxw                          1/1     Running     0          10s
    +helper                                                   1/1     Running     0          4h17m
    +nginx-ingress-ingress-nginx-controller-76fb7678f-k8rhq   1/1     Running     0          81m
    +

    The Administration Server pod will start, followed by the OAM Managed Server pods. This process will take several minutes, so keep executing the command until all the pods are running with a READY status of 1/1:

    +
    NAME                                                     READY   STATUS      RESTARTS   AGE  
    +accessdomain-adminserver                                 1/1     Running     0          10m
    +accessdomain-create-oam-infra-domain-job-7c9r9           0/1     Completed   0          4h12m
    +accessdomain-oam-policy-mgr1                             1/1     Running     0          7m35s
    +accessdomain-oam-server1                                 1/1     Running     0          7m35s
    +helper                                                   1/1     Running     0          4h28m
    +nginx-ingress-ingress-nginx-controller-76fb7678f-k8rhq   1/1     Running     0          92m
    +
  14. +
+
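For scripted use, the same stop and start can be performed without an interactive edit session. A minimal sketch using kubectl patch is shown below; set the value back to IfNeeded to start the domain again:

$ kubectl patch domain accessdomain -n oamns --type=merge -p '{"spec":{"serverStartPolicy":"Never"}}'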

Domain lifecycle sample scripts

+

The WebLogic Kubernetes Operator provides sample scripts to start up or shut down a specific Managed Server or cluster in a deployed domain, or the entire deployed domain.

+

Note: Prior to running these scripts, you must have previously created and deployed the domain.

+

The scripts are located in the $WORKDIR/kubernetes/domain-lifecycle directory. For more information, see the README.
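For illustration, typical invocations look like the following; the exact script names and options are described in the README and may vary between operator releases:

$ cd $WORKDIR/kubernetes/domain-lifecycle
$ ./startServer.sh -d accessdomain -n oamns -s oam_server2
$ ./stopCluster.sh -d accessdomain -n oamns -c oam_cluster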

diff --git a/docs/23.4.1/idm-products/oam/manage-oam-domains/hpa/index.html b/docs/23.4.1/idm-products/oam/manage-oam-domains/hpa/index.html new file mode 100644 index 000000000..e344afa82 --- /dev/null +++ b/docs/23.4.1/idm-products/oam/manage-oam-domains/hpa/index.html @@ -0,0 +1,4255 @@

e. Kubernetes Horizontal Pod Autoscaler

  1. Prerequisite configuration
  2. Deploy the Kubernetes Metrics Server
      1. Troubleshooting
  3. Deploy HPA
  4. Testing HPA
  5. Delete the HPA
  6. Other considerations

Kubernetes Horizontal Pod Autoscaler (HPA) is supported from WebLogic Kubernetes Operator 4.0.X and later.

+

HPA allows automatic scaling (up and down) of the OAM Managed Servers. If load increases then extra OAM Managed Servers will be started as required, up to the value of configuredManagedServerCount defined when the domain was created (see Prepare the create domain script). Similarly, if load decreases, OAM Managed Servers will be automatically shut down.

+

For more information on HPA, see Horizontal Pod Autoscaling.

+

The instructions below show you how to configure and run an HPA to scale an OAM cluster (accessdomain-oam-cluster) resource, based on CPU utilization or memory resource metrics. If required, you can also perform the following for the accessdomain-policy-cluster.

+

Note: If you enable HPA and then decide you want to start/stop/scale OAM Managed servers manually as per Domain Life Cycle, it is recommended to delete HPA beforehand as per Delete the HPA.

+

Prerequisite configuration

+

In order to use HPA, the OAM domain must have been created with the required resources parameter as per Set the OAM server memory parameters. For example:

+
serverPod:
+  env:
+  - name: USER_MEM_ARGS
+    value: "-XX:+UseContainerSupport -Djava.security.egd=file:/dev/./urandom -Xms8192m -Xmx8192m"
+  resources:
+    limits:
+      cpu: "2"
+      memory: "8Gi"
+    requests:
+      cpu: "1000m"
+      memory: "4Gi"
+

If you created the OAM domain without setting these parameters, then you can update the domain using the following steps:

+
    +
  1. +

    Run the following command to edit the cluster:

    +
    $ kubectl edit cluster accessdomain-oam-cluster -n oamns
    +

    Note: This opens an edit session for the oam-cluster where parameters can be changed using standard vi commands.

    +
  2. +
  3. +

    In the edit session, search for spec:, and then look for the replicas parameter under clusterName: oam_cluster. Change the entry so it looks as follows:

    +
    spec:
    +  clusterName: oam_cluster
    +  replicas: 1
    +  serverPod:
    +    env:
    +    - name: USER_MEM_ARGS
    +      value: -XX:+UseContainerSupport -Djava.security.egd=file:/dev/./urandom -Xms8192m -Xmx8192m
    +    resources:
    +      limits:
    +        cpu: "2"
    +        memory: 8Gi
    +      requests:
    +        cpu: 1000m
    +        memory: 4Gi
    +  serverService:
    +    precreateService: true
    +    ...
    +
  4. +
  5. +

    Save the file and exit (:wq!)

    +

    The output will look similar to the following:

    +
    cluster.weblogic.oracle/accessdomain-oam-cluster edited
    +

    The OAM Managed Server pods will then automatically be restarted.

    +
  6. +
+
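Once the pods have restarted, you can optionally confirm that the resource requests and limits have been applied to a running OAM Managed Server pod, for example:

$ kubectl get pod accessdomain-oam-server1 -n oamns -o jsonpath='{.spec.containers[0].resources}'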

Deploy the Kubernetes Metrics Server

+

Before deploying HPA you must deploy the Kubernetes Metrics Server.

+
    +
  1. +

    Check to see if the Kubernetes Metrics Server is already deployed:

    +
    $ kubectl get pods -n kube-system | grep metric
    +

    If a row is returned as follows, then the Kubernetes Metrics Server is already deployed and you can move to Deploy HPA.

    +
    metrics-server-d9694457-mf69d           1/1     Running   0             5m13s
    +
  2. +
  3. +

    If no rows are returned by the previous command, then the Kubernetes Metrics Server needs to be deployed. Run the following commands to get the components.yaml:

    +
    $ mkdir $WORKDIR/kubernetes/hpa
    +$ cd $WORKDIR/kubernetes/hpa
    +$ wget https://github.com/kubernetes-sigs/metrics-server/releases/latest/download/components.yaml
    +
  4. +
  5. +

    Deploy the Kubernetes Metrics Server by running the following command:

    +
    $ kubectl apply -f components.yaml
    +

    The output will look similar to the following:

    +
    serviceaccount/metrics-server created
    +clusterrole.rbac.authorization.k8s.io/system:aggregated-metrics-reader created
    +clusterrole.rbac.authorization.k8s.io/system:metrics-server created
    +rolebinding.rbac.authorization.k8s.io/metrics-server-auth-reader created
    +clusterrolebinding.rbac.authorization.k8s.io/metrics-server:system:auth-delegator created
    +clusterrolebinding.rbac.authorization.k8s.io/system:metrics-server created
    +service/metrics-server created
    +deployment.apps/metrics-server created
    +apiservice.apiregistration.k8s.io/v1beta1.metrics.k8s.io created
    +
  6. +
  7. +

    Run the following command to check that the Kubernetes Metrics Server is running:

    +
    $ kubectl get pods -n kube-system | grep metric
    +

    Make sure the pod has a READY status of 1/1:

    +
    metrics-server-d9694457-mf69d           1/1     Running   0             39s
    +
  8. +
+
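Once the Kubernetes Metrics Server is running, you can optionally confirm that resource metrics are being collected (it may take a minute or so for the first metrics to appear):

$ kubectl top nodes
$ kubectl top pods -n oamns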

Troubleshooting

+

If the Kubernetes Metrics Server does not reach the READY 1/1 state, run the following commands:

+
$ kubectl describe pod <metrics-server-pod> -n kube-system
+$ kubectl logs <metrics-server-pod> -n kube-system
+

If you see errors such as:

+
Readiness probe failed: HTTP probe failed with statuscode: 500
+

and:

+
E0907 13:07:50.937308       1 scraper.go:140] "Failed to scrape node" err="Get \"https://100.105.18.113:10250/metrics/resource\": x509: cannot validate certificate for 100.105.18.113 because it doesn't contain any IP SANs" node="worker-node1"
+

then you may need to install a valid cluster certificate for your Kubernetes cluster.

+

For testing purposes, you can resolve this issue by:

+
    +
  1. +

    Delete the Kubernetes Metrics Server by running the following command:

    +
    $ kubectl delete -f $WORKDIR/kubernetes/hpa/components.yaml
    +
  2. +
  3. +

    Edit the $WORKDIR/kubernetes/hpa/components.yaml file and locate the args: section. Add --kubelet-insecure-tls to the arguments. For example:

    +
    spec:
    +  containers:
    +  - args:
    +    - --cert-dir=/tmp
    +    - --secure-port=4443
    +    - --kubelet-preferred-address-types=InternalIP,ExternalIP,Hostname
    +    - --kubelet-use-node-status-port
    +    - --kubelet-insecure-tls
    +    - --metric-resolution=15s
    +    image: registry.k8s.io/metrics-server/metrics-server:v0.6.4
    + ...
    +
  4. +
  5. +

    Deploy the Kubernetes Metrics Server using the command:

    +
    $ kubectl apply -f components.yaml
    +

    Run the following and make sure the READY status shows 1/1:

    +
    $ kubectl get pods -n kube-system | grep metric
    +

    The output should look similar to the following:

    +
    metrics-server-d9694457-mf69d           1/1     Running   0             40s
    +
  6. +
+

Deploy HPA

+

The steps below show how to configure and run an HPA to scale the accessdomain-oam-cluster, based on the CPU or memory utilization resource metrics.

+

The default OAM deployment creates the cluster accessdomain-oam-cluster which starts one OAM Managed Server (oam_server1). The deployment also creates, but doesn’t start, four extra OAM Managed Servers (oam_server2 to oam_server5).

+

In the following example an HPA resource is created, targeted at the cluster resource accessdomain-oam-cluster. This resource will autoscale OAM Managed Servers from a minimum of 1 cluster member up to 5 cluster members. Scaling up will occur when the average CPU is consistently over 70%. Scaling down will occur when the average CPU is consistently below 70%.

+
    +
  1. +

    Navigate to the $WORKDIR/kubernetes/hpa directory and create an autoscalehpa.yaml file that contains the following.

    +
    #
    +apiVersion: autoscaling/v2
    +kind: HorizontalPodAutoscaler
    +metadata:
    +  name: accessdomain-oam-cluster-hpa
    +  namespace: oamns
    +spec:
    +  scaleTargetRef:
    +    apiVersion: weblogic.oracle/v1
    +    kind: Cluster
    +    name: accessdomain-oam-cluster
    +  behavior:
    +    scaleDown:
    +      stabilizationWindowSeconds: 60
    +    scaleUp:
    +      stabilizationWindowSeconds: 60
    +  minReplicas: 1
    +  maxReplicas: 5
    +  metrics:
    +  - type: Resource
    +    resource:
    +      name: cpu
    +      target:
    +        type: Utilization
    +        averageUtilization: 70
    +

    Note: minReplicas and maxReplicas should match your current domain settings.

    +

    Note: For setting HPA based on memory metrics, update the metrics block with the following content. It is recommended to use either CPU or memory as the metric, not both.

    +
    metrics:
    +- type: Resource
    +  resource:
    +    name: memory
    +    target:
    +      type: Utilization
    +      averageUtilization: 70
    +
  2. +
  3. +

    Run the following command to create the autoscaler:

    +
    $ kubectl apply -f autoscalehpa.yaml
    +

    The output will look similar to the following:

    +
    horizontalpodautoscaler.autoscaling/accessdomain-oam-cluster-hpa created
    +
  4. +
  5. +

    Verify the status of the autoscaler by running the following:

    +
    $ kubectl get hpa -n oamns
    +

    The output will look similar to the following:

    +
    NAME                           REFERENCE                          TARGETS   MINPODS   MAXPODS   REPLICAS   AGE
    +accessdomain-oam-cluster-hpa   Cluster/accessdomain-oam-cluster   5%/70%    1         5         1          21s
    +

    In the example above, this shows that CPU is currently running at 5% for the accessdomain-oam-cluster-hpa.

    +
  6. +
+
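To see the conditions and scaling events recorded by the autoscaler, you can also run:

$ kubectl describe hpa accessdomain-oam-cluster-hpa -n oamns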

Testing HPA

+
    +
  1. +

    Check the current status of the OAM Managed Servers:

    +
    $ kubectl get pods -n oamns
    +

    The output will look similar to the following:

    +
    NAME                                                     READY   STATUS      RESTARTS        AGE
    +accessdomain-adminserver                                 0/1     Running     0               141m
    +accessdomain-create-oam-infra-domain-job-6br2j           0/1     Completed   0               5h19m
    +accessdomain-oam-policy-mgr1                             0/1     Running     0               138m
    +accessdomain-oam-server1                                 1/1     Running     0               138m
    +helper                                                   1/1     Running     0               21h
    +nginx-ingress-ingress-nginx-controller-5f9bdf4c9-f5trt   1/1     Running     0               4h33m
    +

    In the above, only accessdomain-oam-server1 is running.

    +
  2. +
  3. +

    To test that HPA can scale up the WebLogic cluster accessdomain-oam-cluster, run the following commands:

    +
    $ kubectl exec --stdin --tty accessdomain-oam-server1 -n oamns -- /bin/bash
    +

    This will take you inside a bash shell inside the oam_server1 pod:

    +
    [oracle@accessdomain-oam-server1 oracle]$
    +

    Inside the bash shell, run the following command to increase the load on the CPU:

    +
    [oracle@accessdomain-oam-server1 oracle]$ dd if=/dev/zero of=/dev/null
    +

    This command will continue to run in the foreground.

    +
  4. +
  5. +

    In a command window outside the bash shell, run the following command to view the current CPU usage:

    +
    $ kubectl get hpa -n oamns
    +

    The output will look similar to the following:

    +
    NAME                           REFERENCE                          TARGETS   MINPODS   MAXPODS   REPLICAS   AGE
    +accessdomain-oam-cluster-hpa   Cluster/accessdomain-oam-cluster   470%/70%    1         5         1        21s
    +

    In the above example the CPU has increased to 470%. As this is above the 70% limit, the autoscaler increases the replicas on the Cluster resource and the operator responds by starting additional cluster members.

    +
  6. +
  7. +

    Run the following to see if any more OAM Managed Servers are started:

    +
    $ kubectl get pods -n oamns
    +

    The output will look similar to the following:

    +
    NAME                                                     READY   STATUS      RESTARTS        AGE
    +accessdomain-adminserver                                 0/1     Running                     143m
    +accessdomain-create-oam-infra-domain-job-6br2j           0/1     Completed   0               5h21m
    +accessdomain-oam-policy-mgr1                             0/1     Running     0               140m
    +accessdomain-oam-server1                                 1/1     Running     0               140m
    +accessdomain-oam-server2                                 1/1     Running     0               3m20s
    +accessdomain-oam-server3                                 1/1     Running     0               3m20s
    +accessdomain-oam-server4                                 1/1     Running     0               3m19s
    +accessdomain-oam-server5                                 1/1     Running     0               3m5s
    +helper                                                   1/1     Running     0               21h
    +

    In the example above four more OAM Managed Servers have been started (oam_server2 to oam_server5).

    +

    Note: It may take some time for the servers to appear and start. Once a server shows a READY status of 1/1, it has started.

    +
  8. +
  9. +

    To stop the load on the CPU, press Ctrl+C in the bash shell, and then exit the bash shell:

    +
    [oracle@accessdomain-oam-server1 oracle]$ dd if=/dev/zero of=/dev/null
    +^C
    +[oracle@accessdomain-oam-server1 oracle]$ exit
    +
  10. +
  11. +

    Run the following command to view the current CPU usage:

    +
    $ kubectl get hpa -n oamns
    +

    The output will look similar to the following:

    +
    NAME                           REFERENCE                          TARGETS   MINPODS   MAXPODS   REPLICAS   AGE
    +accessdomain-oam-cluster-hpa   Cluster/accessdomain-oam-cluster   19%/70%   1         5         5          19m
    +

    In the above example CPU has dropped to 19%. As this is below the 70% threshold, you should see the autoscaler scale down the servers:

    +
    $ kubectl get pods -n oamns
    +

    The output will look similar to the following:

    +
    NAME                                                     READY   STATUS        RESTARTS        AGE
    +accessdomain-adminserver                                 1/1     Running       0               152m
    +accessdomain-create-oam-infra-domain-job-6br2j           0/1     Completed     0               5h30m
    +accessdomain-oam-policy-mgr1                             1/1     Running       0               149m
    +accessdomain-oam-server1                                 1/1     Running       0               149m
    +accessdomain-oam-server2                                 1/1     Running       0               14m
    +accessdomain-oam-server3                                 0/1     Terminating   0               14m
    +helper                                                   1/1     Running       0               21h
    +nginx-ingress-ingress-nginx-controller-5f9bdf4c9-f5trt   1/1     Running       0               4h45m
    +

    Eventually, all the servers except oam_server1 will disappear:

    +
    NAME                                                     READY   STATUS      RESTARTS       AGE
    +accessdomain-adminserver                                 1/1     Running     0              154m
    +accessdomain-create-oam-infra-domain-job-6br2j           0/1     Completed   0              5h32m
    +accessdomain-oam-policy-mgr1                             1/1     Running     0              151m
    +accessdomain-oam-server1                                 1/1     Running     0              151m
    +helper                                                   1/1     Running     0              21h
    +nginx-ingress-ingress-nginx-controller-5f9bdf4c9-f5trt   1/1     Running     0              4h47m
    +
  12. +
+
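Rather than re-running kubectl get hpa and kubectl get pods repeatedly during the test, you can watch both continuously from separate command windows, for example:

$ kubectl get hpa -n oamns -w
$ kubectl get pods -n oamns -w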

Delete the HPA

+
    +
  1. +

    If you need to delete the HPA, you can do so by running the following command:

    +
    $ cd $WORKDIR/kubernetes/hpa
    +$ kubectl delete -f autoscalehpa.yaml
    +
  2. +
+

Other considerations

+
    +
  • If HPA is deployed and you need to upgrade the OAM image, then you must delete the HPA before upgrading. Once the upgrade is successful you can deploy HPA again.
  • If you choose to start/stop an OAM Managed Server manually as per Domain Life Cycle, then it is recommended to delete the HPA before doing so.
diff --git a/docs/23.4.1/idm-products/oam/manage-oam-domains/index.html b/docs/23.4.1/idm-products/oam/manage-oam-domains/index.html new file mode 100644 index 000000000..571628822 --- /dev/null +++ b/docs/23.4.1/idm-products/oam/manage-oam-domains/index.html @@ -0,0 +1,4096 @@

Manage OAM Domains


Important considerations for Oracle Access Management domains in Kubernetes.

+ + + + + + + + + + + + + + + + + + diff --git a/docs/23.4.1/idm-products/oam/manage-oam-domains/index.xml b/docs/23.4.1/idm-products/oam/manage-oam-domains/index.xml new file mode 100644 index 000000000..40e25d527 --- /dev/null +++ b/docs/23.4.1/idm-products/oam/manage-oam-domains/index.xml @@ -0,0 +1,82 @@ + + + + Manage OAM Domains on Oracle Fusion Middleware on Kubernetes + /fmw-kubernetes/23.4.1/idm-products/oam/manage-oam-domains/ + Recent content in Manage OAM Domains on Oracle Fusion Middleware on Kubernetes + Hugo -- gohugo.io + en-us + + + + + + a. Domain Life Cycle + /fmw-kubernetes/23.4.1/idm-products/oam/manage-oam-domains/domain-lifecycle/ + Mon, 01 Jan 0001 00:00:00 +0000 + + /fmw-kubernetes/23.4.1/idm-products/oam/manage-oam-domains/domain-lifecycle/ + View existing OAM servers Starting/Scaling up OAM Managed servers Stopping/Scaling down OAM Managed servers Starting/Scaling up OAM Policy Managed servers Stopping/Scaling down OAM Policy Managed servers Stopping and starting the Administration Server and Managed Servers Domain lifecycle sample scripts As OAM domains use the WebLogic Kubernetes Operator, domain lifecyle operations are managed using the WebLogic Kubernetes Operator itself. +This document shows the basic operations for starting, stopping and scaling servers in the OAM domain. + + + + b. WLST Administration Operations + /fmw-kubernetes/23.4.1/idm-products/oam/manage-oam-domains/wlst-admin-operations/ + Mon, 01 Jan 0001 00:00:00 +0000 + + /fmw-kubernetes/23.4.1/idm-products/oam/manage-oam-domains/wlst-admin-operations/ + To use WLST to administer the OAM domain, use the helper pod in the same Kubernetes cluster as the OAM Domain. + Check to see if the helper pod exists by running: +$ kubectl get pods -n &lt;domain_namespace&gt; | grep helper For example: +$ kubectl get pods -n oamns | grep helper The output should look similar to the following: +helper 1/1 Running 0 26h If the helper pod doesn&rsquo;t exist then see Step 1 in Prepare your environment to create it. + + + + c. Logging and Visualization + /fmw-kubernetes/23.4.1/idm-products/oam/manage-oam-domains/logging-and-visualization/ + Mon, 01 Jan 0001 00:00:00 +0000 + + /fmw-kubernetes/23.4.1/idm-products/oam/manage-oam-domains/logging-and-visualization/ + After the OAM domain is set up you can publish operator and WebLogic Server logs into Elasticsearch and interact with them in Kibana. +Install Elasticsearch stack and Kibana If you do not already have a centralized Elasticsearch (ELK) stack then you must configure this first. For details on how to configure the ELK stack, follow Installing Elasticsearch (ELK) Stack and Kibana +Create the logstash pod Variables used in this chapter In order to create the logstash pod, you must create several files. + + + + d. Monitoring an OAM domain + /fmw-kubernetes/23.4.1/idm-products/oam/manage-oam-domains/monitoring-oam-domains/ + Mon, 01 Jan 0001 00:00:00 +0000 + + /fmw-kubernetes/23.4.1/idm-products/oam/manage-oam-domains/monitoring-oam-domains/ + After the OAM domain is set up you can monitor the OAM instance using Prometheus and Grafana. See Monitoring a domain. +The WebLogic Monitoring Exporter uses the WLS RESTful Management API to scrape runtime information and then exports Prometheus-compatible metrics. It is deployed as a web application in a WebLogic Server (WLS) instance, version 12.2.1 or later, typically, in the instance from which you want to get metrics. +There are two ways to setup monitoring and you should choose one method or the other: + + + + e. 
Kubernetes Horizontal Pod Autoscaler + /fmw-kubernetes/23.4.1/idm-products/oam/manage-oam-domains/hpa/ + Mon, 01 Jan 0001 00:00:00 +0000 + + /fmw-kubernetes/23.4.1/idm-products/oam/manage-oam-domains/hpa/ + Prerequisite configuration Deploy the Kubernetes Metrics Server Troubleshooting Deploy HPA Testing HPA Delete the HPA Other considerations Kubernetes Horizontal Pod Autoscaler (HPA) is supported from Weblogic Kubernetes Operator 4.0.X and later. +HPA allows automatic scaling (up and down) of the OAM Managed Servers. If load increases then extra OAM Managed Servers will be started as required, up to the value configuredManagedServerCount defined when the domain was created (see Prepare the create domain script). + + + + f. Delete the OAM domain home + /fmw-kubernetes/23.4.1/idm-products/oam/manage-oam-domains/delete-domain-home/ + Mon, 01 Jan 0001 00:00:00 +0000 + + /fmw-kubernetes/23.4.1/idm-products/oam/manage-oam-domains/delete-domain-home/ + Sometimes in production, but most likely in testing environments, you might want to remove the domain home that is generated using the create-domain.sh script. + Run the following command to delete the domain: +$ cd $WORKDIR/kubernetes/delete-domain $ ./delete-weblogic-domain-resources.sh -d &lt;domain_uid&gt; For example: +$ cd $WORKDIR/kubernetes/delete-domain $ ./delete-weblogic-domain-resources.sh -d accessdomain Drop the RCU schemas as follows: +$ kubectl exec -it helper -n &lt;domain_namespace&gt; -- /bin/bash [oracle@helper ~]$ [oracle@helper ~]$ export CONNECTION_STRING=&lt;db_host. + + + + \ No newline at end of file diff --git a/docs/23.4.1/idm-products/oam/manage-oam-domains/logging-and-visualization/index.html b/docs/23.4.1/idm-products/oam/manage-oam-domains/logging-and-visualization/index.html new file mode 100644 index 000000000..400c0fedc --- /dev/null +++ b/docs/23.4.1/idm-products/oam/manage-oam-domains/logging-and-visualization/index.html @@ -0,0 +1,4467 @@ + + + + + + + + + + + + c. Logging and Visualization :: Oracle Fusion Middleware on Kubernetes + + + + + + + + + + + + + + + + + + + + + + + + + +

c. Logging and Visualization


After the OAM domain is set up you can publish operator and WebLogic Server logs into Elasticsearch and interact with them in Kibana.

+

Install Elasticsearch stack and Kibana

+

If you do not already have a centralized Elasticsearch (ELK) stack then you must configure this first. For details on how to configure the ELK stack, follow Installing Elasticsearch (ELK) Stack and Kibana.

+

Create the logstash pod

+

Variables used in this chapter

+

In order to create the logstash pod, you must create several files. These files contain variables which you must substitute with values applicable to your environment.

+

Most of the values for the variables will be based on your ELK deployment as per Installing Elasticsearch (ELK) Stack and Kibana.

+

The table below outlines the variables and values you must set:

Variable          Sample Value                              Description
<ELK_VER>         8.3.1                                     The version of logstash you want to install.
<ELK_SSL>         true                                      If SSL is enabled for ELK set the value to true, or if NON-SSL set to false. This value must be lowercase.
<ELK_HOSTS>       https://elasticsearch.example.com:9200    The URL for sending logs to Elasticsearch. HTTP if NON-SSL is used.
<ELKNS>           oamns                                     The domain namespace.
<ELK_USER>        logstash_internal                         The name of the user for logstash to access Elasticsearch.
<ELK_PASSWORD>    password                                  The password for ELK_USER.
<ELK_APIKEY>      apikey                                    The API key details.
+

You will also need the BASE64 version of the Certificate Authority (CA) certificate(s) that signed the certificate of the Elasticsearch server. If using a self-signed certificate, this is the self-signed certificate of the Elasticsearch server. See Copying the Elasticsearch Certificate for details on how to get the correct certificate. In the example below the certificate is called elk.crt.

+
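For a self-signed Elasticsearch certificate, one possible way to capture it (assuming the example hostname and port used in this chapter) is:

$ openssl s_client -connect elasticsearch.example.com:9200 -showcerts </dev/null 2>/dev/null | openssl x509 -outform PEM > elk.crt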

Create Kubernetes secrets

+
    +
  1. +

    Create a Kubernetes secret for Elasticsearch using the API Key or Password.

    +

    a) If ELK uses an API Key for authentication:

    +
    $ kubectl create secret generic elasticsearch-pw-elastic -n <domain_namespace> --from-literal password=<ELK_APIKEY>
    +

    For example:

    +
    $ kubectl create secret generic elasticsearch-pw-elastic -n oamns --from-literal password=<ELK_APIKEY>
    +

    The output will look similar to the following:

    +
    secret/elasticsearch-pw-elastic created
    +

    b) If ELK uses a password for authentication:

    +
    $ kubectl create secret generic elasticsearch-pw-elastic -n <domain_namespace> --from-literal password=<ELK_PASSWORD>
    +

    For example:

    +
    $ kubectl create secret generic elasticsearch-pw-elastic -n oamns --from-literal password=<ELK_PASSWORD>
    +

    The output will look similar to the following:

    +
    secret/elasticsearch-pw-elastic created
    +

    Note: It is recommended that the ELK Stack is created with authentication enabled. If no authentication is enabled you may create a secret using the values above.

    +
  2. +
  3. +

    Create a Kubernetes secret to access the required images on hub.docker.com:

    +

    Note: Before executing the command below, you must first have a user account on hub.docker.com.

    +
    kubectl create secret docker-registry "dockercred" --docker-server="https://index.docker.io/v1/" \
    +--docker-username="<DOCKER_USER_NAME>" \
    +--docker-password=<DOCKER_PASSWORD> --docker-email=<DOCKER_EMAIL_ID> \
    +--namespace=<domain_namespace>
    +

    For example,

    +
    kubectl create secret docker-registry "dockercred" --docker-server="https://index.docker.io/v1/" \
    +--docker-username="user@example.com" \
    +--docker-password=password --docker-email=user@example.com \
    +--namespace=oamns
    +

    The output will look similar to the following:

    +
    secret/dockercred created
    +
  4. +
+
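You can verify that both secrets now exist in the domain namespace with:

$ kubectl get secrets -n oamns | grep -E 'elasticsearch-pw-elastic|dockercred'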

Find the mountPath details

+
    +
  1. +

    Run the following command to get the mountPath of your domain:

    +
    $ kubectl describe domains <domain_uid> -n <domain_namespace> | grep "Mount Path"
    +

    For example:

    +
    $ kubectl describe domains accessdomain -n oamns | grep "Mount Path"
    +

    The output will look similar to the following:

    +
    Mount Path:  /u01/oracle/user_projects/domains
    +
  2. +
+

Find the persistentVolumeClaim details

+
    +
  1. +

    Run the following command to get the OAM domain persistence volume details:

    +
    $ kubectl get pv -n <domain_namespace>
    +

    For example:

    +
    $ kubectl get pv -n oamns
    +

    The output will look similar to the following:

    +
    NAME                     CAPACITY   ACCESS MODES   RECLAIM POLICY   STATUS  CLAIM                           STORAGECLASS                         REASON   AGE
    +accessdomain-domain-pv   10Gi       RWX            Retain           Bound   oamns/accessdomain-domain-pvc   accessdomain-domain-storage-class           23h
    +

    Make note of the CLAIM value, for example in this case accessdomain-domain-pvc.

    +
  2. +
+

Create the Configmap

+
    +
  1. +

    Copy the elk.crt file to the $WORKDIR/kubernetes/elasticsearch-and-kibana directory.

    +
  2. +
  3. +

    Navigate to the $WORKDIR/kubernetes/elasticsearch-and-kibana directory and run the following:

    +
    kubectl create configmap elk-cert --from-file=elk.crt -n <namespace>
    +

    For example:

    +
    kubectl create configmap elk-cert --from-file=elk.crt -n oamns
    +

    The output will look similar to the following:

    +
    configmap/elk-cert created
    +
  4. +
  5. +

    Create a logstash_cm.yaml file in the $WORKDIR/kubernetes/elasticsearch-and-kibana directory as follows:

    +
    apiVersion: v1
    +kind: ConfigMap
    +metadata:
    +  name: oam-logstash-configmap
    +  namespace: <ELKNS>
    +data:
    +  logstash.yml: |
    +  #http.host: "0.0.0.0"
    +  logstash-config.conf: |
    +    input {
    +     file {
    +        path => "/u01/oracle/user_projects/domains/logs/accessdomain/AdminServer*.log"
    +        tags => "Adminserver_log"
    +        start_position => beginning
    +      }
    +      file {
    +        path => "/u01/oracle/user_projects/domains/logs/accessdomain/oam_policy_mgr*.log"
    +        tags => "Policymanager_log"
    +        start_position => beginning
    +      }
    +      file {
    +        path => "/u01/oracle/user_projects/domains/logs/accessdomain/oam_server*.log"
    +        tags => "Oamserver_log"
    +        start_position => beginning
    +      }
    +      file {
    +        path => "/u01/oracle/user_projects/domains/accessdomain/servers/AdminServer/logs/AdminServer-diagnostic.log"
    +        tags => "Adminserver_diagnostic"
    +        start_position => beginning
    +      }
    +      file {
    +        path => "/u01/oracle/user_projects/domains/accessdomain/servers/**/logs/oam_policy_mgr*-diagnostic.log"
    +        tags => "Policy_diagnostic"
    +        start_position => beginning
    +      }
    +      file {
    +      path => "/u01/oracle/user_projects/domains/accessdomain/servers/AdminServer/logs/auditlogs/OAM/audit.log"
    +      tags => "Audit_logs"
    +      start_position => beginning
    +      }
    +    }
    +    filter {
    +      grok {
    +        match => [ "message", "<%{DATA:log_timestamp}> <%{WORD:log_level}> <%{WORD:thread}> <%{HOSTNAME:hostname}> <%{HOSTNAME:servername}> <%{DATA:timer}> <<%{DATA:kernel}>> <> <%{DATA:uuid}> <%{NUMBER:timestamp}> <%{DATA:misc}> <%{DATA:log_number}> <%{DATA:log_message}>" ]
    +      }
    +    if "_grokparsefailure" in [tags] {
    +        mutate {
    +            remove_tag => [ "_grokparsefailure" ]
    +        }
    +    }
    +    }
    +    output {
    +      elasticsearch {
    +    hosts => ["<ELK_HOSTS>"]
    +    cacert => '/usr/share/logstash/config/certs/elk.crt'
    +    index => "oamlogs-000001"
    +    ssl => true
    +    ssl_certificate_verification => false
    +    user => "<ELK_USER>"
    +    password => "${ELASTICSEARCH_PASSWORD}"
    +    api_key => "${ELASTICSEARCH_PASSWORD}"
    +      }
    +    }
    +

    Change the values in the above file as follows:

    +
      +
    • Change the <ELKNS>, <ELK_HOSTS>, <ELK_SSL>, and <ELK_USER> to match the values for your environment.
    • Change /u01/oracle/user_projects/domains to match the mountPath returned earlier.
    • If your domainUID is anything other than accessdomain, change each instance of accessdomain to your domainUID.
    • If using API KEY for your ELK authentication, delete the user and password lines.
    • If using a password for ELK authentication, delete the api_key line.
    • If no authentication is used for ELK, delete the user, password, and api_key lines.
    +

    For example:

    +
    apiVersion: v1
    +kind: ConfigMap
    +metadata:
    +  name: oam-logstash-configmap
    +  namespace: oamns
    +data:
    +  logstash.yml: |
    +  #http.host: "0.0.0.0"
    +  logstash-config.conf: |
    +    input {
    +     file {
    +        path => "/u01/oracle/user_projects/domains/logs/accessdomain/AdminServer*.log"
    +        tags => "Adminserver_log"
    +        start_position => beginning
    +      }
    +      file {
    +        path => "/u01/oracle/user_projects/domains/logs/accessdomain/oam_policy_mgr*.log"
    +        tags => "Policymanager_log"
    +        start_position => beginning
    +      }
    +      file {
    +        path => "/u01/oracle/user_projects/domains/logs/accessdomain/oam_server*.log"
    +        tags => "Oamserver_log"
    +        start_position => beginning
    +      }
    +      file {
    +        path => "/u01/oracle/user_projects/domains/accessdomain/servers/AdminServer/logs/AdminServer-diagnostic.log"
    +        tags => "Adminserver_diagnostic"
    +        start_position => beginning
    +      }
    +      file {
    +        path => "/u01/oracle/user_projects/domains/accessdomain/servers/**/logs/oam_policy_mgr*-diagnostic.log"
    +        tags => "Policy_diagnostic"
    +        start_position => beginning
    +      }
    +      file {
    +      path => "/u01/oracle/user_projects/domains/accessdomain/servers/AdminServer/logs/auditlogs/OAM/audit.log"
    +      tags => "Audit_logs"
    +      start_position => beginning
    +      }
    +    }
    +    filter {
    +      grok {
    +        match => [ "message", "<%{DATA:log_timestamp}> <%{WORD:log_level}> <%{WORD:thread}> <%{HOSTNAME:hostname}> <%{HOSTNAME:servername}> <%{DATA:timer}> <<%{DATA:kernel}>> <> <%{DATA:uuid}> <%{NUMBER:timestamp}> <%{DATA:misc}> <%{DATA:log_number}> <%{DATA:log_message}>" ]
    +      }
    +    if "_grokparsefailure" in [tags] {
    +        mutate {
    +            remove_tag => [ "_grokparsefailure" ]
    +        }
    +    }
    +    }
    +    output {
    +      elasticsearch {
    +    hosts => ["https://elasticsearch.example.com:9200"]
    +    cacert => '/usr/share/logstash/config/certs/elk.crt'
    +    index => "oamlogs-000001"
    +    ssl => true
    +    ssl_certificate_verification => false
    +    user => "logstash_internal"
    +    password => "${ELASTICSEARCH_PASSWORD}"
    +      }
    +    }
    +
  6. +
  7. +

    Run the following command to create the configmap:

    +
    $  kubectl apply -f logstash_cm.yaml
    +

    The output will look similar to the following:

    +
    configmap/oam-logstash-configmap created
    +
  8. +
+

Deploy the logstash pod

+
    +
  1. +

    Navigate to the $WORKDIR/kubernetes/elasticsearch-and-kibana directory and create a logstash.yaml file as follows:

    +
    apiVersion: apps/v1
    +kind: Deployment
    +metadata:
    +  name: oam-logstash
    +  namespace: <ELKNS>
    +spec:
    +  selector:
    +    matchLabels:
    +      k8s-app: logstash
    +  template: # create pods using pod definition in this template
    +    metadata:
    +     labels:
    +        k8s-app: logstash
    +    spec:
    +      imagePullSecrets:
    +      - name: dockercred
    +      containers:
    +      - command:
    +        - logstash
    +        image: logstash:<ELK_VER>
    +        imagePullPolicy: IfNotPresent
    +        name: oam-logstash
    +        env:
    +        - name: ELASTICSEARCH_PASSWORD
    +          valueFrom:
    +            secretKeyRef:
    +              name: elasticsearch-pw-elastic
    +              key: password
    +        resources:
    +        ports:
    +        - containerPort: 5044
    +          name: logstash
    +        volumeMounts:
    +        - mountPath: /u01/oracle/user_projects
    +          name: weblogic-domain-storage-volume
    +        - name: shared-logs
    +          mountPath: /shared-logs
    +        - mountPath: /usr/share/logstash/pipeline/
    +          name: oam-logstash-pipeline
    +        - mountPath: /usr/share/logstash/config/logstash.yml
    +          subPath: logstash.yml
    +          name: config-volume
    +        - mountPath: /usr/share/logstash/config/certs
    +          name: elk-cert
    +      volumes:
    +      - configMap:
    +          defaultMode: 420
    +          items:
    +          - key: elk.crt
    +            path: elk.crt
    +          name: elk-cert
    +        name: elk-cert
    +      - configMap:
    +          defaultMode: 420
    +          items:
    +          - key: logstash-config.conf
    +            path: logstash-config.conf
    +          name: oam-logstash-configmap
    +        name: oam-logstash-pipeline
    +      - configMap:
    +          defaultMode: 420
    +          items:
    +          - key: logstash.yml
    +            path: logstash.yml
    +          name: oam-logstash-configmap
    +        name: config-volume
    +      - name: weblogic-domain-storage-volume
    +        persistentVolumeClaim:
    +          claimName: accessdomain-domain-pvc
    +      - name: shared-logs
    +        emptyDir: {}
    +
      +
    • Change <ELKNS> and <ELK_VER> to match the values for your environment.
    • +
    • Change /u01/oracle/user_projects/domains to match the mountPath returned earlier
    • +
    • Change the claimName value to match the claimName returned earlier
    • +
    • If your Kubernetes environment does not allow access to the internet to pull the logstash image, you must load the logstash image into your own container registry and change image: logstash:<ELK_VER> to the location of the image in your container registry, for example: container-registry.example.com/logstash:8.3.1
    • +
    +

    For example:

    +
    apiVersion: apps/v1
    +kind: Deployment
    +metadata:
    +  name: oam-logstash
    +  namespace: oamns
    +spec:
    +  selector:
    +    matchLabels:
    +      k8s-app: logstash
    +  template: # create pods using pod definition in this template
    +    metadata:
    +     labels:
    +        k8s-app: logstash
    +    spec:
    +      imagePullSecrets:
    +      - name: dockercred
    +      containers:
    +      - command:
    +        - logstash
    +        image: logstash:8.3.1
    +        imagePullPolicy: IfNotPresent
    +        name: oam-logstash
    +        env:
    +        - name: ELASTICSEARCH_PASSWORD
    +          valueFrom:
    +            secretKeyRef:
    +              name: elasticsearch-pw-elastic
    +              key: password
    +        resources:
    +        ports:
    +        - containerPort: 5044
    +          name: logstash
    +        volumeMounts:
    +        - mountPath: /u01/oracle/user_projects/domains
    +          name: weblogic-domain-storage-volume
    +        - name: shared-logs
    +          mountPath: /shared-logs
    +        - mountPath: /usr/share/logstash/pipeline/
    +          name: oam-logstash-pipeline
    +        - mountPath: /usr/share/logstash/config/logstash.yml
    +          subPath: logstash.yml
    +          name: config-volume
    +        - mountPath: /usr/share/logstash/config/certs
    +          name: elk-cert
    +      volumes:
    +      - configMap:
    +          defaultMode: 420
    +          items:
    +          - key: elk.crt
    +            path: elk.crt
    +          name: elk-cert
    +        name: elk-cert
    +      - configMap:
    +          defaultMode: 420
    +          items:
    +          - key: logstash-config.conf
    +            path: logstash-config.conf
    +          name: oam-logstash-configmap
    +        name: oam-logstash-pipeline
    +      - configMap:
    +          defaultMode: 420
    +          items:
    +          - key: logstash.yml
    +            path: logstash.yml
    +          name: oam-logstash-configmap
    +        name: config-volume
    +      - name: weblogic-domain-storage-volume
    +        persistentVolumeClaim:
    +          claimName: accessdomain-domain-pvc
    +      - name: shared-logs
    +        emptyDir: {}
    +
  2. +
  3. +

    Deploy the logstash pod by executing the following command:

    +
    $ kubectl create -f $WORKDIR/kubernetes/elasticsearch-and-kibana/logstash.yaml 
    +

    The output will look similar to the following:

    +
    deployment.apps/oam-logstash created
    +
  4. +
  5. +

    Run the following command to check the logstash pod is created correctly:

    +
    $ kubectl get pods -n <namespace>
    +

    For example:

    +
    $ kubectl get pods -n oamns
    +

    The output should look similar to the following:

    +
    NAME                                            READY   STATUS      RESTARTS   AGE
    +accessdomain-adminserver                                 1/1     Running     0          18h
    +accessdomain-create-oam-infra-domain-job-7c9r9           0/1     Completed   0          23h
    +accessdomain-oam-policy-mgr1                             1/1     Running     0          18h
    +accessdomain-oam-policy-mgr2                             1/1     Running     0          18h
    +accessdomain-oam-server1                                 1/1     Running     1          18h
    +accessdomain-oam-server2                                 1/1     Running     1          18h
    +elasticsearch-f7b7c4c4-tb4pp                             1/1     Running     0          5m
    +helper                                                   1/1     Running     0          23h
    +nginx-ingress-ingress-nginx-controller-76fb7678f-k8rhq   1/1     Running     0          20h
    +oam-logstash-bbbdf5876-85nkd                             1/1     Running     0          4m23s
    +

    Note: Wait a couple of minutes to make sure the pod has not had any failures or restarts. If the pod fails you can view the pod log using:

    +
    $ kubectl logs -f oam-logstash-<pod> -n oamns
    +

    Most errors occur due to misconfiguration of logstash_cm.yaml or logstash.yaml, usually because an incorrect value was set or the certificate was not pasted with the correct indentation.

    +

    If the pod has errors, delete the pod and configmap as follows:

    +
    $ kubectl delete -f $WORKDIR/kubernetes/elasticsearch-and-kibana/logstash.yaml
    +$ kubectl delete -f $WORKDIR/kubernetes/elasticsearch-and-kibana/logstash_cm.yaml
    +

    Once you have resolved the issue in the yaml files, run the commands outlined earlier to recreate the configmap and logstash pod.

    +
  6. +
+

Verify and access the Kibana console

+

To access the Kibana console you will need the Kibana URL as per Installing Elasticsearch (ELK) Stack and Kibana.

+

For Kibana 7.7.x and below:

+
    +
  1. +

    Access the Kibana console with http://<hostname>:<port>/app/kibana and login with your username and password.

    +
  2. +
  3. +

    From the Navigation menu, navigate to Management > Kibana > Index Patterns.

    +
  4. +
  5. +

    In the Create Index Pattern page enter oamlogs* for the Index pattern and click Next Step.

    +
  6. +
  7. +

    In the Configure settings page, from the Time Filter field name drop down menu select @timestamp and click Create index pattern.

    +
  8. +
  9. +

    Once the index pattern is created click on Discover in the navigation menu to view the OAM logs.

    +
  10. +
+

For Kibana version 7.8.X and above:

+
    +
  1. +

    Access the Kibana console with http://<hostname>:<port>/app/kibana and login with your username and password.

    +
  2. +
  3. +

    From the Navigation menu, navigate to Management > Stack Management.

    +
  4. +
  5. +

    Click Data Views in the Kibana section.

    +
  6. +
  7. +

    Click Create Data View and enter the following information:

    +
      +
    • Name: oamlogs*
    • +
    • Timestamp: @timestamp
    • +
    +
  8. +
  9. +

    Click Create Data View.

    +
  10. +
  11. +

    From the Navigation menu, click Discover to view the log file entries.

    +
  12. +
  13. +

    From the drop down menu, select oamlogs* to view the log file entries.

    +
  14. +
diff --git a/docs/23.4.1/idm-products/oam/manage-oam-domains/monitoring-oam-domains/index.html b/docs/23.4.1/idm-products/oam/manage-oam-domains/monitoring-oam-domains/index.html new file mode 100644 index 000000000..93c6bc70d --- /dev/null +++ b/docs/23.4.1/idm-products/oam/manage-oam-domains/monitoring-oam-domains/index.html @@ -0,0 +1,4681 @@

d. Monitoring an OAM domain


After the OAM domain is set up you can monitor the OAM instance using Prometheus and Grafana. See Monitoring a domain.

+

The WebLogic Monitoring Exporter uses the WLS RESTful Management API to scrape runtime information and then exports Prometheus-compatible metrics. It is deployed as a web application in a WebLogic Server (WLS) instance, version 12.2.1 or later, typically, in the instance from which you want to get metrics.

+
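
Once the exporter web applications are deployed (using either method below), you can optionally verify that an exporter is serving metrics by querying its endpoint directly. The following is only a sketch: it assumes the server name, managed server port (14100) and /wls-exporter/metrics path used elsewhere in this guide, and that curl is available in the pod you run it from (for example the helper pod used in this guide). Adjust the values to match your environment.

+
$ kubectl exec -it helper -n oamns -- curl -s -u weblogic:<password> http://accessdomain-oam-server1:14100/wls-exporter/metrics
+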

There are two ways to set up monitoring and you should choose one method or the other:

+
    +
  1. Setup automatically using setup-monitoring.sh
  2. +
  3. Setup using manual configuration
  4. +
+

Setup automatically using setup-monitoring.sh

+

The $WORKDIR/kubernetes/monitoring-service/setup-monitoring.sh script sets up monitoring for the OAM domain. It installs Prometheus, Grafana, and the WebLogic Monitoring Exporter, deploys the exporter web applications to the OAM domain, and deploys the WebLogic Server Grafana dashboard.

+

For usage details execute ./setup-monitoring.sh -h.

+
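
For example, to display the usage help before editing any files:

+
$ cd $WORKDIR/kubernetes/monitoring-service
+$ ./setup-monitoring.sh -h
+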
    +
  1. +

    Edit the $WORKDIR/kubernetes/monitoring-service/monitoring-inputs.yaml and change the domainUID, domainNamespace, and weblogicCredentialsSecretName to correspond to your deployment. Also set wlsMonitoringExporterTooamCluster, wlsMonitoringExporterTopolicyCluster, and exposeMonitoringNodePort to true. For example:

    +
    version: create-accessdomain-monitoring-inputs-v1
    +
    +# Unique ID identifying your domain.
    +# This ID must not contain an underscore ("_"), and must be lowercase and unique across all domains in a Kubernetes cluster.
    +domainUID: accessdomain
    +
    +# Name of the domain namespace
    +domainNamespace: oamns
    +
    +# Boolean value indicating whether to install kube-prometheus-stack
    +setupKubePrometheusStack: true
    +
    +# Additional parameters for helm install kube-prometheus-stack
    +# Refer https://github.com/prometheus-community/helm-charts/blob/main/charts/kube-prometheus-stack/values.yaml for additional parameters
    +# Sample :
    +# additionalParamForKubePrometheusStack: --set nodeExporter.enabled=false --set prometheusOperator.tls.enabled=false --set prometheusOperator.admissionWebhooks.enabled=false
    +additionalParamForKubePrometheusStack:
    +
    +# Name of the monitoring namespace
    +monitoringNamespace: monitoring
    +
    +# Name of the Admin Server
    +adminServerName: AdminServer
    +#
    +# Port number for admin server
    +adminServerPort: 7001
    +
    +# Cluster name
    +oamClusterName: oam_cluster
    +
    +# Port number for managed server
    +oamManagedServerPort: 14100
    +
    +# WebLogic Monitoring Exporter to Cluster
    +wlsMonitoringExporterTooamCluster: true
    +
    +# Cluster name
    +policyClusterName: policy_cluster
    +
    +# Port number for managed server
    +policyManagedServerPort: 15100
    +
    +# WebLogic Monitoring Exporter to Cluster
    +wlsMonitoringExporterTopolicyCluster: true
    +
    +
    +# Boolean to indicate if the adminNodePort will be exposed
    +exposeMonitoringNodePort: true
    +
    +# NodePort to expose Prometheus
    +prometheusNodePort: 32101
    +
    +# NodePort to expose Grafana
    +grafanaNodePort: 32100
    +
    +# NodePort to expose Alertmanager
    +alertmanagerNodePort: 32102
    +
    +# Name of the Kubernetes secret for the Admin Server's username and password
    +weblogicCredentialsSecretName: accessdomain-credentials
    +

    Note: If your cluster does not have access to the internet to pull external images, such as grafana or prometheus, you must load the images in a local container registry. You must then use additionalParamForKubePrometheusStack to specify the location of the images in your local container registry, for example:

    +
    # Additional parameters for helm install kube-prometheus-stack
    +# Refer https://github.com/prometheus-community/helm-charts/blob/main/charts/kube-prometheus-stack/values.yaml for additional parameters
    +# Sample :
    +# additionalParamForKubePrometheusStack: --set nodeExporter.enabled=false --set prometheusOperator.tls.enabled=false --set prometheusOperator.admissionWebhooks.enabled=false
    +additionalParamForKubePrometheusStack: --set grafana.image.repository=container-registry.example.com/grafana --set grafana.image.tag=8.3.4
    +
  2. +
  3. +

    Run the following commands to set up monitoring:

    +
    $ cd $WORKDIR/kubernetes/monitoring-service
    +$ ./setup-monitoring.sh -i monitoring-inputs.yaml
    +

    The output should be similar to the following:

    +
    Monitoring setup in  monitoring in progress
    +
    +node/worker-node1 not labeled
    +node/worker-node2 not labeled
    +node/master-node not labeled
    +Setup prometheus-community/kube-prometheus-stack started
    +"prometheus-community" has been added to your repositories
    +Hang tight while we grab the latest from your chart repositories...
    +   ...Successfully got an update from the "stable" chart repository
    +   ...Successfully got an update from the "prometheus" chart repository
    +   ...Successfully got an update from the "prometheus-community" chart repository
    +   ...Successfully got an update from the "appscode" chart repository
    +Update Complete. ⎈ Happy Helming!⎈ 
    +Setup prometheus-community/kube-prometheus-stack in progress
    +W0320 <DATE>   19121 warnings.go:70] policy/v1beta1 PodSecurityPolicy is deprecated in v1.21+, unavailable in v1.25+
    +W0320 <DATE>   19121 warnings.go:70] policy/v1beta1 PodSecurityPolicy is deprecated in v1.21+, unavailable in v1.25+
    +W0320 <DATE>   19121 warnings.go:70] policy/v1beta1 PodSecurityPolicy is deprecated in v1.21+, unavailable in v1.25+
    +..
    +W0320 <DATE>   19121 warnings.go:70] policy/v1beta1 PodSecurityPolicy is deprecated in v1.21+, unavailable in v1.25+
    +NAME: monitoring
    +LAST DEPLOYED: <DATE>
    +NAMESPACE: monitoring
    +STATUS: deployed
    +REVISION: 1
    +NOTES:
    +kube-prometheus-stack has been installed. Check its status by running:
    +  kubectl --namespace monitoring get pods -l "release=monitoring"
    +
    +Visit https://github.com/prometheus-operator/kube-prometheus for instructions on how to create & configure Alertmanager and Prometheus instances using the Operator.
    +Setup prometheus-community/kube-prometheus-stack completed
    +Deploy WebLogic Monitoring Exporter started
    +Deploying WebLogic Monitoring Exporter with domainNamespace[oamns], domainUID[accessdomain], adminServerPodName[accessdomain-adminserver]
    +  % Total    % Received % Xferd  Average Speed   Time    Time     Time  Current
    +                                 Dload  Upload   Total   Spent    Left  Speed
    +100   655  100   655    0     0   1564      0 --:--:-- --:--:-- --:--:--  1566
    +100 2196k  100 2196k    0     0  2025k      0  0:00:01  0:00:01 --:--:-- 5951k
    +created $WORKDIR/kubernetes/monitoring-service/scripts/wls-exporter-deploy dir
    +created /tmp/ci-EHhB7bP847
    +/tmp/ci-EHhB7bP847 $WORKDIR/kubernetes/monitoring-service
    +in temp dir
    +  adding: WEB-INF/weblogic.xml (deflated 61%)
    +  adding: config.yml (deflated 60%)
    +$WORKDIR/kubernetes/monitoring-service
    +created /tmp/ci-e7wPrlLlud
    +14:26
    +/tmp/ci-e7wPrlLlud $WORKDIR/kubernetes/monitoring-service
    +in temp dir
    +  adding: WEB-INF/weblogic.xml (deflated 61%)
    +  adding: config.yml (deflated 60%)
    +$WORKDIR/kubernetes/monitoring-service
    +created /tmp/ci-U38XXs6d06
    +/tmp/ci-U38XXs6d06 $WORKDIR/kubernetes/monitoring-service
    +in temp dir
    +  adding: WEB-INF/weblogic.xml (deflated 61%)
    +  adding: config.yml (deflated 60%)
    +$WORKDIR/kubernetes/monitoring-service
    +
    +Initializing WebLogic Scripting Tool (WLST) ...
    +
    +Welcome to WebLogic Server Administration Scripting Shell
    +
    +Type help() for help on available commands
    +
    +Connecting to t3://accessdomain-adminserver:7001 with userid weblogic ...
    +Successfully connected to Admin Server "AdminServer" that belongs to domain "accessdomain".
    +
    +Warning: An insecure protocol was used to connect to the server. 
    +To ensure on-the-wire security, the SSL port or Admin port should be used instead.
    +
    +Deploying .........
    +Deploying application from /u01/oracle/wls-exporter-deploy/wls-exporter-adminserver.war to targets AdminServer (upload=true) ...
    +<DATE> <Info> <J2EE Deployment SPI> <BEA-260121> <Initiating deploy operation for application, wls-exporter-adminserver [archive: /u01/oracle/wls-exporter-deploy/wls-exporter-adminserver.war], to AdminServer .> 
    +.Completed the deployment of Application with status completed
    +Current Status of your Deployment:
    +Deployment command type: deploy
    +Deployment State : completed
    +Deployment Message : no message
    +Starting application wls-exporter-adminserver.
    +<DATE> <Info> <J2EE Deployment SPI> <BEA-260121> <Initiating start operation for application, wls-exporter-adminserver [archive: null], to AdminServer .> 
    +Completed the start of Application with status completed
    +Current Status of your Deployment:
    +Deployment command type: start
    +Deployment State : completed
    +14:27
    +Deployment command type: start
    +Deployment State : completed
    +Deployment Message : no message
    +Deploying .........
    +Deploying application from /u01/oracle/wls-exporter-deploy/wls-exporter-oam.war to targets oam_cluster (upload=true) ...
    +<DATE> <Info> <J2EE Deployment SPI> <BEA-260121> <Initiating deploy operation for application, wls-exporter-oam [archive: /u01/oracle/wls-exporter-deploy/wls-exporter-oam.war], to oam_cluster .> 
    +.Completed the deployment of Application with status completed
    +Current Status of your Deployment:
    +Deployment command type: deploy
    +Deployment State : completed
    +Deployment Message : no message
    +Starting application wls-exporter-oam.
    +<DATE> <Info> <J2EE Deployment SPI> <BEA-260121> <Initiating start operation for application, wls-exporter-oam [archive: null], to oam_cluster .> 
    +.Completed the start of Application with status completed
    +Current Status of your Deployment:
    +Deployment command type: start
    +Deployment State : completed
    +Deployment Message : no message
    +Deploying .........
    +Deploying application from /u01/oracle/wls-exporter-deploy/wls-exporter-policy.war to targets policy_cluster (upload=true) ...
    +<DATE> <Info> <J2EE Deployment SPI> <BEA-260121> <Initiating deploy operation for application, wls-exporter-policy [archive: /u01/oracle/wls-exporter-deploy/wls-exporter-policy.war], to policy_cluster .> 
    +.Completed the deployment of Application with status completed
    +Current Status of your Deployment:
    +Deployment command type: deploy
    +Deployment State : completed
    +Deployment Message : no message
    +Starting application wls-exporter-policy.
    +<DATE> <Info> <J2EE Deployment SPI> <BEA-260121> <Initiating start operation for application, wls-exporter-policy [archive: null], to policy_cluster .> 
    +.Completed the start of Application with status completed
    +Current Status of your Deployment:
    +Deployment command type: start
    +Deployment State : completed
    +Deployment Message : no message
    +Disconnected from weblogic server: AdminServer
    + 
    +
    +Exiting WebLogic Scripting Tool.
    +
    +<DATE> <Warning> <JNDI> <BEA-050001> <WLContext.close() was called in a different thread than the one in which it was created.> 
    +14:27
    +Deploy WebLogic Monitoring Exporter completed
    +secret/basic-auth created
    +servicemonitor.monitoring.coreos.com/wls-exporter created
    +Deploying WebLogic Server Grafana Dashboard....
    +{"id":25,"slug":"weblogic-server-dashboard","status":"success","uid":"5yUwzbZWz","url":"/d/5yUwzbZWz/weblogic-server-dashboard","version":1}
    +Deployed WebLogic Server Grafana Dashboard successfully
    +
    +Grafana is available at NodePort: 32100
    +Prometheus is available at NodePort: 32101
    +Altermanager is available at NodePort: 32102
    +==============================================================
    +
  4. +
+

Prometheus service discovery

+

After the ServiceMonitor is deployed, the wls-exporter should be discovered by Prometheus and be able to collect metrics.

+
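
As an optional command line check, you can also query the Prometheus HTTP API to confirm that the wls-exporter targets have been discovered. This is only a sketch, assuming the Prometheus NodePort (32101) configured earlier and that curl and grep are available on the host:

+
$ curl -s http://${MASTERNODE-HOSTNAME}:32101/api/v1/targets | grep wls-exporter
+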
    +
  1. +

    Access the following URL to view Prometheus service discovery: http://${MASTERNODE-HOSTNAME}:32101/service-discovery

    +
  2. +
  3. +

    Click on serviceMonitor/oamns/wls-exporter/0 and then show more. Verify all the targets are mentioned.

    +
  4. +
+

Note: It may take several minutes for serviceMonitor/oamns/wls-exporter/0 to appear, so refresh the page until it does.

+

Grafana dashboard

+
    +
  1. +

    Access the Grafana dashboard with the following URL: http://${MASTERNODE-HOSTNAME}:32100 and login with admin/admin. Change your password when prompted.

    +
  2. +
  3. +

    In the Dashboards panel, click on WebLogic Server Dashboard. The dashboard for your OAM domain should be displayed. If it is not displayed, click the Search icon in the left hand menu and search for WebLogic Server Dashboard.

    +
  4. +
+

Cleanup

+

To uninstall Prometheus, Grafana, the WebLogic Monitoring Exporter and the associated deployments, you can run the $WORKDIR/kubernetes/monitoring-service/delete-monitoring.sh script. For usage details execute ./delete-monitoring.sh -h.

+
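
For example, to display the usage help for the delete script before running it:

+
$ cd $WORKDIR/kubernetes/monitoring-service
+$ ./delete-monitoring.sh -h
+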
    +
  1. +

    To uninstall, run the following commands:

    +
    $ cd $WORKDIR/kubernetes/monitoring-service
    +$ ./delete-monitoring.sh -i monitoring-inputs.yaml
    +$ kubectl delete namespace monitoring
    +
  2. +
+

Setup using manual configuration

+

Install Prometheus, Grafana and WebLogic Monitoring Exporter manually. Create the web applications and deploy to the OAM domain.

+

Deploy the Prometheus operator

+
    +
  1. +

    Kube-Prometheus requires all nodes to be labelled with kubernetes.io/os=linux. To check if your nodes are labelled, run the following:

    +
    $ kubectl get nodes --show-labels
    +

    If the nodes are labelled the output will look similar to the following:

    +
    NAME             STATUS   ROLES    AGE   VERSION   LABELS
    +worker-node1     Ready    <none>   42d   v1.20.10  beta.kubernetes.io/arch=amd64,beta.kubernetes.io/os=linux,kubernetes.io/arch=amd64,kubernetes.io/hostname=worker-node1,kubernetes.io/os=linux
    +worker-node2     Ready    <none>   42d   v1.20.10  beta.kubernetes.io/arch=amd64,beta.kubernetes.io/os=linux,kubernetes.io/arch=amd64,kubernetes.io/hostname=worker-node2,kubernetes.io/os=linux
    +master-node      Ready    master   42d   v1.20.10 beta.kubernetes.io/arch=amd64,beta.kubernetes.io/os=linux,kubernetes.io/arch=amd64,kubernetes.io/hostname=master-node,kubernetes.io/os=linux,node-role.kubernetes.io/master=
    +

    If the nodes are not labelled, run the following command:

    +
    $ kubectl label nodes --all kubernetes.io/os=linux
    +
  2. +
  3. +

    Clone Prometheus by running the following commands:

    +
    $ cd $WORKDIR/kubernetes/monitoring-service
    +$ git clone https://github.com/coreos/kube-prometheus.git -b v0.7.0
    +

    Note: Refer to the Kube Prometheus compatibility matrix and download the release of the repository that matches the Kubernetes version of your cluster.

    +
  4. +
  5. +

    If your cluster does not have access to the internet to pull external images, such as grafana, you must load the images in a local container registry (a typical image mirroring sequence is sketched at the end of this section).

    +

    For grafana, edit the $WORKDIR/kubernetes/monitoring-service/kube-prometheus/manifests/grafana-deployment.yaml and change image: grafana/grafana:7.3.4 to your local container registry image location, for example image: container-registry.example.com/grafana/grafana:8.3.4.

    +

    For any other images check the $WORKDIR/kubernetes/monitoring-service/kube-prometheus/manifests/*deployment.yaml files.

    +
  6. +
  7. +

    Run the following command to create the namespace and custom resource definitions:

    +
    $ cd $WORKDIR/kubernetes/monitoring-service/kube-prometheus
    +$ kubectl create -f manifests/setup
    +

    The output will look similar to the following:

    +
    namespace/monitoring created
    +customresourcedefinition.apiextensions.k8s.io/alertmanagerconfigs.monitoring.coreos.com created
    +customresourcedefinition.apiextensions.k8s.io/alertmanagers.monitoring.coreos.com created
    +customresourcedefinition.apiextensions.k8s.io/podmonitors.monitoring.coreos.com created
    +customresourcedefinition.apiextensions.k8s.io/probes.monitoring.coreos.com created
    +customresourcedefinition.apiextensions.k8s.io/prometheuses.monitoring.coreos.com created
    +customresourcedefinition.apiextensions.k8s.io/prometheusrules.monitoring.coreos.com created
    +customresourcedefinition.apiextensions.k8s.io/servicemonitors.monitoring.coreos.com created
    +customresourcedefinition.apiextensions.k8s.io/thanosrulers.monitoring.coreos.com created
    +clusterrole.rbac.authorization.k8s.io/prometheus-operator created
    +clusterrolebinding.rbac.authorization.k8s.io/prometheus-operator created
    +deployment.apps/prometheus-operator created
    +service/prometheus-operator created
    +serviceaccount/prometheus-operator created
    +
  8. +
  9. +

    Run the following command to create the rest of the resources:

    +
    $ kubectl create -f manifests/
    +

    The output will look similar to the following:

    +
    alertmanager.monitoring.coreos.com/main created
    +networkpolicy.networking.k8s.io/alertmanager-main created
    +poddisruptionbudget.policy/alertmanager-main created
    +prometheusrule.monitoring.coreos.com/alertmanager-main-rules created
    +secret/alertmanager-main created
    +service/alertmanager-main created
    +serviceaccount/alertmanager-main created
    +servicemonitor.monitoring.coreos.com/alertmanager-main created
    +clusterrole.rbac.authorization.k8s.io/blackbox-exporter created
    +clusterrolebinding.rbac.authorization.k8s.io/blackbox-exporter created
    +configmap/blackbox-exporter-configuration created
    +deployment.apps/blackbox-exporter created
    +networkpolicy.networking.k8s.io/blackbox-exporter created
    +service/blackbox-exporter created
    +serviceaccount/blackbox-exporter created
    +servicemonitor.monitoring.coreos.com/blackbox-exporter created
    +secret/grafana-config created
    +secret/grafana-datasources created
    +configmap/grafana-dashboard-alertmanager-overview created
    +configmap/grafana-dashboard-apiserver created
    +configmap/grafana-dashboard-cluster-total created
    +configmap/grafana-dashboard-controller-manager created
    +configmap/grafana-dashboard-grafana-overview created
    +configmap/grafana-dashboard-k8s-resources-cluster created
    +configmap/grafana-dashboard-k8s-resources-namespace created
    +configmap/grafana-dashboard-k8s-resources-node created
    +configmap/grafana-dashboard-k8s-resources-pod created
    +configmap/grafana-dashboard-k8s-resources-workload created
    +configmap/grafana-dashboard-k8s-resources-workloads-namespace created
    +configmap/grafana-dashboard-kubelet created
    +configmap/grafana-dashboard-namespace-by-pod created
    +configmap/grafana-dashboard-namespace-by-workload created
    +configmap/grafana-dashboard-node-cluster-rsrc-use created
    +configmap/grafana-dashboard-node-rsrc-use created
    +configmap/grafana-dashboard-nodes-darwin created
    +configmap/grafana-dashboard-nodes created
    +configmap/grafana-dashboard-persistentvolumesusage created
    +configmap/grafana-dashboard-pod-total created
    +configmap/grafana-dashboard-prometheus-remote-write created
    +configmap/grafana-dashboard-prometheus created
    +configmap/grafana-dashboard-proxy created
    +configmap/grafana-dashboard-scheduler created
    +configmap/grafana-dashboard-workload-total created
    +configmap/grafana-dashboards created
    +deployment.apps/grafana created
    +networkpolicy.networking.k8s.io/grafana created
    +prometheusrule.monitoring.coreos.com/grafana-rules created
    +service/grafana created
    +serviceaccount/grafana created
    +servicemonitor.monitoring.coreos.com/grafana created
    +prometheusrule.monitoring.coreos.com/kube-prometheus-rules created
    +clusterrole.rbac.authorization.k8s.io/kube-state-metrics created
    +clusterrolebinding.rbac.authorization.k8s.io/kube-state-metrics created
    +deployment.apps/kube-state-metrics created
    +networkpolicy.networking.k8s.io/kube-state-metrics created
    +prometheusrule.monitoring.coreos.com/kube-state-metrics-rules created
    +service/kube-state-metrics created
    +serviceaccount/kube-state-metrics created
    +servicemonitor.monitoring.coreos.com/kube-state-metrics created
    +prometheusrule.monitoring.coreos.com/kubernetes-monitoring-rules created
    +servicemonitor.monitoring.coreos.com/kube-apiserver created
    +servicemonitor.monitoring.coreos.com/coredns created
    +servicemonitor.monitoring.coreos.com/kube-controller-manager created
    +servicemonitor.monitoring.coreos.com/kube-scheduler created
    +servicemonitor.monitoring.coreos.com/kubelet created
    +clusterrole.rbac.authorization.k8s.io/node-exporter created
    +clusterrolebinding.rbac.authorization.k8s.io/node-exporter created
    +daemonset.apps/node-exporter created
    +networkpolicy.networking.k8s.io/node-exporter created
    +prometheusrule.monitoring.coreos.com/node-exporter-rules created
    +service/node-exporter created
    +serviceaccount/node-exporter created
    +servicemonitor.monitoring.coreos.com/node-exporter created
    +clusterrole.rbac.authorization.k8s.io/prometheus-k8s created
    +clusterrolebinding.rbac.authorization.k8s.io/prometheus-k8s created
    +networkpolicy.networking.k8s.io/prometheus-k8s created
    +poddisruptionbudget.policy/prometheus-k8s created
    +prometheus.monitoring.coreos.com/k8s created
    +prometheusrule.monitoring.coreos.com/prometheus-k8s-prometheus-rules created
    +rolebinding.rbac.authorization.k8s.io/prometheus-k8s-config created
    +rolebinding.rbac.authorization.k8s.io/prometheus-k8s created
    +rolebinding.rbac.authorization.k8s.io/prometheus-k8s created
    +rolebinding.rbac.authorization.k8s.io/prometheus-k8s created
    +role.rbac.authorization.k8s.io/prometheus-k8s-config created
    +role.rbac.authorization.k8s.io/prometheus-k8s created
    +role.rbac.authorization.k8s.io/prometheus-k8s created
    +role.rbac.authorization.k8s.io/prometheus-k8s created
    +service/prometheus-k8s created
    +serviceaccount/prometheus-k8s created
    +servicemonitor.monitoring.coreos.com/prometheus-k8s created
    +apiservice.apiregistration.k8s.io/v1beta1.metrics.k8s.io created
    +clusterrole.rbac.authorization.k8s.io/prometheus-adapter created
    +clusterrole.rbac.authorization.k8s.io/system:aggregated-metrics-reader created
    +clusterrolebinding.rbac.authorization.k8s.io/prometheus-adapter created
    +clusterrolebinding.rbac.authorization.k8s.io/resource-metrics:system:auth-delegator created
    +clusterrole.rbac.authorization.k8s.io/resource-metrics-server-resources created
    +configmap/adapter-config created
    +deployment.apps/prometheus-adapter created
    +networkpolicy.networking.k8s.io/prometheus-adapter created
    +poddisruptionbudget.policy/prometheus-adapter created
    +rolebinding.rbac.authorization.k8s.io/resource-metrics-auth-reader created
    +service/prometheus-adapter created
    +serviceaccount/prometheus-adapter created
    +servicemonitor.monitoring.coreos.com/prometheus-adapter created
    +clusterrole.rbac.authorization.k8s.io/prometheus-operator created
    +clusterrolebinding.rbac.authorization.k8s.io/prometheus-operator created
    +deployment.apps/prometheus-operator created
    +networkpolicy.networking.k8s.io/prometheus-operator created
    +prometheusrule.monitoring.coreos.com/prometheus-operator-rules created
    +service/prometheus-operator created
    +serviceaccount/prometheus-operator created
    +servicemonitor.monitoring.coreos.com/prometheus-operator created
    +
  10. +
  11. +

    Provide external access for Grafana, Prometheus, and Alertmanager, by running the following commands:

    +
    $ kubectl patch svc grafana -n monitoring --type=json -p '[{"op": "replace", "path": "/spec/type", "value": "NodePort" },{"op": "replace", "path": "/spec/ports/0/nodePort", "value": 32100 }]'
    +   
    +$ kubectl patch svc prometheus-k8s -n monitoring --type=json -p '[{"op": "replace", "path": "/spec/type", "value": "NodePort" },{"op": "replace", "path": "/spec/ports/0/nodePort", "value": 32101 }]'
    + 
    +$ kubectl patch svc alertmanager-main -n monitoring --type=json -p '[{"op": "replace", "path": "/spec/type", "value": "NodePort" },{"op": "replace", "path": "/spec/ports/0/nodePort", "value": 32102 }]'
    +

    Note: This assigns port 32100 to Grafana, 32101 to Prometheus, and 32102 to Alertmanager.

    +

    The output will look similar to the following:

    +
    service/grafana patched
    +service/prometheus-k8s patched
    +service/alertmanager-main patched
    +
  12. +
  13. +

    Verify that the Prometheus, Grafana, and Alertmanager pods are running in the monitoring namespace and that the respective services have the NodePorts configured correctly:

    +
    $ kubectl get pods,services -o wide -n monitoring
    +

    The output should look similar to the following:

    +
    NAME                                      READY   STATUS    RESTARTS   AGE   IP             NODE           NOMINATED NODE   READINESS GATES
    +pod/alertmanager-main-0                    2/2     Running  0          67s   10.244.1.7     worker-node1   <none>           <none>
    +pod/alertmanager-main-1                    2/2     Running  0          67s   10.244.2.26    worker-node2   <none>           <none>
    +pod/alertmanager-main-2                    2/2     Running  0          67s   10.244.1.8     worker-node1   <none>           <none>
    +pod/grafana-f8cd57fcf-tmlqt                1/1     Running  0          65s   10.244.2.28    worker-node2   <none>           <none>
    +pod/kube-state-metrics-587bfd4f97-l8knh    3/3     Running  0          65s   10.244.1.9     worker-node1   <none>           <none>
    +pod/node-exporter-2ztpd                    2/2     Running  0          65s   10.247.95.26   worker-node1   <none>           <none>
    +pod/node-exporter-92sxb                    2/2     Running  0          65s   10.250.40.59   worker-node2   <none>           <none>
    +pod/node-exporter-d77tl                    2/2     Running  0          65s   10.196.54.36   master-node    <none>           <none>
    +pod/prometheus-adapter-69b8496df6-6gqrz    1/1     Running  0          65s   10.244.2.29    worker-node2   <none>           <none>
    +pod/prometheus-k8s-0                       2/2     Running  1          66s   10.244.2.27    worker-node2   <none>           <none>
    +pod/prometheus-k8s-1                       2/2     Running  1          66s   10.244.1.10    worker-node1   <none>           <none>
    +pod/prometheus-operator-7649c7454f-9p747   2/2     Running  0          2m    10.244.2.25    worker-node2   <none>           <none>
    +
    +NAME                            TYPE        CLUSTER-IP      EXTERNAL-IP   PORT(S)                      AGE    SELECTOR
    +service/alertmanager-main       NodePort    10.104.92.62    <none>        9093:32102/TCP               67s    alertmanager=main,app=alertmanager
    +service/alertmanager-operated   ClusterIP   None            <none>        9093/TCP,9094/TCP,9094/UDP   67s    app=alertmanager
    +service/grafana                 NodePort    10.100.171.3    <none>        3000:32100/TCP               66s    app=grafana
    +service/kube-state-metrics      ClusterIP   None            <none>        8443/TCP,9443/TCP            66s    app.kubernetes.io/name=kube-state-metrics
    +service/node-exporter           ClusterIP   None            <none>        9100/TCP                     66s    app.kubernetes.io/name=node-exporter
    +service/prometheus-adapter      ClusterIP   10.109.248.92   <none>        443/TCP                      66s    name=prometheus-adapter
    +service/prometheus-k8s          NodePort    10.98.212.247   <none>        9090:32101/TCP               66s    app=prometheus,prometheus=k8s
    +service/prometheus-operated     ClusterIP   None            <none>        9090/TCP                     66s    app=prometheus
    +service/prometheus-operator     ClusterIP   None            <none>        8443/TCP                     2m1s   app.kubernetes.io/component=controller,app.kubernetes.io/name=prometheus-operator
    +
  14. +
+
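
As noted earlier in this section, if images such as grafana must be loaded into a local container registry, a typical mirroring sequence is sketched below. This is an illustration only: it assumes docker is available on a host with internet access and uses the example registry name from this guide; substitute your own registry and the image versions that match your kube-prometheus release.

+
$ docker pull grafana/grafana:7.3.4
+$ docker tag grafana/grafana:7.3.4 container-registry.example.com/grafana/grafana:7.3.4
+$ docker push container-registry.example.com/grafana/grafana:7.3.4
+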

Deploy WebLogic Monitoring Exporter

+
    +
  1. +

    Generate the WebLogic Monitoring Exporter deployment package. The wls-exporter.war package needs to be updated and created for each listening port (Administration Server and Managed Servers) in the domain. Set the following environment values and run the script get-wls-exporter.sh to generate the required WAR files at ${WORKDIR}/kubernetes/monitoring-service/scripts/wls-exporter-deploy:

    +
    $ cd $WORKDIR/kubernetes/monitoring-service/scripts
    +$ export adminServerPort=7001
    +$ export wlsMonitoringExporterTopolicyCluster=true
    +$ export policyManagedServerPort=15100
    +$ export wlsMonitoringExporterTooamCluster=true
    +$ export oamManagedServerPort=14100
    +$ sh get-wls-exporter.sh
    +

    The output will look similar to the following:

    +
      % Total    % Received % Xferd  Average Speed   Time    Time     Time  Current
    +                              Dload  Upload   Total   Spent    Left  Speed
    +100   655  100   655    0     0   1107      0 --:--:-- --:--:-- --:--:--  1108
    +100 2196k  100 2196k    0     0  1787k      0  0:00:01  0:00:01 --:--:-- 9248k
    +created $WORKDIR/kubernetes/monitoring-service/scripts/wls-exporter-deploy dir
    +domainNamespace is empty, setting to default oamns
    +domainUID is empty, setting to default accessdomain
    +weblogicCredentialsSecretName is empty, setting to default "accessdomain-domain-credentials"
    +adminServerName is empty, setting to default "AdminServer"
    +oamClusterName is empty, setting to default "oam_cluster"
    +policyClusterName is empty, setting to default "policy_cluster"
    +created /tmp/ci-Bu74rCBxwu
    +/tmp/ci-Bu74rCBxwu $WORKDIR/kubernetes/monitoring-service/scripts
    +in temp dir
    +  adding: WEB-INF/weblogic.xml (deflated 61%)
    +  adding: config.yml (deflated 60%)
    +$WORKDIR/kubernetes/monitoring-service/scripts
    +created /tmp/ci-RQv3rLbLsX
    +/tmp/ci-RQv3rLbLsX $WORKDIR/kubernetes/monitoring-service/scripts
    +in temp dir
    +  adding: WEB-INF/weblogic.xml (deflated 61%)
    +  adding: config.yml (deflated 60%)
    +$WORKDIR/kubernetes/monitoring-service/scripts
    +created /tmp/ci-DWIYlocP5e
    +/tmp/ci-DWIYlocP5e $WORKDIR/kubernetes/monitoring-service/scripts
    +in temp dir
    +  adding: WEB-INF/weblogic.xml (deflated 61%)
    +  adding: config.yml (deflated 60%)
    +$WORKDIR/kubernetes/monitoring-service/scripts
    +
  2. +
  3. +

    Deploy the WebLogic Monitoring Exporter WAR files into the Oracle Access Management domain:

    +
    $ cd $WORKDIR/kubernetes/monitoring-service/scripts
    +$ kubectl cp wls-exporter-deploy <domain_namespace>/<domain_uid>-adminserver:/u01/oracle
    +$ kubectl cp deploy-weblogic-monitoring-exporter.py <domain_namespace>/<domain_uid>-adminserver:/u01/oracle/wls-exporter-deploy
    +$ kubectl exec -it -n <domain_namespace> <domain_uid>-adminserver -- /u01/oracle/oracle_common/common/bin/wlst.sh /u01/oracle/wls-exporter-deploy/deploy-weblogic-monitoring-exporter.py -domainName <domain_uid> -adminServerName AdminServer -adminURL <domain_uid>-adminserver:7001 -username weblogic -password <password> -oamClusterName oam_cluster -wlsMonitoringExporterTooamCluster true -policyClusterName policy_cluster -wlsMonitoringExporterTopolicyCluster true
    +

    For example:

    +
    $ cd $WORKDIR/kubernetes/monitoring-service/scripts
    +$ kubectl cp wls-exporter-deploy oamns/accessdomain-adminserver:/u01/oracle
    +$ kubectl cp deploy-weblogic-monitoring-exporter.py oamns/accessdomain-adminserver:/u01/oracle/wls-exporter-deploy
    +$ kubectl exec -it -n oamns accessdomain-adminserver -- /u01/oracle/oracle_common/common/bin/wlst.sh /u01/oracle/wls-exporter-deploy/deploy-weblogic-monitoring-exporter.py -domainName accessdomain -adminServerName AdminServer -adminURL accessdomain-adminserver:7001 -username weblogic -password <password> -oamClusterName oam_cluster -wlsMonitoringExporterTooamCluster true -policyClusterName policy_cluster -wlsMonitoringExporterTopolicyCluster true
    +

    The output will look similar to the following:

    +
    Initializing WebLogic Scripting Tool (WLST) ...
    +
    +Welcome to WebLogic Server Administration Scripting Shell
    +
    +Type help() for help on available commands
    +
    +Connecting to t3://accessdomain-adminserver:7001 with userid weblogic ...
    +Successfully connected to Admin Server "AdminServer" that belongs to domain "accessdomain".
    +
    +Warning: An insecure protocol was used to connect to the server.
    +To ensure on-the-wire security, the SSL port or Admin port should be used instead.
    +
    +Deploying .........
    +Deploying application from /u01/oracle/wls-exporter-deploy/wls-exporter-adminserver.war to targets AdminServer (upload=true) ...
    +<DATE> <Info> <J2EE Deployment SPI> <BEA-260121> <Initiating deploy operation for application, wls-exporter-adminserver [archive:    /u01/oracle/wls-exporter-deploy/wls-exporter-adminserver.war], to AdminServer .>
    +..Completed the deployment of Application with status completed
    +Current Status of your Deployment:
    +Deployment command type: deploy
    +Deployment State : completed
    +Deployment Message : no message
    +Starting application wls-exporter-adminserver.
    +<DATE> <Info> <J2EE Deployment SPI> <BEA-260121> <Initiating start operation for application, wls-exporter-adminserver [archive: null], to AdminServer .>
    +.Completed the start of Application with status completed
    +Current Status of your Deployment:
    +Deployment command type: start
    +Deployment State : completed
    +Deployment Message : no message
    +Deploying .........
    +Deploying application from /u01/oracle/wls-exporter-deploy/wls-exporter-oam.war to targets oam_cluster (upload=true) ...
    +<DATE> <Info> <J2EE Deployment SPI> <BEA-260121> <Initiating deploy operation for application, wls-exporter-oam [archive: /u01/oracle/wls-exporter-deploy/wls-exporter-oam.war], to oam_cluster .>
    +.Completed the deployment of Application with status completed
    +Current Status of your Deployment:
    +Deployment command type: deploy
    +Deployment State : completed
    +Deployment Message : no message
    +Starting application wls-exporter-oam.
    +<DATE> <Info> <J2EE Deployment SPI> <BEA-260121> <Initiating start operation for application, wls-exporter-oam [archive: null], to oam_cluster .>
    +.Completed the start of Application with status completed
    +Current Status of your Deployment:
    +Deployment command type: start
    +Deployment State : completed
    +Deployment Message : no message
    +Deploying .........
    +Deploying application from /u01/oracle/wls-exporter-deploy/wls-exporter-policy.war to targets policy_cluster (upload=true) ...
    +<DATE> <Info> <J2EE Deployment SPI> <BEA-260121> <Initiating deploy operation for application, wls-exporter-policy [archive: /u01/oracle/wls-exporter-deploy/wls-exporter-policy.war], to policy_cluster .>
    +.Completed the deployment of Application with status completed
    +Current Status of your Deployment:
    +Deployment command type: deploy
    +Deployment State : completed
    +Deployment Message : no message
    +Starting application wls-exporter-policy.
    +<DATE> <Info> <J2EE Deployment SPI> <BEA-260121> <Initiating start operation for application, wls-exporter-policy [archive: null], to policy_cluster .>
    +.Completed the start of Application with status completed
    +Current Status of your Deployment:
    +Deployment command type: start
    +Deployment State : completed
    +Deployment Message : no message
    +Disconnected from weblogic server: AdminServer
    +
    +Exiting WebLogic Scripting Tool.
    +
    +<DATE> <Warning> <JNDI> <BEA-050001> <WLContext.close() was called in a different thread than the one in which it was created.>
    +
  4. +
+

Configure Prometheus Operator

+

Prometheus enables you to collect metrics from the WebLogic Monitoring Exporter. The Prometheus Operator identifies the targets using service discovery. To get the WebLogic Monitoring Exporter endpoint discovered as a target, you must create a service monitor pointing to the service.

+

The exporting of metrics from wls-exporter requires basicAuth, so a Kubernetes Secret is created with the user name and password base64 encoded. This Secret is used in the ServiceMonitor deployment. The wls-exporter-ServiceMonitor.yaml has basicAuth with the credentials username: weblogic and password: <password>, both base64 encoded.

+
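
If you prefer not to hand-edit the Secret values, the following is a sketch of an equivalent way to generate the basic-auth Secret manifest with kubectl, which base64 encodes the literals for you. It assumes the oamns namespace used in this guide; the ServiceMonitor section of wls-exporter-ServiceMonitor.yaml must still be updated as described below.

+
$ kubectl create secret generic basic-auth -n oamns --from-literal=user=weblogic --from-literal=password=<password> --dry-run=client -o yaml
+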
    +
  1. +

    Run the following command to get the base64 encoded version of the weblogic password:

    +
    $ echo -n "<password>" | base64
    +

    The output will look similar to the following:

    +
    V2VsY29tZTE=
    +
  2. +
  3. +

    Update the $WORKDIR/kubernetes/monitoring-service/manifests/wls-exporter-ServiceMonitor.yaml and change the password: value to the value returned above. Also change the namespace: and weblogic.domainName: values to match your OAM namespace and domain name:

    +
    apiVersion: v1
    +kind: Secret
    +metadata:
    +  name: basic-auth
    +  namespace: oamns
    +data:
    +  password: V2VsY29tZTE=
    +  user: d2VibG9naWM=
    +type: Opaque
    +---
    +apiVersion: monitoring.coreos.com/v1
    +kind: ServiceMonitor
    +metadata:
    +  name: wls-exporter
    +  namespace: oamns
    +  labels:
    +    k8s-app: wls-exporter
    +    release: monitoring
    +spec:
    +  namespaceSelector:
    +    matchNames:
    +    - oamns
    +  selector:
    +    matchLabels:
    +      weblogic.domainName: accessdomain
    +  endpoints:
    +  - basicAuth:
    +      password:
    +        name: basic-auth
    +        key: password
    +      username:
    +        name: basic-auth
    +        key: user
    +    port: default
    +    relabelings:
    +      - action: labelmap
    +        regex: __meta_kubernetes_service_label_(.+)
    +    interval: 10s
    +    honorLabels: true
    +    path: /wls-exporter/metrics
    +
  4. +
  5. +

    Update the $WORKDIR/kubernetes/monitoring-service/manifests/prometheus-roleSpecific-domain-namespace.yaml and change the namespace to match your OAM namespace. For example:

    +
    apiVersion: rbac.authorization.k8s.io/v1
    +items:
    +- apiVersion: rbac.authorization.k8s.io/v1
    +  kind: Role
    +  metadata:
    +    name: prometheus-k8s
    +    namespace: oamns
    +  rules:
    +  - apiGroups:
    +    - ""
    +    resources:
    +    - services
    +    - endpoints
    +    - pods
    +    verbs:
    +    - get
    +    - list
    +    - watch
    +kind: RoleList
    +
  6. +
  7. +

    Update the $WORKDIR/kubernetes/monitoring-service/manifests/prometheus-roleBinding-domain-namespace.yaml and change the namespace to match your OAM namespace. For example:

    +
    apiVersion: rbac.authorization.k8s.io/v1
    +items:
    +- apiVersion: rbac.authorization.k8s.io/v1
    +  kind: RoleBinding
    +  metadata:
    +    name: prometheus-k8s
    +    namespace: oamns
    +  roleRef:
    +    apiGroup: rbac.authorization.k8s.io
    +    kind: Role
    +    name: prometheus-k8s
    +  subjects:
    +  - kind: ServiceAccount
    +    name: prometheus-k8s
    +    namespace: monitoring
    +kind: RoleBindingList
    +
  8. +
  9. +

    Run the following command to enable Prometheus:

    +
    $ cd $WORKDIR/kubernetes/monitoring-service/manifests
    +$ kubectl apply -f .
    +

    The output will look similar to the following:

    +
    rolebinding.rbac.authorization.k8s.io/prometheus-k8s created
    +role.rbac.authorization.k8s.io/prometheus-k8s created
    +secret/basic-auth created
    +servicemonitor.monitoring.coreos.com/wls-exporter created
    +
  10. +
+

Prometheus Service Discovery

+

After the ServiceMonitor is deployed, the wls-exporter should be discovered by Prometheus and be able to collect metrics.

+
    +
  1. +

    Access the following URL to view Prometheus service discovery: http://${MASTERNODE-HOSTNAME}:32101/service-discovery

    +
  2. +
  3. +

    Click on oamns/wls-exporter/0 and then show more. Verify all the targets are mentioned.

    +
  4. +
+

Grafana Dashboard

+
    +
  1. +

    Access the Grafana dashboard with the following URL: http://${MASTERNODE-HOSTNAME}:32100 and login with admin/admin. Change your password when prompted.

    +
  2. +
  3. +

    Import the Grafana dashboard by navigating in the left hand menu to Create > Import. Copy the content from $WORKDIR/kubernetes/monitoring-service/config/weblogic-server-dashboard-import.json and paste it. Then click Load and Import. The dashboard should be displayed in the Dashboards panel.

    +
  4. +
+

Cleanup

+

To clean up a manual installation:

+
    +
  1. +

    Run the following commands:

    +
    $ cd $WORKDIR/kubernetes/monitoring-service/manifests/
    +$ kubectl delete -f .
    +
  2. +
  3. +

    Delete the deployments:

    +
    $ cd $WORKDIR/kubernetes/monitoring-service/scripts/
    +$ kubectl cp undeploy-weblogic-monitoring-exporter.py <domain_namespace>/<domain_uid>-adminserver:/u01/oracle/wls-exporter-deploy
    +$ kubectl exec -it -n <domain_namespace> <domain_uid>-adminserver -- /u01/oracle/oracle_common/common/bin/wlst.sh /u01/oracle/wls-exporter-deploy/undeploy-weblogic-monitoring-exporter.py -domainName <domain_uid>  -adminServerName AdminServer -adminURL <domain_uid>-adminserver:7001 -username weblogic -password <password> -oamClusterName oam_cluster -wlsMonitoringExporterTooamCluster true -policyClusterName policy_cluster -wlsMonitoringExporterTopolicyCluster true
    +
  4. +
  5. +

    Delete Prometheus:

    +
    $ cd $WORKDIR/kubernetes/monitoring-service/kube-prometheus
    +$ kubectl delete -f manifests
    +
  6. +
diff --git a/docs/23.4.1/idm-products/oam/manage-oam-domains/wlst-admin-operations/index.html b/docs/23.4.1/idm-products/oam/manage-oam-domains/wlst-admin-operations/index.html new file mode 100644 index 000000000..a7f3cf385 --- /dev/null +++ b/docs/23.4.1/idm-products/oam/manage-oam-domains/wlst-admin-operations/index.html @@ -0,0 +1,4261 @@

b. WLST Administration Operations


To use WLST to administer the OAM domain, use the helper pod in the same Kubernetes cluster as the OAM Domain.

+
    +
  1. +

    Check to see if the helper pod exists by running:

    +
    $ kubectl get pods -n <domain_namespace> | grep helper
    +

    For example:

    +
    $ kubectl get pods -n oamns | grep helper
    +

    The output should look similar to the following:

    +
    helper                                  1/1     Running     0          26h
    +

    If the helper pod doesn’t exist then see Step 1 in Prepare your environment to create it.

    +
  2. +
  3. +

    Run the following command to start a bash shell in the helper pod:

    +
    $ kubectl exec -it helper -n <domain_namespace> -- /bin/bash
    +

    For example:

    +
    $ kubectl exec -it helper -n oamns -- /bin/bash
    +

    This will take you into a bash shell in the running helper pod:

    +
    [oracle@helper ~]$
    +
  4. +
  5. +

    Connect to WLST using the following command:

    +
    $ cd $ORACLE_HOME/oracle_common/common/bin
    +$ ./wlst.sh
    +

    The output will look similar to the following:

    +
    Initializing WebLogic Scripting Tool (WLST) ...
    +
    +Jython scans all the jar files it can find at first startup. Depending on the system, this process may take a few minutes to complete, and WLST may not return a prompt right away.
    +
    +Welcome to WebLogic Server Administration Scripting Shell
    +
    +Type help() for help on available commands
    +
    +wls:/offline>
    +
  6. +
  7. +

    To access t3 for the Administration Server connect as follows:

    +
    wls:/offline> connect('weblogic','<password>','t3://accessdomain-adminserver:7001')
    +

    The output will look similar to the following:

    +
    Connecting to t3://accessdomain-adminserver:7001 with userid weblogic ...
    +Successfully connected to Admin Server "AdminServer" that belongs to domain "accessdomain".
    +
    +Warning: An insecure protocol was used to connect to the server.
    +To ensure on-the-wire security, the SSL port or Admin port should be used instead.
    +
    +wls:/accessdomain/serverConfig/>
    +

    Or to access t3 for the OAM Cluster service, connect as follows:

    +
    connect('weblogic','<password>','t3://accessdomain-cluster-oam-cluster:14100')
    +

    The output will look similar to the following:

    +
    Connecting to t3://accessdomain-cluster-oam-cluster:14100 with userid weblogic ...
    +Successfully connected to managed Server "oam_server1" that belongs to domain "accessdomain".
    +
    +Warning: An insecure protocol was used to connect to the server.
    +To ensure on-the-wire security, the SSL port or Admin port should be used instead.
    +
    +wls:/accessdomain/serverConfig/>
    +
  8. +
+

Sample operations

+

For a full list of WLST operations refer to WebLogic Server WLST Online and Offline Command Reference.

+

Display servers

+
wls:/accessdomain/serverConfig/> cd('/Servers')
+wls:/accessdomain/serverConfig/Servers> ls()
+   
+dr--   AdminServer
+dr--   oam_policy_mgr1
+dr--   oam_policy_mgr2
+dr--   oam_policy_mgr3
+dr--   oam_policy_mgr4
+dr--   oam_policy_mgr5
+dr--   oam_server1
+dr--   oam_server2
+dr--   oam_server3
+dr--   oam_server4
+dr--   oam_server5
+
+wls:/accessdomain/serverConfig/Servers>
+

Configure logging for managed servers

+

Connect to the Administration Server and run the following:

+
wls:/accessdomain/serverConfig/> domainRuntime()
+Location changed to domainRuntime tree. This is a read-only tree
+with DomainMBean as the root MBean.
+For more help, use help('domainRuntime')
+   
+wls:/accessdomain/domainRuntime/>
+   
+wls:/accessdomain/domainRuntime/> listLoggers(pattern="oracle.oam.*",target="oam_server1")
+------------------------------------------+-----------------
+Logger                                    | Level
+------------------------------------------+-----------------
+oracle.oam                                | <Inherited>
+oracle.oam.admin.foundation.configuration | <Inherited>
+oracle.oam.admin.service.config           | <Inherited>
+oracle.oam.agent                          | <Inherited>
+oracle.oam.agent-default                  | <Inherited>
+oracle.oam.audit                          | <Inherited>
+oracle.oam.binding                        | <Inherited>
+oracle.oam.certvalidation                 | <Inherited>
+oracle.oam.certvalidation.mbeans          | <Inherited>
+oracle.oam.common.healthcheck             | <Inherited>
+oracle.oam.common.runtimeent              | <Inherited>
+oracle.oam.commonutil                     | <Inherited>
+oracle.oam.config                         | <Inherited>
+oracle.oam.controller                     | <Inherited>
+oracle.oam.default                        | <Inherited>
+oracle.oam.diagnostic                     | <Inherited>
+oracle.oam.engine.authn                   | <Inherited>
+oracle.oam.engine.authz                   | <Inherited>
+oracle.oam.engine.policy                  | <Inherited>
+oracle.oam.engine.ptmetadata              | <Inherited>
+oracle.oam.engine.session                 | <Inherited>
+oracle.oam.engine.sso                     | <Inherited>
+oracle.oam.esso                           | <Inherited>
+oracle.oam.extensibility.lifecycle        | <Inherited>
+oracle.oam.foundation.access              | <Inherited>
+oracle.oam.idm                            | <Inherited>
+oracle.oam.install                        | <Inherited>
+oracle.oam.install.bootstrap              | <Inherited>
+oracle.oam.install.mbeans                 | <Inherited>
+oracle.oam.ipf.rest.api                   | <Inherited>
+oracle.oam.oauth                          | <Inherited>
+oracle.oam.plugin                         | <Inherited>
+oracle.oam.proxy.oam                      | <Inherited>
+oracle.oam.proxy.oam.workmanager          | <Inherited>
+oracle.oam.proxy.opensso                  | <Inherited>
+oracle.oam.pswd.service.provider          | <Inherited>
+oracle.oam.replication                    | <Inherited>
+oracle.oam.user.identity.provider         | <Inherited>
+wls:/accessdomain/domainRuntime/>
+

Set the log level to TRACE:32:

+
wls:/accessdomain/domainRuntime/> setLogLevel(target='oam_server1',logger='oracle.oam',level='TRACE:32',persist="1",addLogger=1)
+wls:/accessdomain/domainRuntime/>
+
+wls:/accessdomain/domainRuntime/> listLoggers(pattern="oracle.oam.*",target="oam_server1")
+------------------------------------------+-----------------
+Logger                                    | Level
+------------------------------------------+-----------------
+oracle.oam                                | TRACE:32
+oracle.oam.admin.foundation.configuration | <Inherited>
+oracle.oam.admin.service.config           | <Inherited>
+oracle.oam.agent                          | <Inherited>
+oracle.oam.agent-default                  | <Inherited>
+oracle.oam.audit                          | <Inherited>
+oracle.oam.binding                        | <Inherited>
+oracle.oam.certvalidation                 | <Inherited>
+oracle.oam.certvalidation.mbeans          | <Inherited>
+oracle.oam.common.healthcheck             | <Inherited>
+oracle.oam.common.runtimeent              | <Inherited>
+oracle.oam.commonutil                     | <Inherited>
+oracle.oam.config                         | <Inherited>
+oracle.oam.controller                     | <Inherited>
+oracle.oam.default                        | <Inherited>
+oracle.oam.diagnostic                     | <Inherited>
+oracle.oam.engine.authn                   | <Inherited>
+oracle.oam.engine.authz                   | <Inherited>
+oracle.oam.engine.policy                  | <Inherited>
+oracle.oam.engine.ptmetadata              | <Inherited>
+oracle.oam.engine.session                 | <Inherited>
+oracle.oam.engine.sso                     | <Inherited>
+oracle.oam.esso                           | <Inherited>
+oracle.oam.extensibility.lifecycle        | <Inherited>
+oracle.oam.foundation.access              | <Inherited>
+oracle.oam.idm                            | <Inherited>
+oracle.oam.install                        | <Inherited>
+oracle.oam.install.bootstrap              | <Inherited>
+oracle.oam.install.mbeans                 | <Inherited>
+oracle.oam.ipf.rest.api                   | <Inherited>
+oracle.oam.oauth                          | <Inherited>
+oracle.oam.plugin                         | <Inherited>
+oracle.oam.proxy.oam                      | <Inherited>
+oracle.oam.proxy.oam.workmanager          | <Inherited>
+oracle.oam.proxy.opensso                  | <Inherited>
+oracle.oam.pswd.service.provider          | <Inherited>
+oracle.oam.replication                    | <Inherited>
+oracle.oam.user.identity.provider         | <Inherited>
+wls:/accessdomain/domainRuntime/>
+

Verify that TRACE:32 log level is set by connecting to the Administration Server and viewing the logs:

+
$ kubectl exec -it accessdomain-adminserver -n oamns -- /bin/bash
+[oracle@accessdomain-adminserver oracle]$
+[oracle@accessdomain-adminserver oracle]$ cd /u01/oracle/user_projects/domains/accessdomain/servers/oam_server1/logs
+[oracle@accessdomain-adminserver logs]$ tail oam_server1-diagnostic.log
+[<DATE>] [oam_server1] [TRACE:32] [] [oracle.oam.config] [tid: Configuration Store Observer] [userId: <anonymous>] [ecid: 8b3ac37b-c7cf-46dd-aeee-5ed67886be21-0000000b,0:1795] [APP: oam_server] [partition-name: DOMAIN] [tenant-name: GLOBAL] [SRC_CLASS: oracle.security.am.admin.config.util.observable.ObservableConfigStore$StoreWatcher] [SRC_METHOD: run] Start of run before start of detection at 1,635,848,774,793. Detector: oracle.security.am.admin.config.util.observable.DbStoreChangeDetector:Database configuration store:DSN:jdbc/oamds. Monitor: { StoreMonitor: { disabled: 'false' } }
+[<DATE>] [oam_server1] [TRACE] [] [oracle.oam.config] [tid: Configuration Store Observer] [userId: <anonymous>] [ecid: 8b3ac37b-c7cf-46dd-aeee-5ed67886be21-0000000b,0:1795] [APP: oam_server] [partition-name: DOMAIN] [tenant-name: GLOBAL] [SRC_CLASS: oracle.security.am.admin.config.util.store.StoreUtil] [SRC_METHOD: getContainerProperty] Configuration property CONFIG_HISTORY not specified
+[<DATE>] [oam_server1] [TRACE] [] [oracle.oam.config] [tid: Configuration Store Observer] [userId: <anonymous>] [ecid: 8b3ac37b-c7cf-46dd-aeee-5ed67886be21-0000000b,0:1795] [APP: oam_server] [partition-name: DOMAIN] [tenant-name: GLOBAL] [SRC_CLASS: oracle.security.am.admin.config.util.store.StoreUtil] [SRC_METHOD: getContainerProperty] Configuration property CONFIG not specified
+[<DATE>] [oam_server1] [TRACE:32] [] [oracle.oam.config] [tid: Configuration Store Observer] [userId: <anonymous>] [ecid: 8b3ac37b-c7cf-46dd-aeee-5ed67886be21-0000000b,0:1795] [APP: oam_server] [partition-name: DOMAIN] [tenant-name: GLOBAL] [SRC_CLASS: oracle.security.am.admin.config.util.store.DbStore] [SRC_METHOD: getSelectSQL] SELECT SQL:SELECT  version  from  IDM_OBJECT_STORE  where id = ? and version = (select max(version) from  IDM_OBJECT_STORE  where id = ?)
+[<DATE>] [oam_server1] [TRACE] [] [oracle.oam.config] [tid: Configuration Store Observer] [userId: <anonymous>] [ecid: 8b3ac37b-c7cf-46dd-aeee-5ed67886be21-0000000b,0:1795] [APP: oam_server] [partition-name: DOMAIN] [tenant-name: GLOBAL] [SRC_CLASS: oracle.security.am.admin.config.util.store.DbStore] [SRC_METHOD: load] Time (ms) to load key CONFIG:-1{FIELD_TYPES=INT, SELECT_FIELDS=SELECT  version  from  IDM_OBJECT_STORE }:4
+

Performing WLST Administration via SSL

+
    +
  1. +

    By default, the SSL port is not enabled for the Administration Server or the OAM Managed Servers. To configure the SSL port for the Administration Server and Managed Servers, log in to the WebLogic Server Administration Console https://${MASTERNODE-HOSTNAME}:${MASTERNODE-PORT}/console and navigate to Lock & Edit -> Environment -> Servers -> server_name -> Configuration -> General -> SSL Listen Port Enabled -> Provide SSL Port (for the Administration Server: 7002, and for the OAM Managed Server (oam_server1): 14101) -> Save -> Activate Changes.

    +

    Note: If configuring the OAM Managed Servers for SSL, you must enable SSL on the same port for all servers (oam_server1 through oam_server5).

    +
  2. +
  3. +

    Create a myscripts directory as follows:

    +
    $ cd $WORKDIR/kubernetes/
    +$ mkdir myscripts
    +$ cd myscripts
    +

  4. +
  5. +

    Create a sample yaml template file in the myscripts directory called <domain_uid>-adminserver-ssl.yaml to create a Kubernetes service for the Administration Server:

    +

    Note: Update the domainName, domainUID and namespace based on your environment. For example:

    +
    apiVersion: v1
    +kind: Service
    +metadata:
    +  labels:
    +    serviceType: SERVER
    +    weblogic.domainName: accessdomain
    +    weblogic.domainUID: accessdomain
    +    weblogic.resourceVersion: domain-v2
    +    weblogic.serverName: AdminServer
    +  name: accessdomain-adminserverssl
    +  namespace: oamns
    +spec:
    +  clusterIP: None
    +  ports:
    +  - name: default
    +    port: 7002
    +    protocol: TCP
    +    targetPort: 7002
    +  selector:
    +    weblogic.createdByOperator: "true"
    +    weblogic.domainUID: accessdomain
    +    weblogic.serverName: AdminServer
    +  type: ClusterIP
    +

    and the following sample yaml template file <domain_uid>-oamcluster-ssl.yaml for the OAM Managed Server:

    +
    apiVersion: v1
    +kind: Service
    +metadata:
    +  labels:
    +    serviceType: SERVER
    +    weblogic.domainName: accessdomain
    +    weblogic.domainUID: accessdomain
    +    weblogic.resourceVersion: domain-v2
    +  name: accessdomain-oamcluster-ssl
    +  namespace: oamns
    +spec:
    +  clusterIP: None
    +  ports:
    +  - name: default
    +    port: 14101
    +    protocol: TCP
    +    targetPort: 14101
    +  selector:
    +    weblogic.clusterName: oam_cluster
    +    weblogic.createdByOperator: "true"
    +    weblogic.domainUID: accessdomain
    +  type: ClusterIP
    +
  6. +
  7. +

    Apply the template using the following command for the AdminServer:

    +
    $ kubectl apply -f <domain_uid>-adminserver-ssl.yaml
    +

    For example:

    +
    $ kubectl apply -f accessdomain-adminserver-ssl.yaml
    +service/accessdomain-adminserverssl created
    +

    and using the following command for the OAM Managed Server:

    +
    $ kubectl apply -f <domain_uid>-oamcluster-ssl.yaml
    +

    For example:

    +
    $ kubectl apply -f accessdomain-oamcluster-ssl.yaml
    +service/accessdomain-oamcluster-ssl created
    +
  8. +
  9. +

    Validate that the Kubernetes Services to access SSL ports are created successfully:

    +
    $ kubectl get svc -n <domain_namespace> |grep ssl
    +

    For example:

    +
    $ kubectl get svc -n oamns |grep ssl
    +

    The output will look similar to the following:

    +
    accessdomain-adminserverssl           ClusterIP   None             <none>        7002/TCP                     102s
    +accessdomain-oamcluster-ssl           ClusterIP   None             <none>        14101/TCP                    35s
    +
  10. +
  11. +

    Inside the bash shell of the running helper pod, run the following:

    +
    [oracle@helper bin]$ export WLST_PROPERTIES="-Dweblogic.security.SSL.ignoreHostnameVerification=true -Dweblogic.security.TrustKeyStore=DemoTrust"
    +[oracle@helper bin]$ cd /u01/oracle/oracle_common/common/bin
    +[oracle@helper bin]$ ./wlst.sh
    +Initializing WebLogic Scripting Tool (WLST) ...
    +
    +Welcome to WebLogic Server Administration Scripting Shell
    +   
    +Type help() for help on available commands
    +wls:/offline>
    +

    To connect to the Administration Server t3s service:

    +
    wls:/offline> connect('weblogic','<password>','t3s://accessdomain-adminserverssl:7002')
    +Connecting to t3s://accessdomain-adminserverssl:7002 with userid weblogic ...
    +<<DATE>> <Info> <Security> <BEA-090905> <Disabling the CryptoJ JCE Provider self-integrity check for better startup performance. To enable this check, specify -Dweblogic.security.allowCryptoJDefaultJCEVerification=true.>
    +<<DATE>> <Info> <Security> <BEA-090906> <Changing the default Random Number Generator in RSA CryptoJ from ECDRBG128 to HMACDRBG. To disable this change, specify -Dweblogic.security.allowCryptoJDefaultPRNG=true.>
    +<<DATE>> <Info> <Security> <BEA-090909> <Using the configured custom SSL Hostname Verifier implementation: weblogic.security.utils.SSLWLSHostnameVerifier$NullHostnameVerifier.>
    +Successfully connected to Admin Server "AdminServer" that belongs to domain "accessdomain".
    +
    +wls:/accessdomain/serverConfig/>
    +

    To connect to the OAM Managed Server t3s service:

    +
    wls:/offline> connect('weblogic','<password>','t3s://accessdomain-oamcluster-ssl:14101')   
    +Connecting to t3s://accessdomain-oamcluster-ssl:14101 with userid weblogic ...
    +<<DATE>> <Info> <Security> <BEA-090905> <Disabling the CryptoJ JCE Provider self-integrity check for better startup performance. To enable this check, specify -Dweblogic.security.allowCryptoJDefaultJCEVerification=true.>
    +<<DATE>> <Info> <Security> <BEA-090906> <Changing the default Random Number Generator in RSA CryptoJ from ECDRBG128 to HMACDRBG. To disable this change, specify -Dweblogic.security.allowCryptoJDefaultPRNG=true.>
    +<<DATE>> <Info> <Security> <BEA-090909> <Using the configured custom SSL Hostname Verifier implementation: weblogic.security.utils.SSLWLSHostnameVerifier$NullHostnameVerifier.>
    +Successfully connected to managed Server "oam_server1" that belongs to domain "accessdomain".
    +
  12. +
+ + + + + + + + + + + + + + + + + + + diff --git a/docs/23.4.1/idm-products/oam/patch-and-upgrade/index.html b/docs/23.4.1/idm-products/oam/patch-and-upgrade/index.html new file mode 100644 index 000000000..662bcf30b --- /dev/null +++ b/docs/23.4.1/idm-products/oam/patch-and-upgrade/index.html @@ -0,0 +1,4056 @@ + + + + + + + + + + + + Patch and Upgrade :: Oracle Fusion Middleware on Kubernetes + + + + + + + + + + + + + + + + + + + + + + + + + +

+ + Patch and Upgrade +

+ + + + + + + +

This section shows you how to upgrade the WebLogic Kubernetes Operator, the OAM image, the Elasticsearch and Kibana stack, and the Ingress.

+

The upgrade path taken depends on the version you are upgrading from, and the version you are upgrading to.

+

Please refer to the Release Notes for information on which upgrade steps are necessary for the version you are upgrading to.

+ + + + + + + +
    + + + + + + + + + + + + + + + + + +

    +a. Upgrade an operator release +

    + + + + + +

    Instructions on how to update the WebLogic Kubernetes Operator version.

    + + + + + + + + + + + + +

    +b. Patch an image +

    + + + + + +

    Instructions on how to update your OAM Kubernetes cluster with a new OAM container image.

    + + + + + + + + + + + + +

    +c. Upgrade Ingress +

    + + + + + +

    Instructions on how to upgrade the ingress.

    + + + + + + + + + + + + +

    +d. Upgrade Elasticsearch and Kibana +

    + + + + + +

    Instructions on how to upgrade Elasticsearch and Kibana.

+ + + + + + + + + + + + + + + + + + diff --git a/docs/23.4.1/idm-products/oam/patch-and-upgrade/index.xml b/docs/23.4.1/idm-products/oam/patch-and-upgrade/index.xml new file mode 100644 index 000000000..13f210be9 --- /dev/null +++ b/docs/23.4.1/idm-products/oam/patch-and-upgrade/index.xml @@ -0,0 +1,64 @@ + + + + Patch and Upgrade on Oracle Fusion Middleware on Kubernetes + /fmw-kubernetes/23.4.1/idm-products/oam/patch-and-upgrade/ + Recent content in Patch and Upgrade on Oracle Fusion Middleware on Kubernetes + Hugo -- gohugo.io + en-us + + + + + + a. Upgrade an operator release + /fmw-kubernetes/23.4.1/idm-products/oam/patch-and-upgrade/upgrade-an-operator-release/ + Mon, 01 Jan 0001 00:00:00 +0000 + + /fmw-kubernetes/23.4.1/idm-products/oam/patch-and-upgrade/upgrade-an-operator-release/ + These instructions apply to upgrading operators from 3.X.X to 4.X, or from within the 4.X release family as additional versions are released. + On the master node, download the new WebLogic Kubernetes Operator source code from the operator github project: +$ mkdir &lt;workdir&gt;/weblogic-kubernetes-operator-4.X.X $ cd &lt;workdir&gt;/weblogic-kubernetes-operator-4.X.X $ git clone https://github.com/oracle/weblogic-kubernetes-operator.git --branch v4.X.X For example: +$ mkdir /scratch/OAMK8S/weblogic-kubernetes-operator-4.X.X $ cd /scratch/OAMK8S/weblogic-kubernetes-operator-4.X.X $ git clone https://github.com/oracle/weblogic-kubernetes-operator.git --branch v4.X.X This will create the directory &lt;workdir&gt;/weblogic-kubernetes-operator-4. + + + + b. Patch an image + /fmw-kubernetes/23.4.1/idm-products/oam/patch-and-upgrade/patch-an-image/ + Mon, 01 Jan 0001 00:00:00 +0000 + + /fmw-kubernetes/23.4.1/idm-products/oam/patch-and-upgrade/patch-an-image/ + Choose one of the following options to update your OAM kubernetes cluster to use the new image: + Run the kubectl edit domain command Run the kubectl patch domain command In all of the above cases, the WebLogic Kubernetes Operator will restart the Administration Server pod first and then perform a rolling restart on the OAM Managed Servers. +Note: If you are not using Oracle Container Registry or your own container registry, then you must first load the new container image on all nodes in your Kubernetes cluster. + + + + c. Upgrade Ingress + /fmw-kubernetes/23.4.1/idm-products/oam/patch-and-upgrade/upgrade-an-ingress/ + Mon, 01 Jan 0001 00:00:00 +0000 + + /fmw-kubernetes/23.4.1/idm-products/oam/patch-and-upgrade/upgrade-an-ingress/ + This section shows how to upgrade the ingress. +To determine if this step is required for the version you are upgrading to, refer to the Release Notes. +Download the latest code repository Download the latest code repository as follows: + Create a working directory to setup the source code. +$ mkdir &lt;workdir&gt; For example: +$ mkdir /scratch/OAMK8Slatest Download the latest OAM deployment scripts from the OAM repository. + + + + d. Upgrade Elasticsearch and Kibana + /fmw-kubernetes/23.4.1/idm-products/oam/patch-and-upgrade/upgrade-elk/ + Mon, 01 Jan 0001 00:00:00 +0000 + + /fmw-kubernetes/23.4.1/idm-products/oam/patch-and-upgrade/upgrade-elk/ + This section shows how to upgrade Elasticsearch and Kibana. +To determine if this step is required for the version you are upgrading to, refer to the Release Notes. +Undeploy Elasticsearch and Kibana From October 22 (22.4.1) onwards, OAM logs should be stored on a centralized Elasticsearch and Kibana stack. 
+Deployments prior to October 22 (22.4.1) used local deployments of Elasticsearch and Kibana. +If you are upgrading from July 22 (22.3.1) or earlier, to October 22 (22. + + + + \ No newline at end of file diff --git a/docs/23.4.1/idm-products/oam/patch-and-upgrade/patch-an-image/index.html b/docs/23.4.1/idm-products/oam/patch-and-upgrade/patch-an-image/index.html new file mode 100644 index 000000000..665590a0b --- /dev/null +++ b/docs/23.4.1/idm-products/oam/patch-and-upgrade/patch-an-image/index.html @@ -0,0 +1,4000 @@ + + + + + + + + + + + + b. Patch an image :: Oracle Fusion Middleware on Kubernetes + + + + + + + + + + + + + + + + + + + + + + + + + +

+ + b. Patch an image +

+ + + + + + +

Choose one of the following options to update your OAM Kubernetes cluster to use the new image:

+
    +
  1. Run the kubectl edit domain command
  2. +
  3. Run the kubectl patch domain command
  4. +
+

In both cases, the WebLogic Kubernetes Operator restarts the Administration Server pod first and then performs a rolling restart of the OAM Managed Servers. A sketch for verifying the rolling restart is shown at the end of this page.

+

Note: If you are not using Oracle Container Registry or your own container registry, then you must first load the new container image on all nodes in your Kubernetes cluster.

+
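
One way to stage the image, not covered in detail by this documentation, is to export it to a tar file on a host that has already pulled it, copy the file to every master and worker node, and load it there. The following is a minimal sketch assuming docker is the container runtime and <tag> is the tag you are patching to; adjust for podman or other runtimes as appropriate:

+
# On a host that has already pulled the new image:
+$ docker save -o oam_cpu.tar container-registry.oracle.com/middleware/oam_cpu:<tag>
+
+# Copy oam_cpu.tar to each master and worker node, then on each node run:
+$ docker load -i oam_cpu.tar
+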

Run the kubectl edit domain command

+
    +
  1. +

    To update the domain with the kubectl edit domain command, run the following:

    +
    $ kubectl edit domain <domainname> -n <namespace>
    +

    For example:

    +
    $ kubectl edit domain accessdomain -n oamns
    +

    If using Oracle Container Registry or your own container registry for your OAM container image, update the image to point at the new image, for example:

    +
    domainHomeInImage: false
    +image: container-registry.oracle.com/middleware/oam_cpu:<tag>
    +imagePullPolicy: IfNotPresent
    +

    If you are not using a container registry and have loaded the image on each of the master and worker nodes, update the image to point at the new image:

    +
    domainHomeInImage: false
    +image: oracle/oam:<tag>
    +imagePullPolicy: IfNotPresent
    +
  2. +
  3. +

    Save the file and exit (:wq!)

    +
  4. +
+

Run the kubectl patch domain command

+
    +
  1. +

    To update the domain with the kubectl patch domain command, run the following:

    +
    $ kubectl patch domain <domain> -n <namespace> --type merge  -p '{"spec":{"image":"newimage:tag"}}'
    +

    For example, if using Oracle Container Registry or your own container registry for your OAM container image:

    +
    $ kubectl patch domain accessdomain -n oamns --type merge  -p '{"spec":{"image":"container-registry.oracle.com/middleware/oam_cpu:<tag>"}}'
    +

    For example, if you are not using a container registry and have loaded the image on each of the master and worker nodes:

    +
    $ kubectl patch domain accessdomain -n oamns --type merge  -p '{"spec":{"image":"oracle/oam:<tag>"}}'
    +

    The output will look similar to the following:

    +
    domain.weblogic.oracle/accessdomain patched
    +
  2. +
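+

Whichever method you use, you can optionally confirm that the domain resource now references the new image and watch the operator perform the rolling restart. The following is a minimal sketch, assuming the accessdomain domain in the oamns namespace:

+
$ kubectl get domain accessdomain -n oamns -o jsonpath='{.spec.image}'
+
+# Watch the pods restart one by one until they all return to READY 1/1:
+$ kubectl get pods -n oamns -w
+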
+ + + + + + + + + + + + + + + + + + + diff --git a/docs/23.4.1/idm-products/oam/patch-and-upgrade/upgrade-an-ingress/index.html b/docs/23.4.1/idm-products/oam/patch-and-upgrade/upgrade-an-ingress/index.html new file mode 100644 index 000000000..aadcd490c --- /dev/null +++ b/docs/23.4.1/idm-products/oam/patch-and-upgrade/upgrade-an-ingress/index.html @@ -0,0 +1,4090 @@ + + + + + + + + + + + + c. Upgrade Ingress :: Oracle Fusion Middleware on Kubernetes + + + + + + + + + + + + + + + + + + + + + + + + + +

+ + c. Upgrade Ingress +

+ + + + + + +

This section shows how to upgrade the ingress.

+

To determine if this step is required for the version you are upgrading to, refer to the Release Notes.

+

Download the latest code repository

+

Download the latest code repository as follows:

+
    +
  1. +

    Create a working directory to set up the source code.

    +
    $ mkdir <workdir>
    +

    For example:

    +
    $ mkdir /scratch/OAMK8Slatest
    +
  2. +
  3. +

    Download the latest OAM deployment scripts from the OAM repository.

    +
    $ cd <workdir>
    +$ git clone https://github.com/oracle/fmw-kubernetes.git --branch release/23.4.1
    +

    For example:

    +
    $ cd /scratch/OAMK8Slatest
    +$ git clone https://github.com/oracle/fmw-kubernetes.git --branch release/23.4.1
    +
  4. +
  5. +

    Set the $WORKDIR environment variable as follows:

    +
    $ export WORKDIR=<workdir>/fmw-kubernetes/OracleAccessManagement
    +

    For example:

    +
    $ export WORKDIR=/scratch/OAMK8Slatest/fmw-kubernetes/OracleAccessManagement
    +
  6. +
+

Upgrading the ingress

+

To upgrade the existing ingress rules, follow the steps below:

+
    +
  1. +

    List the existing ingress:

    +
    $ helm list -n oamns
    +

    The output will look similar to the following:

    +
    NAME            NAMESPACE       REVISION        UPDATED     STATUS          CHART                       APP VERSION
    +nginx-ingress   oamns           1               <DATE>      deployed        ingress-nginx-4.3.0         1.4.0
    +oam-nginx       oamns           1               <DATE>      deployed        ingress-per-domain-0.1.0    1.0
    +
  2. +
  3. +

    Edit the $WORKDIR/kubernetes/charts/ingress-per-domain/values.yaml and change the domainUID parameter to match your domainUID, for example domainUID: accessdomain:

    +
    # Load balancer type. Supported values are: NGINX
    +type: NGINX
    +
    +# SSL configuration Type. Supported Values are : NONSSL,SSL
    +sslType: SSL
    +
    +# domainType. Supported values are: oam
    +domainType: oam
    +
    +#WLS domain as backend to the load balancer
    +wlsDomain:
    +  domainUID: accessdomain
    +  adminServerName: AdminServer
    +  adminServerPort: 7001
    +  adminServerSSLPort:
    +  oamClusterName: oam_cluster
    +  oamManagedServerPort: 14100
    +  oamManagedServerSSLPort:
    +  policyClusterName: policy_cluster
    +  policyManagedServerPort: 15100
    +  policyManagedServerSSLPort:
    +
    +
    +# Host  specific values
    +hostName:
    +  enabled: false
    +  admin:
    +  runtime:
    +
  4. +
  5. +

    Upgrade the oam-nginx with the following command:

    +
    $ helm upgrade oam-nginx kubernetes/charts/ingress-per-domain/ --namespace oamns --values kubernetes/charts/ingress-per-domain/values.yaml --reuse-values
    +

    The output will look similar to the following:

    +
    Release "oam-nginx" has been upgraded. Happy Helming!
    +NAME: oam-nginx
    +LAST DEPLOYED: <DATE>
    +NAMESPACE: oamns
    +STATUS: deployed
    +REVISION: 2
    +TEST SUITE: None
    +
  6. +
  7. +

    List the ingress:

    +
    $ kubectl get ing -n oamns
    +

    The output will look similar to the following:

    +
    NAME                 CLASS    HOSTS   ADDRESS        PORTS   AGE
    +accessdomain-nginx   <none>   *       10.99.189.61   80      18s
    +
  8. +
  9. +

    Describe the ingress and make sure all the listed paths are accessible (a curl sketch for spot checking a path is shown after this list):

    +
    $ kubectl describe ing accessdomain-nginx -n oamns
    +

    The output will look similar to the following:

    +
    Name:             accessdomain-nginx
    +Labels:           app.kubernetes.io/managed-by=Helm
    +Namespace:        oamns
    +Address:          10.99.189.61
    +Default backend:  default-http-backend:80 (<error: endpoints "default-http-backend" not found>)
    +Rules:
    +  Host        Path  Backends
    +  ----        ----  --------
    +  *
    +              /console                        accessdomain-adminserver:7001 (10.244.1.224:7001)
    +              /consolehelp                    accessdomain-adminserver:7001 (10.244.1.224:7001)
    +              /rreg/rreg                      accessdomain-adminserver:7001 (10.244.1.224:7001)
    +              /em                             accessdomain-adminserver:7001 (10.244.1.224:7001)
    +              /oamconsole                     accessdomain-adminserver:7001 (10.244.1.224:7001)
    +              /dms                            accessdomain-adminserver:7001 (10.244.1.224:7001)
    +              /oam/services/rest              accessdomain-adminserver:7001 (10.244.1.224:7001)
    +              /iam/admin/config               accessdomain-adminserver:7001 (10.244.1.224:7001)
    +              /iam/admin/diag                 accessdomain-adminserver:7001 (10.244.1.224:7001)
    +              /iam/access                     accessdomain-cluster-oam-cluster:14100 (10.244.1.225:14100)
    +              /oam/admin/api                  accessdomain-adminserver:7001 (10.244.1.224:7001)
    +              /oam/services/rest/access/api   accessdomain-cluster-oam-cluster:14100 (10.244.1.225:14100)
    +              /access                         accessdomain-cluster-policy-cluster:15100 (10.244.1.226:15100)
    +              /                               accessdomain-cluster-oam-cluster:14100 (10.244.1.225:14100)
    +Annotations:  kubernetes.io/ingress.class: nginx
    +              meta.helm.sh/release-name: oam-nginx
    +              meta.helm.sh/release-namespace: oamns
    +              nginx.ingress.kubernetes.io/configuration-snippet:
    +                more_clear_input_headers "WL-Proxy-Client-IP" "WL-Proxy-SSL";
    +                more_set_input_headers "X-Forwarded-Proto: https";
    +                more_set_input_headers "WL-Proxy-SSL: true";
    +              nginx.ingress.kubernetes.io/enable-access-log: false
    +              nginx.ingress.kubernetes.io/ingress.allow-http: false
    +              nginx.ingress.kubernetes.io/proxy-buffer-size: 2000k
    +Events:
    +  Type    Reason  Age                From                      Message
    +  ----    ------  ----               ----                      -------
    +  Normal  Sync    55s (x2 over 63s)  nginx-ingress-controller  Scheduled for sync
    +
  10. +
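+

To spot check that a listed path responds through the ingress, send a request from a host that can reach the NGINX controller. The following is a minimal sketch, assuming the ingress is reachable over SSL at ${MASTERNODE-HOSTNAME}:${MASTERNODE-PORT} (substitute the host and port used by your NGINX service). An HTTP status code such as 200 or 302 indicates the path is being routed:

+
$ curl -ks -o /dev/null -w "%{http_code}\n" https://${MASTERNODE-HOSTNAME}:${MASTERNODE-PORT}/console
+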
+ + + + + + + + + + + + + + + + + + + diff --git a/docs/23.4.1/idm-products/oam/patch-and-upgrade/upgrade-an-operator-release/index.html b/docs/23.4.1/idm-products/oam/patch-and-upgrade/upgrade-an-operator-release/index.html new file mode 100644 index 000000000..fbb0abe35 --- /dev/null +++ b/docs/23.4.1/idm-products/oam/patch-and-upgrade/upgrade-an-operator-release/index.html @@ -0,0 +1,4002 @@ + + + + + + + + + + + + a. Upgrade an operator release :: Oracle Fusion Middleware on Kubernetes + + + + + + + + + + + + + + + + + + + + + + + + + +

+ + a. Upgrade an operator release +

+ + + + + + +

These instructions apply to upgrading operators from 3.X.X to 4.X, or from within the 4.X release family as additional versions are released.

+
    +
  1. +

    On the master node, download the new WebLogic Kubernetes Operator source code from the operator GitHub project:

    +
    $ mkdir <workdir>/weblogic-kubernetes-operator-4.X.X
    +$ cd <workdir>/weblogic-kubernetes-operator-4.X.X
    +$ git clone https://github.com/oracle/weblogic-kubernetes-operator.git --branch v4.X.X 
    +

    For example:

    +
    $ mkdir /scratch/OAMK8S/weblogic-kubernetes-operator-4.X.X
    +$ cd /scratch/OAMK8S/weblogic-kubernetes-operator-4.X.X
    +$ git clone https://github.com/oracle/weblogic-kubernetes-operator.git --branch v4.X.X 
    +

    This will create the directory <workdir>/weblogic-kubernetes-operator-4.X.X/weblogic-kubernetes-operator

    +
  2. +
  3. +

    Run the following helm command to upgrade the operator:

    +
    $ cd <workdir>/weblogic-kubernetes-operator-4.X.X/weblogic-kubernetes-operator
    +$ helm upgrade --reuse-values --set image=ghcr.io/oracle/weblogic-kubernetes-operator:4.X.X --namespace <sample-kubernetes-operator-ns> --wait weblogic-kubernetes-operator kubernetes/charts/weblogic-operator
    +

    For example:

    +
    $ cd /scratch/OAMK8S/weblogic-kubernetes-operator-4.X.X/weblogic-kubernetes-operator
    +$ helm upgrade --reuse-values --set image=ghcr.io/oracle/weblogic-kubernetes-operator:4.X.X --namespace opns --wait weblogic-kubernetes-operator kubernetes/charts/weblogic-operator
    +

    The output will look similar to the following:

    +
    Release "weblogic-kubernetes-operator" has been upgraded. Happy Helming!
    +NAME: weblogic-kubernetes-operator
    +LAST DEPLOYED: <DATE>
    +NAMESPACE: opns
    +STATUS: deployed
    +REVISION: 2
    +TEST SUITE: None
    +
  4. +
  5. +

    Verify that the operator’s pod and services are running by executing the following command (an additional check of the operator image is sketched after this list):

    +
    $ kubectl get all -n <sample-kubernetes-operator-ns>
    +

    For example:

    +
    $ kubectl get all -n opns
    +

    The output will look similar to the following:

    +
    NAME                                             READY   STATUS    RESTARTS   AGE
    +pod/weblogic-operator-b7d6df78c-mfrc4            1/1     Running   0          40s
    +pod/weblogic-operator-webhook-7996b8b58b-frtwp   1/1     Running   0          42s
    +
    +NAME                                     TYPE        CLUSTER-IP     EXTERNAL-IP   PORT(S)             AGE
    +service/weblogic-operator-webhook-svc    ClusterIP   10.106.51.57   <none>        8083/TCP,8084/TCP   42s
    +
    +NAME                                        READY   UP-TO-DATE   AVAILABLE   AGE
    +deployment.apps/weblogic-operator           1/1     1            1           6d
    +deployment.apps/weblogic-operator-webhook   1/1     1            1           42s
    +
    +NAME                                                   DESIRED   CURRENT   READY   AGE
    +replicaset.apps/weblogic-operator-5884685f4f           0         0         0       6d
    +replicaset.apps/weblogic-operator-b7d6df78c            1         1         1       40s
    +replicaset.apps/weblogic-operator-webhook-7996b8b58b   1         1         1       42s
    +

    Note: When you upgrade a 3.x WebLogic Kubernetes Operator to 4.x, the upgrade process creates a WebLogic Domain resource conversion webhook deployment, and associated resources in the same namespace. The webhook automatically and transparently upgrades the existing WebLogic Domains from the 3.x schema to the 4.x schema. For more information, see Domain Upgrade in the WebLogic Kubernetes Operator documentation.

    +

    Note: In WebLogic Kubernetes Operator 4.X, changes are made to serverStartPolicy that affect starting/stopping of the domain. Refer to the serverStartPolicy entry in the create-domain-inputs.yaml for more information. Also see Domain Life Cycle.

    +
  6. +
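+

In addition to checking the pods, you can confirm that the operator deployment is now running the upgraded image. The following is a minimal sketch, assuming the operator is installed in the opns namespace; the output should show ghcr.io/oracle/weblogic-kubernetes-operator:4.X.X:

+
$ kubectl get deployment weblogic-operator -n opns -o jsonpath='{.spec.template.spec.containers[0].image}'
+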
+ + + + + + + + + + + + + + + + + + + diff --git a/docs/23.4.1/idm-products/oam/patch-and-upgrade/upgrade-elk/index.html b/docs/23.4.1/idm-products/oam/patch-and-upgrade/upgrade-elk/index.html new file mode 100644 index 000000000..00733b1bf --- /dev/null +++ b/docs/23.4.1/idm-products/oam/patch-and-upgrade/upgrade-elk/index.html @@ -0,0 +1,3982 @@ + + + + + + + + + + + + d. Upgrade Elasticsearch and Kibana :: Oracle Fusion Middleware on Kubernetes + + + + + + + + + + + + + + + + + + + + + + + + + +

+ + d. Upgrade Elasticsearch and Kibana +

+ + + + + + +

This section shows how to upgrade Elasticsearch and Kibana.

+

To determine if this step is required for the version you are upgrading to, refer to the Release Notes.

+

Undeploy Elasticsearch and Kibana

+

From October 22 (22.4.1) onwards, OAM logs should be stored on a centralized Elasticsearch and Kibana stack.

+

Deployments prior to October 22 (22.4.1) used local deployments of Elasticsearch and Kibana.

+

If you are upgrading from July 22 (22.3.1) or earlier, to October 22 (22.4.1) or later, you must first undeploy Elasticsearch and Kibana using the steps below:

+
    +
  1. +

    Make sure you have downloaded the latest code repository as per Download the latest code repository

    +
  2. +
  3. +

    Edit the $WORKDIR/kubernetes/elasticsearch-and-kibana/elasticsearch_and_kibana.yaml and change all instances of namespace to correspond to your deployment (one way to script this change is sketched after this list).

    +
  4. +
  5. +

    Delete the Elasticsearch and Kibana resources using the following command:

    +
    $ kubectl delete -f $WORKDIR/kubernetes/elasticsearch-and-kibana/elasticsearch_and_kibana.yaml
    +
  6. +
+
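
If you prefer to script the namespace change in the edit step above rather than editing the file by hand, the following is a minimal sketch. It assumes your deployment namespace is oamns and it rewrites every namespace: entry in the file, so review the file first and keep the .bak backup that the command creates:

+
$ sed -i.bak 's/namespace: .*/namespace: oamns/g' $WORKDIR/kubernetes/elasticsearch-and-kibana/elasticsearch_and_kibana.yaml
+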

Deploy Elasticsearch and Kibana in centralized stack

+
    +
  1. Follow Install Elasticsearch stack and Kibana to deploy Elasticsearch and Kibana in a centralized stack.
  2. +
+ + + + + + + + + + + + + + + + + + + diff --git a/docs/23.4.1/idm-products/oam/post-install-config/index.html b/docs/23.4.1/idm-products/oam/post-install-config/index.html new file mode 100644 index 000000000..bdab23c17 --- /dev/null +++ b/docs/23.4.1/idm-products/oam/post-install-config/index.html @@ -0,0 +1,4145 @@ + + + + + + + + + + + + Post Install Configuration :: Oracle Fusion Middleware on Kubernetes + + + + + + + + + + + + + + + + + + + + + + + + + +

+ + Post Install Configuration +

+ + + + + + + +

Follow these post install configuration steps.

+
    +
  1. Create a Server Overrides File
  2. +
  3. Removing OAM Server from WebLogic Server 12c Default Coherence Cluster
  4. +
  5. WebLogic Server Tuning
  6. +
  7. Enable Virtualization
  8. +
  9. Restart the domain
  10. +
+

Create a Server Overrides File

+
    +
  1. +

    Navigate to the following directory:

    +
    $ cd $WORKDIR/kubernetes/create-access-domain/domain-home-on-pv/output/weblogic-domains/accessdomain
    +
  2. +
  3. +

    Create a file called setUserOverrides.sh with the following contents:

    +
    DERBY_FLAG=false
    +JAVA_OPTIONS="${JAVA_OPTIONS} -Djava.net.preferIPv4Stack=true"
    +MEM_ARGS="-Xms8192m -Xmx8192m"
    +
  4. +
  5. +

    Copy the setUserOverrides.sh file to the Administration Server pod:

    +
    $ chmod 755 setUserOverrides.sh
    +$ kubectl cp setUserOverrides.sh oamns/accessdomain-adminserver:/u01/oracle/user_projects/domains/accessdomain/bin/setUserOverrides.sh
    +

    Where oamns is the OAM namespace and accessdomain is the DOMAIN_NAME/UID. A sketch for verifying the override after the domain has been restarted is shown after this list.

    +
  6. +
  7. +

    Stop the OAM domain using the following command:

    +
    $ kubectl -n <domain_namespace> patch domains <domain_uid> --type='json' -p='[{"op": "replace", "path": "/spec/serverStartPolicy", "value": "Never" }]'
    +

    For example:

    +
    $ kubectl -n oamns patch domains accessdomain --type='json' -p='[{"op": "replace", "path": "/spec/serverStartPolicy", "value": "Never" }]'
    +

    The output will look similar to the following:

    +
    domain.weblogic.oracle/accessdomain patched
    +
  8. +
  9. +

    Check that all the pods are stopped:

    +
    $ kubectl get pods -n <domain_namespace>
    +

    For example:

    +
    $ kubectl get pods -n oamns
    +

    The output will look similar to the following:

    +
    NAME                                                     READY   STATUS        RESTARTS   AGE
    +accessdomain-adminserver                                 1/1     Terminating   0          27m
    +accessdomain-create-oam-infra-domain-job-7c9r9           0/1     Completed     0          4h29m
    +accessdomain-oam-policy-mgr1                             1/1     Terminating   0          24m
    +accessdomain-oam-server1                                 1/1     Terminating   0          24m
    +helper                                                   1/1     Running       0          4h44m
    +nginx-ingress-ingress-nginx-controller-76fb7678f-k8rhq   1/1     Running       0          108m
    +

    The Administration Server pod and Managed Server pods will move to a STATUS of Terminating. After a few minutes, run the command again and the pods should have disappeared:

    +
    NAME                                                     READY   STATUS      RESTARTS   AGE
    +accessdomain-create-oam-infra-domain-job-7c9r9           0/1     Completed   0          4h30m
    +helper                                                   1/1     Running     0          4h45m
    +nginx-ingress-ingress-nginx-controller-76fb7678f-k8rhq   1/1     Running     0          109m
    +
  10. +
  11. +

    Start the domain using the following command:

    +
    $ kubectl -n <domain_namespace> patch domains <domain_uid> --type='json' -p='[{"op": "replace", "path": "/spec/serverStartPolicy", "value": "IfNeeded" }]'
    +

    For example:

    +
    $ kubectl -n oamns patch domains accessdomain --type='json' -p='[{"op": "replace", "path": "/spec/serverStartPolicy", "value": "IfNeeded" }]'
    +

    Run the following kubectl command to view the pods:

    +
    $ kubectl get pods -n <domain_namespace>
    +

    For example:

    +
    $ kubectl get pods -n oamns
    +

    The output will look similar to the following:

    +
    NAME                                                     READY   STATUS      RESTARTS   AGE
    +accessdomain-create-oam-infra-domain-job-7c9r9           0/1     Completed   0          4h30m
    +accessdomain-introspector-mckp2                          1/1     Running     0          8s
    +helper                                                   1/1     Running     0          4h46m
    +nginx-ingress-ingress-nginx-controller-76fb7678f-k8rhq   1/1     Running     0          110m
    +

    The Administration Server pod will start followed by the OAM Managed Servers pods. This process will take several minutes, so keep executing the command until all the pods are running with READY status 1/1:

    +
    NAME                                                     READY   STATUS      RESTARTS   AGE  
    +accessdomain-adminserver                                 1/1     Running     0          5m38s
    +accessdomain-create-oam-infra-domain-job-7c9r9           0/1     Completed   0          4h37m
    +accessdomain-oam-policy-mgr1                             1/1     Running     0          2m51s
    +accessdomain-oam-server1                                 1/1     Running     0          2m50s
    +helper                                                   1/1     Running     0          4h52m
    +nginx-ingress-ingress-nginx-controller-76fb7678f-k8rhq   1/1     Running     0          116m
    +
  12. +
+
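
Once the servers are back up, you can optionally confirm that setUserOverrides.sh was copied and picked up, as noted in the copy step above. The following is a minimal sketch, assuming the oamns namespace and the accessdomain domain UID, and that ps is available in the container image:

+
$ kubectl exec -n oamns accessdomain-adminserver -- ls -l /u01/oracle/user_projects/domains/accessdomain/bin/setUserOverrides.sh
+
+# Check that the Administration Server JVM is running with the overridden heap settings:
+$ kubectl exec -n oamns accessdomain-adminserver -- bash -c "ps -ef | grep '[X]ms8192m'"
+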

Removing OAM Server from WebLogic Server 12c Default Coherence Cluster

+

Exclude all Oracle Access Management (OAM) clusters (including Policy Manager and OAM runtime server) from the default WebLogic Server 12c coherence cluster by using the WebLogic Server Administration Console.

+

From 12.2.1.3.0 onwards, OAM server-side session management uses the database and does not require a Coherence cluster to be established. In some environments, warnings and errors are observed due to the default Coherence cluster initialized by WebLogic Server. To avoid or fix these errors, exclude all of the OAM clusters from the default WebLogic Server Coherence cluster using the following steps:

+
    +
  1. Login to the WebLogic Server Console at https://${MASTERNODE-HOSTNAME}:${MASTERNODE-PORT}/console.
  2. +
  3. Click Lock & Edit.
  4. +
  5. In Domain Structure, expand Environment and select Coherence Clusters.
  6. +
  7. Click defaultCoherenceCluster and select the Members tab.
  8. +
  9. From Servers and Clusters, deselect all OAM clusters (oam_cluster and policy_cluster).
  10. +
  11. Click Save.
  12. +
  13. Click Activate changes.
  14. +
+

WebLogic Server Tuning

+

For production environments, the following WebLogic Server tuning parameters must be set:

+

Add Minimum Threads Constraint to work manager “OAPOverRestWM”

+
    +
  1. Login to the WebLogic Server Console at https://${MASTERNODE-HOSTNAME}:${MASTERNODE-PORT}/console.
  2. +
  3. Click Lock & Edit.
  4. +
  5. In Domain Structure, click Deployments.
  6. +
  7. On the Deployments page click Next until you see oam_server.
  8. +
  9. Expand oam_server by clicking on the + icon, then click /iam/access/binding.
  10. +
  11. Click the Configuration tab, followed by the Workload tab.
  12. +
  13. Click wm/OAPOverRestWM
  14. +
  15. Under Application Scoped Work Managed Components, click New.
  16. +
  17. In Create a New Work Manager Component, select Minimum Threads Constraint and click Next.
  18. +
  19. In Minimum Threads Constraint Properties enter the Count as 400 and click Finish.
  20. +
  21. In the Save Deployment Plan screen, change the Path to the value /u01/oracle/user_projects/domains/accessdomain/Plan.xml, where accessdomain is your domain_UID.
  22. +
  23. Click OK and then Activate Changes.
  24. +
+

Remove Max Thread Constraint and Capacity Constraint

+
    +
  1. Repeat steps 1-7 above.
  2. +
  3. Under Application Scoped Work Managed Components, select the check box for Capacity and MaxThreadsCount. Click Delete.
  4. +
  5. In the Delete Work Manager Components screen, click OK to delete.
  6. +
  7. Click Release Configuration and then Log Out.
  8. +
+

oamDS DataSource Tuning

+
    +
  1. Login to the WebLogic Server Console at https://${MASTERNODE-HOSTNAME}:${MASTERNODE-PORT}/console.
  2. +
  3. Click Lock & Edit.
  4. +
  5. In Domain Structure, Expand Services and click Data Sources.
  6. +
  7. Click on oamDS.
  8. +
  9. In Settings for oamDS, select the Configuration tab, and then the Connection Pool tab.
  10. +
  11. Change Initial Capacity, Maximum Capacity, and Minimum Capacity to 800 and click Save.
  12. +
  13. Click Activate Changes.
  14. +
+

Enable Virtualization

+
    +
  1. Log in to Oracle Enterprise Manager Fusion Middleware Control at https://${MASTERNODE-HOSTNAME}:${MASTERNODE-PORT}/em
  2. +
  3. Click WebLogic Domain > Security > Security Provider Configuration.
  4. +
  5. Expand Security Store Provider.
  6. +
  7. Expand Identity Store Provider.
  8. +
  9. Click Configure.
  10. +
  11. Add a custom property.
  12. +
  13. Select the virtualize property with the value true and click OK.
  14. +
  15. Click OK again to persist the change.
  16. +
+

Restart the domain

+

For the above changes to take effect, you must restart the OAM domain:

+
    +
  1. +

    Stop the OAM domain using the following command:

    +
    $ kubectl -n <domain_namespace> patch domains <domain_uid> --type='json' -p='[{"op": "replace", "path": "/spec/serverStartPolicy", "value": "Never" }]'
    +

    For example:

    +
    $ kubectl -n oamns patch domains accessdomain --type='json' -p='[{"op": "replace", "path": "/spec/serverStartPolicy", "value": "Never" }]'
    +

    The output will look similar to the following:

    +
    domain.weblogic.oracle/accessdomain patched
    +
  2. +
  3. +

    Check that all the pods are stopped:

    +
    $ kubectl get pods -n <domain_namespace>
    +

    For example:

    +
    $ kubectl get pods -n oamns
    +

    The output will look similar to the following:

    +
    NAME                                                     READY   STATUS        RESTARTS   AGE
    +accessdomain-adminserver                                 1/1     Terminating   0          27m
    +accessdomain-create-oam-infra-domain-job-7c9r9           0/1     Completed     0          4h29m
    +accessdomain-oam-policy-mgr1                             1/1     Terminating   0          24m
    +accessdomain-oam-server1                                 1/1     Terminating   0          24m
    +helper                                                   1/1     Running       0          4h44m
    +nginx-ingress-ingress-nginx-controller-76fb7678f-k8rhq   1/1     Running       0          108m
    +

    The Administration Server pod and Managed Server pods will move to a STATUS of Terminating. After a few minutes, run the command again and the pods should have disappeared:

    +
    NAME                                                     READY   STATUS      RESTARTS   AGE
    +accessdomain-create-oam-infra-domain-job-7c9r9           0/1     Completed   0          4h30m
    +helper                                                   1/1     Running     0          4h45m
    +nginx-ingress-ingress-nginx-controller-76fb7678f-k8rhq   1/1     Running     0          109m
    +
  4. +
  5. +

    Start the domain using the following command:

    +
    $ kubectl -n <domain_namespace> patch domains <domain_uid> --type='json' -p='[{"op": "replace", "path": "/spec/serverStartPolicy", "value": "IfNeeded" }]'
    +

    For example:

    +
    $ kubectl -n oamns patch domains accessdomain --type='json' -p='[{"op": "replace", "path": "/spec/serverStartPolicy", "value": "IfNeeded" }]'
    +

    Run the following kubectl command to view the pods:

    +
    $ kubectl get pods -n <domain_namespace>
    +

    For example:

    +
    $ kubectl get pods -n oamns
    +

    The output will look similar to the following:

    +
    NAME                                                     READY   STATUS      RESTARTS   AGE
    +accessdomain-create-oam-infra-domain-job-7c9r9           0/1     Completed   0          4h30m
    +accessdomain-introspector-mckp2                          1/1     Running     0          8s
    +helper                                                   1/1     Running     0          4h46m
    +nginx-ingress-ingress-nginx-controller-76fb7678f-k8rhq   1/1     Running     0          110m
    +

    The Administration Server pod will start followed by the OAM Managed Servers pods. This process will take several minutes, so keep executing the command until all the pods are running with READY status 1/1:

    +
    NAME                                                     READY   STATUS      RESTARTS   AGE  
    +accessdomain-adminserver                                 1/1     Running     0          5m38s
    +accessdomain-create-oam-infra-domain-job-7c9r9           0/1     Completed   0          4h37m
    +accessdomain-oam-policy-mgr1                             1/1     Running     0          2m51s
    +accessdomain-oam-server1                                 1/1     Running     0          2m50s
    +helper                                                   1/1     Running     0          4h52m
    +nginx-ingress-ingress-nginx-controller-76fb7678f-k8rhq   1/1     Running     0          116m
    +
  6. +
+ + + + + + + + + + + + + + + + + + diff --git a/docs/23.4.1/idm-products/oam/post-install-config/index.xml b/docs/23.4.1/idm-products/oam/post-install-config/index.xml new file mode 100644 index 000000000..7f2972dbb --- /dev/null +++ b/docs/23.4.1/idm-products/oam/post-install-config/index.xml @@ -0,0 +1,14 @@ + + + + Post Install Configuration on Oracle Fusion Middleware on Kubernetes + /fmw-kubernetes/23.4.1/idm-products/oam/post-install-config/ + Recent content in Post Install Configuration on Oracle Fusion Middleware on Kubernetes + Hugo -- gohugo.io + en-us + + + + + + \ No newline at end of file diff --git a/docs/23.4.1/idm-products/oam/prepare-your-environment/index.html b/docs/23.4.1/idm-products/oam/prepare-your-environment/index.html new file mode 100644 index 000000000..d08497093 --- /dev/null +++ b/docs/23.4.1/idm-products/oam/prepare-your-environment/index.html @@ -0,0 +1,4636 @@ + + + + + + + + + + + + Prepare your environment :: Oracle Fusion Middleware on Kubernetes + + + + + + + + + + + + + + + + + + + + + + + + + +

+ + Prepare your environment +

+ + + + + + + +

To prepare for Oracle Access Management deployment in a Kubernetes environment, complete the following steps:

+
    +
  1. +

    Check the Kubernetes cluster is ready

    +
  2. +
  3. +

    Obtain the OAM container image

    +
  4. +
  5. +

    Set up the code repository to deploy OAM domains

    +
  6. +
  7. +

    Install the WebLogic Kubernetes Operator

    +
  8. +
  9. +

    Create a namespace for Oracle Access Management

    +
  10. +
  11. +

    Create a Kubernetes secret for the container registry

    +
  12. +
  13. +

    RCU schema creation

    +
  14. +
  15. +

    Preparing the environment for domain creation

    +

    a. Creating Kubernetes secrets for the domain and RCU

    +

    b. Create a Kubernetes persistent volume and persistent volume claim

    +
  16. +
+

Check the Kubernetes cluster is ready

+

As per the Prerequisites, a Kubernetes cluster should have already been configured.

+

Check that all the nodes in the Kubernetes cluster are running.

+
    +
  1. +

    Run the following command on the master node to check the cluster and worker nodes are running:

    +
    $ kubectl get nodes,pods -n kube-system
    +

    The output will look similar to the following:

    +
    NAME                  STATUS   ROLES                  AGE   VERSION
    +node/worker-node1     Ready    <none>                 17h   v1.26.6+1.el8
    +node/worker-node2     Ready    <none>                 17h   v1.26.6+1.el8
    +node/master-node      Ready    control-plane,master   23h   v1.26.6+1.el8
    +
    +NAME                                     READY   STATUS    RESTARTS   AGE
    +pod/coredns-66bff467f8-fnhbq             1/1     Running   0          23h
    +pod/coredns-66bff467f8-xtc8k             1/1     Running   0          23h
    +pod/etcd-master                          1/1     Running   0          21h
    +pod/kube-apiserver-master-node           1/1     Running   0          21h
    +pod/kube-controller-manager-master-node  1/1     Running   0          21h
    +pod/kube-flannel-ds-amd64-lxsfw          1/1     Running   0          17h
    +pod/kube-flannel-ds-amd64-pqrqr          1/1     Running   0          17h
    +pod/kube-flannel-ds-amd64-wj5nh          1/1     Running   0          17h
    +pod/kube-proxy-2kxv2                     1/1     Running   0          17h
    +pod/kube-proxy-82vvj                     1/1     Running   0          17h
    +pod/kube-proxy-nrgw9                     1/1     Running   0          23h
    +pod/kube-scheduler-master                1/1     Running   0          21h
    +
  2. +
+

Obtain the OAM container image

+

The OAM Kubernetes deployment requires access to an OAM container image. The image can be obtained in the following ways:

+
    +
  • Prebuilt OAM container image
  • +
  • Build your own OAM container image using WebLogic Image Tool
  • +
+

Prebuilt OAM container image

+

The prebuilt OAM October 2023 container image can be downloaded from Oracle Container Registry. This image is prebuilt by Oracle and includes Oracle Access Management 12.2.1.4.0, the October Patch Set Update (PSU) and other fixes released with the Critical Patch Update (CPU) program.

+

Note: Before using this image you must login to Oracle Container Registry, navigate to Middleware > oam_cpu and accept the license agreement.

+
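
For example, after accepting the license agreement, one way to pull the prebuilt image manually is shown below. This is a minimal sketch assuming docker as the client and a placeholder <tag>; podman syntax is equivalent:

+
$ docker login container-registry.oracle.com
+
+# Enter your Oracle account credentials when prompted, then pull the image:
+$ docker pull container-registry.oracle.com/middleware/oam_cpu:<tag>
+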

You can use this image in the following ways:

+
    +
  • Pull the container image from the Oracle Container Registry automatically during the OAM Kubernetes deployment.
  • +
  • Manually pull the container image from the Oracle Container Registry and then upload it to your own container registry.
  • +
  • Manually pull the container image from the Oracle Container Registry and manually stage it on the master node and each worker node.
  • +
+

Build your own OAM container image using WebLogic Image Tool

+

You can build your own OAM container image using the WebLogic Image Tool. This is recommended if you need to apply one-off patches to a prebuilt OAM container image. For more information about building your own container image with WebLogic Image Tool, see Create or update image.

+

You can use an image built with WebLogic Image Tool in the following ways:

+
    +
  • Manually upload them to your own container registry.
  • +
  • Manually stage them on the master node and each worker node.
  • +
+

Note: This documentation does not tell you how to pull or push the above images into a private container registry, or stage them on the master and worker nodes. Details of this can be found in the Enterprise Deployment Guide.

+

Set up the code repository to deploy OAM domains

+

OAM domain deployment on Kubernetes leverages the WebLogic Kubernetes Operator infrastructure. For deploying the OAM domains, you need to set up the deployment scripts on the master node as below:

+
    +
  1. +

    Create a working directory to set up the source code.

    +
    $ mkdir <workdir>
    +

    For example:

    +
    $ mkdir /scratch/OAMK8S
    +
  2. +
  3. +

    Download the latest OAM deployment scripts from the OAM repository.

    +
    $ cd <workdir>
    +$ git clone https://github.com/oracle/fmw-kubernetes.git --branch release/23.4.1
    +

    For example:

    +
    $ cd /scratch/OAMK8S
    +$ git clone https://github.com/oracle/fmw-kubernetes.git --branch release/23.4.1
    +
  4. +
  5. +

    Set the $WORKDIR environment variable as follows:

    +
    $ export WORKDIR=<workdir>/fmw-kubernetes/OracleAccessManagement
    +

    For example:

    +
    $ export WORKDIR=/scratch/OAMK8S/fmw-kubernetes/OracleAccessManagement
    +
  6. +
  7. +

    Run the following command to see if the WebLogic custom resource definitions already exist:

    +
    $ kubectl get crd
    +

    In the output you should see:

    +
    No resources found
    +

    If you see any of the following:

    +
    NAME                     AGE
    +clusters.weblogic.oracle 5d
    +domains.weblogic.oracle  5d
    +

    then run the following commands to delete the existing CRDs:

    +
    $ kubectl delete crd clusters.weblogic.oracle
    +$ kubectl delete crd domains.weblogic.oracle
    +
  8. +
+

Install the WebLogic Kubernetes Operator

+
    +
  1. +

    On the master node run the following command to create a namespace for the operator:

    +
    $ kubectl create namespace <sample-kubernetes-operator-ns>
    +

    For example:

    +
    $ kubectl create namespace opns
    +

    The output will look similar to the following:

    +
    namespace/opns created
    +
  2. +
  3. +

    Create a service account for the operator in the operator’s namespace by running the following command:

    +
    $ kubectl create serviceaccount -n <sample-kubernetes-operator-ns> <sample-kubernetes-operator-sa>
    +

    For example:

    +
    $ kubectl create serviceaccount -n opns op-sa
    +

    The output will look similar to the following:

    +
    serviceaccount/op-sa created
    +
  4. +
  5. +

    Run the following helm command to install and start the operator:

    +
    $ cd $WORKDIR
    +$ helm install weblogic-kubernetes-operator kubernetes/charts/weblogic-operator \
    +--namespace <sample-kubernetes-operator-ns> \
    +--set image=ghcr.io/oracle/weblogic-kubernetes-operator:4.1.2 \
    +--set serviceAccount=<sample-kubernetes-operator-sa> \
    +--set "enableClusterRoleBinding=true" \
    +--set "domainNamespaceSelectionStrategy=LabelSelector" \
    +--set "domainNamespaceLabelSelector=weblogic-operator\=enabled" \
    +--set "javaLoggingLevel=FINE" --wait
    +

    For example:

    +
    $ cd $WORKDIR
    +$ helm install weblogic-kubernetes-operator kubernetes/charts/weblogic-operator \
    +--namespace opns \
    +--set image=ghcr.io/oracle/weblogic-kubernetes-operator:4.1.2 \
    +--set serviceAccount=op-sa \
    +--set "enableClusterRoleBinding=true" \
    +--set "domainNamespaceSelectionStrategy=LabelSelector" \
    +--set "domainNamespaceLabelSelector=weblogic-operator\=enabled" \
    +--set "javaLoggingLevel=FINE" --wait
    +

    The output will look similar to the following:

    +
    NAME: weblogic-kubernetes-operator
    +LAST DEPLOYED: <DATE>
    +NAMESPACE: opns
    +STATUS: deployed
    +REVISION: 1
    +TEST SUITE: None
    +
  6. +
  7. +

    Verify that the operator’s pod and services are running by executing the following command:

    +
    $ kubectl get all -n <sample-kubernetes-operator-ns>
    +

    For example:

    +
    $ kubectl get all -n opns
    +

    The output will look similar to the following:

    +
    NAME                                             READY   STATUS    RESTARTS   AGE
    +pod/weblogic-operator-676d5cc6f4-wct7b           1/1     Running   0          40s
    +pod/weblogic-operator-webhook-7996b8b58b-9sfhd   1/1     Running   0          40s
    +
    +NAME                                     TYPE        CLUSTER-IP     EXTERNAL-IP   PORT(S)             AGE
    +service/weblogic-operator-webhook-svc    ClusterIP   10.100.91.237  <none>        8083/TCP,8084/TCP   47s
    +
    +NAME                                        READY   UP-TO-DATE   AVAILABLE   AGE
    +deployment.apps/weblogic-operator           1/1     1            1           40s
    +deployment.apps/weblogic-operator-webhook   1/1     1            1           40s
    +
    +NAME                                                   DESIRED   CURRENT   READY   AGE
    +replicaset.apps/weblogic-operator-676d5cc6f4           1         1         1       40s
    +replicaset.apps/weblogic-operator-webhook-7996b8b58b   1         1         1       46s
    +
  8. +
  9. +

    Verify the operator pod’s log:

    +
    $ kubectl logs -n <sample-kubernetes-operator-ns> -c weblogic-operator deployments/weblogic-operator
    +

    For example:

    +
    $ kubectl logs -n opns -c weblogic-operator deployments/weblogic-operator
    +

    The output will look similar to the following:

    +
    ...
    +{"timestamp":"<DATE>","thread":21,"fiber":"","namespace":"","domainUID":"","level":"FINE","class":"oracle.kubernetes.operator.DeploymentLiveness","method":"run","timeInMillis":1678183291191,"message":"Liveness file last modified time set","exception":"","code":"","headers":{},"body":""}
    +{"timestamp":"<DATE>","thread":37,"fiber":"","namespace":"","domainUID":"","level":"FINE","class":"oracle.kubernetes.operator.DeploymentLiveness","method":"run","timeInMillis":1678183296193,"message":"Liveness file last modified time set","exception":"","code":"","headers":{},"body":""}
    +{"timestamp":"<DATE>","thread":31,"fiber":"","namespace":"","domainUID":"","level":"FINE","class":"oracle.kubernetes.operator.DeploymentLiveness","method":"run","timeInMillis":1678183301194,"message":"Liveness file last modified time set","exception":"","code":"","headers":{},"body":""}
    +{"timestamp":"<DATE>","thread":31,"fiber":"","namespace":"","domainUID":"","level":"FINE","class":"oracle.kubernetes.operator.DeploymentLiveness","method":"run","timeInMillis":1678183306195,"message":"Liveness file last modified time set","exception":"","code":"","headers":{},"body":""}
    +
  10. +
+

Create a namespace for Oracle Access Management

+
    +
  1. +

    Run the following command to create a namespace for the domain:

    +
    $ kubectl create namespace <domain_namespace>
    +

    For example:

    +
    $ kubectl create namespace oamns
    +

    The output will look similar to the following:

    +
    namespace/oamns created
    +
  2. +
  3. +

    Run the following command to label the namespace so that the WebLogic Kubernetes Operator can manage it:

    +
    $ kubectl label namespaces <domain_namespace> weblogic-operator=enabled
    +

    For example:

    +
    $ kubectl label namespaces oamns weblogic-operator=enabled
    +

    The output will look similar to the following:

    +
    namespace/oamns labeled
    +
  4. +
  5. +

    Run the following command to check the label was created:

    +
    $ kubectl describe namespace <domain_namespace>
    +

    For example:

    +
    $ kubectl describe namespace oamns
    +

    The output will look similar to the following:

    +
    Name:         oamns
    +Labels:       kubernetes.io/metadata.name=oamns
    +              weblogic-operator=enabled
    +Annotations:  <none>
    +Status:       Active
    +
    +No resource quota.
    +
    +No LimitRange resource.
    +
  6. +
+

Create a Kubernetes secret for the container registry

+

In this section you create a secret that stores the credentials for the container registry where the OAM image is stored.

+

If you are not using a container registry and have loaded the images on each of the master and worker nodes, then there is no need to create the registry secret.

+
    +
  1. +

    Run the following command to create the secret:

    +
    kubectl create secret docker-registry "orclcred" --docker-server=<CONTAINER_REGISTRY> \
    +--docker-username="<USER_NAME>" \
    +--docker-password=<PASSWORD> --docker-email=<EMAIL_ID> \
    +--namespace=<domain_namespace>
    +

    For example, if using Oracle Container Registry:

    +
    kubectl create secret docker-registry "orclcred" --docker-server=container-registry.oracle.com \
    +--docker-username="user@example.com" \
    +--docker-password=password --docker-email=user@example.com \
    +--namespace=oamns
    +

    Replace <USER_NAME> and <PASSWORD> with the credentials for the registry with the following caveats:

    +
      +
    • +

      If using Oracle Container Registry to pull the OAM container image, this is the username and password used to log in to Oracle Container Registry. Before you can use this image you must log in to Oracle Container Registry, navigate to Middleware > oam_cpu and accept the license agreement.

      +
    • +
    • +

      If using your own container registry to store the OAM container image, this is the username and password (or token) for your container registry.

      +
    • +
    +

    The output will look similar to the following:

    +
    secret/orclcred created
    +
  2. +
+
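
Before moving on, you can optionally confirm the secret exists. This is a quick check only; the secret should be listed with a TYPE of kubernetes.io/dockerconfigjson.

    $ kubectl get secret orclcred -n oamns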

RCU schema creation

+

In this section you create the RCU schemas in the Oracle Database.

+

Before following the steps in this section, make sure that the database and listener are up and running and you can connect to the database via SQL*Plus or other client tool.

+
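
As a quick connectivity sanity check (a sketch only), you can connect from any host with an Oracle client installed. The hostname, port, and service name below are the example values used later in this section.

    $ sqlplus sys/<password>@//mydatabasehost.example.com:1521/orcl.example.com as sysdba
    SQL> SELECT 1 FROM dual;
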
    +
  1. +

    If using Oracle Container Registry or your own container registry for your OAM container image, run the following command to create a helper pod to run RCU:

    +
    $ kubectl run --image=<image_name-from-registry>:<tag> --image-pull-policy="IfNotPresent" --overrides='{"apiVersion": "v1", "spec":{"imagePullSecrets": [{"name": "orclcred"}]}}' helper -n <domain_namespace> -- sleep infinity
    +

    For example:

    +
    $ kubectl run --image=container-registry.oracle.com/middleware/oam_cpu:12.2.1.4-jdk8-ol7-<October`23> --image-pull-policy="IfNotPresent" --overrides='{"apiVersion": "v1","spec":{"imagePullSecrets": [{"name": "orclcred"}]}}' helper -n oamns -- sleep infinity
    +

    If you are not using a container registry and have loaded the image on each of the master and worker nodes, run the following command:

    +
    $ kubectl run helper --image <image>:<tag> -n oamns -- sleep infinity
    +

    For example:

    +
    $ kubectl run helper --image oracle/oam:12.2.1.4-jdk8-ol7-<October`23> -n oamns -- sleep infinity
    +

    The output will look similar to the following:

    +
    pod/helper created
    +
  2. +
  3. +

    Run the following command to check the pod is running:

    +
    $ kubectl get pods -n <domain_namespace>
    +

    For example:

    +
    $ kubectl get pods -n oamns
    +

    The output will look similar to the following:

    +
    NAME     READY   STATUS    RESTARTS   AGE
    +helper   1/1     Running   0          3m
    +

    Note: If you are pulling the image from a container registry it may take several minutes before the pod reaches a READY state of 1/1. While the pod is starting, you can check its status by running the following command:

    +
    $ kubectl describe pod helper -n oamns
    +
  4. +
  5. +

    Run the following command to start a bash shell in the helper pod:

    +
    $ kubectl exec -it helper -n <domain_namespace> -- /bin/bash
    +

    For example:

    +
    $ kubectl exec -it helper -n oamns -- /bin/bash
    +

    This will take you into a bash shell in the running helper pod:

    +
    [oracle@helper ~]$
    +
  6. +
  7. +

    In the helper bash shell run the following commands to set the environment:

    +
    [oracle@helper ~]$ export CONNECTION_STRING=<db_host.domain>:<db_port>/<service_name>
    +[oracle@helper ~]$ export RCUPREFIX=<rcu_schema_prefix>
    +[oracle@helper ~]$ echo -e <db_pwd>"\n"<rcu_schema_pwd> > /tmp/pwd.txt
    +[oracle@helper ~]$ cat /tmp/pwd.txt
    +

    where:

    +

    <db_host.domain>:<db_port>/<service_name> is your database connect string

    +

    <rcu_schema_prefix> is the RCU schema prefix you want to set

    +

    <db_pwd> is the SYS password for the database

    +

    <rcu_schema_pwd> is the password you want to set for the <rcu_schema_prefix>

    +

    For example:

    +
    [oracle@helper ~]$ export CONNECTION_STRING=mydatabasehost.example.com:1521/orcl.example.com
    +[oracle@helper ~]$ export RCUPREFIX=OAMK8S
    +[oracle@helper ~]$ echo -e <password>"\n"<password> > /tmp/pwd.txt
    +[oracle@helper ~]$ cat /tmp/pwd.txt
    +<password>
    +<password>
    +
  8. +
  9. +

    In the helper bash shell run the following command to create the RCU schemas in the database:

    +
    [oracle@helper ~]$ /u01/oracle/oracle_common/bin/rcu -silent -createRepository -databaseType ORACLE -connectString \
    +$CONNECTION_STRING -dbUser sys -dbRole sysdba -useSamePasswordForAllSchemaUsers true \
    +-selectDependentsForComponents true -schemaPrefix $RCUPREFIX -component MDS -component IAU \
    +-component IAU_APPEND -component IAU_VIEWER -component OPSS -component WLS -component STB -component OAM -f < /tmp/pwd.txt
    +

    The output will look similar to the following:

    +
    RCU Logfile: /tmp/RCU<DATE>/logs/rcu.log
    +Processing command line ....
    +Repository Creation Utility - Checking Prerequisites
    +Checking Global Prerequisites
    +Repository Creation Utility - Checking Prerequisites
    +Checking Component Prerequisites
    +Repository Creation Utility - Creating Tablespaces
    +Validating and Creating Tablespaces
    +Create tablespaces in the repository database
    +Repository Creation Utility - Create
    +Repository Create in progress.
    +Executing pre create operations
    +Percent Complete: 18
    +Percent Complete: 18
    +Percent Complete: 19
    +Percent Complete: 20
    +Percent Complete: 21
    +Percent Complete: 21
    +Percent Complete: 22
    +Percent Complete: 22
    +Creating Common Infrastructure Services(STB)
    +Percent Complete: 30
    +Percent Complete: 30
    +Percent Complete: 39
    +Percent Complete: 39
    +Percent Complete: 39
    +Creating Audit Services Append(IAU_APPEND)
    +Percent Complete: 46
    +Percent Complete: 46
    +Percent Complete: 55
    +Percent Complete: 55
    +Percent Complete: 55
    +Creating Audit Services Viewer(IAU_VIEWER)
    +Percent Complete: 62
    +Percent Complete: 62
    +Percent Complete: 63
    +Percent Complete: 63
    +Percent Complete: 64
    +Percent Complete: 64
    +Creating Metadata Services(MDS)
    +Percent Complete: 73
    +Percent Complete: 73
    +Percent Complete: 73
    +Percent Complete: 74
    +Percent Complete: 74
    +Percent Complete: 75
    +Percent Complete: 75
    +Percent Complete: 75
    +Creating Weblogic Services(WLS)
    +Percent Complete: 80
    +Percent Complete: 80
    +Percent Complete: 83
    +Percent Complete: 83
    +Percent Complete: 91
    +Percent Complete: 98
    +Percent Complete: 98
    +Creating Audit Services(IAU)
    +Percent Complete: 100
    +Creating Oracle Platform Security Services(OPSS)
    +Creating Oracle Access Manager(OAM)
    +Executing post create operations
    +Repository Creation Utility: Create - Completion Summary
    +Database details:
    +-----------------------------
    +Host Name : mydatabasehost.example.com
    +Port : 1521
    +Service Name : ORCL.EXAMPLE.COM
    +Connected As : sys
    +Prefix for (prefixable) Schema Owners : OAMK8S
    +RCU Logfile                                  : /tmp/RCU<DATE>/logs/rcu.log
    +
    +Component schemas created:
    +-----------------------------
    +Component                                    Status         Logfile
    +
    +Common Infrastructure Services               Success        /tmp/RCU<DATE>/logs/stb.log
    +Oracle Platform Security Services            Success        /tmp/RCU<DATE>/logs/opss.log
    +Oracle Access Manager                        Success        /tmp/RCU<DATE>/logs/oam.log
    +Audit Services                               Success        /tmp/RCU<DATE>/logs/iau.log
    +Audit Services Append                        Success        /tmp/RCU<DATE>/logs/iau_append.log
    +Audit Services Viewer                        Success        /tmp/RCU<DATE>/logs/iau_viewer.log
    +Metadata Services                            Success        /tmp/RCU<DATE>/logs/mds.log
    +WebLogic Services                            Success        /tmp/RCU<DATE>/logs/wls.log
    +
    +Repository Creation Utility - Create : Operation Completed
    +[oracle@helper ~]$
    +
  10. +
  11. +

    Exit the helper bash shell by issuing the command exit.

    +
  12. +
+
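
Optionally, you can confirm the schemas were registered by querying the database from any SQL client. This is a sketch only; it assumes the standard schema_version_registry view is available in the repository database and uses the OAMK8S prefix from the example above.

    SQL> SELECT owner, version, status FROM schema_version_registry WHERE owner LIKE 'OAMK8S%';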

Preparing the environment for domain creation

+

In this section you prepare the environment for the OAM domain creation. This involves the following steps:

+

a. Creating Kubernetes secrets for the domain and RCU

+

b. Create a Kubernetes persistent volume and persistent volume claim

+

Creating Kubernetes secrets for the domain and RCU

+
    +
  1. +

    Create a Kubernetes secret for the domain using the create-weblogic-credentials script in the same Kubernetes namespace as the domain:

    +
    $ cd $WORKDIR/kubernetes/create-weblogic-domain-credentials
    +$ ./create-weblogic-credentials.sh -u weblogic -p <pwd> -n <domain_namespace> -d <domain_uid> -s <kubernetes_domain_secret>
    +

    where:

    +

    -u weblogic is the WebLogic username

    +

    -p <pwd> is the password for the weblogic user

    +

    -n <domain_namespace> is the domain namespace

    +

    -d <domain_uid> is the domain UID to be created. The default is domain1 if not specified

    +

    -s <kubernetes_domain_secret> is the name you want to create for the secret for this namespace. The default is to use the domainUID if not specified

    +

    For example:

    +
    $ cd $WORKDIR/kubernetes/create-weblogic-domain-credentials
    +$ ./create-weblogic-credentials.sh -u weblogic -p <password> -n oamns -d accessdomain -s accessdomain-credentials
    +

    The output will look similar to the following:

    +
    secret/accessdomain-credentials created
    +secret/accessdomain-credentials labeled
    +The secret accessdomain-credentials has been successfully created in the oamns namespace.
    +
  2. +
  3. +

    Verify the secret is created using the following command:

    +
    $ kubectl get secret <kubernetes_domain_secret> -o yaml -n <domain_namespace>
    +

    For example:

    +
    $ kubectl get secret accessdomain-credentials -o yaml -n oamns
    +

    The output will look similar to the following:

    +
    apiVersion: v1
    +data:
    +  password: V2VsY29tZTE=
    +  username: d2VibG9naWM=
    +kind: Secret
    +metadata:
    +  creationTimestamp: "<DATE>"
    +  labels:
    +    weblogic.domainName: accessdomain
    +    weblogic.domainUID: accessdomain
    +  name: accessdomain-credentials
    +  namespace: oamns
    +  resourceVersion: "29428101"
    +  uid: 6dac0561-d157-4144-9ed7-c475a080eb3a
    +type: Opaque
    +
  4. +
  5. +

    Create a Kubernetes secret for RCU using the create-weblogic-credentials script in the same Kubernetes namespace as the domain:

    +
    $ cd $WORKDIR/kubernetes/create-rcu-credentials
    +$ ./create-rcu-credentials.sh -u <rcu_prefix> -p <rcu_schema_pwd> -a sys -q <sys_db_pwd> -d <domain_uid> -n <domain_namespace> -s <kubernetes_rcu_secret>
    +

    where:

    +

    -u <rcu_prefix> is the name of the RCU schema prefix created previously

    +

    -p <rcu_schema_pwd> is the password for the RCU schema prefix

    +

    -q <sys_db_pwd> is the sys database password

    +

    -d <domain_uid> is the domain_uid that you created earlier

    +

    -n <domain_namespace> is the domain namespace

    +

    -s <kubernetes_rcu_secret> is the name of the rcu secret to create

    +

    For example:

    +
    $ cd $WORKDIR/kubernetes/create-rcu-credentials
    +$ ./create-rcu-credentials.sh -u OAMK8S -p <password> -a sys -q <password> -d accessdomain -n oamns -s accessdomain-rcu-credentials
    +

    The output will look similar to the following:

    +
    secret/accessdomain-rcu-credentials created
    +secret/accessdomain-rcu-credentials labeled
    +The secret accessdomain-rcu-credentials has been successfully created in the oamns namespace.
    +
  6. +
  7. +

    Verify the secret is created using the following command (a sketch for decoding the stored values is shown after this list):

    +
    $ kubectl get secret <kubernetes_rcu_secret> -o yaml -n <domain_namespace>
    +

    For example:

    +
    $ kubectl get secret accessdomain-rcu-credentials -o yaml -n oamns
    +

    The output will look similar to the following:

    +
    apiVersion: v1
    +data:
    +  password: T3JhY2xlXzEyMw==
    +  sys_password: T3JhY2xlXzEyMw==
    +  sys_username: c3lz
    +  username: T0FNSzhT
    +kind: Secret
    +metadata:
    +  creationTimestamp: "<DATE>"
    +  labels:
    +    weblogic.domainName: accessdomain
    +    weblogic.domainUID: accessdomain
    +  name: accessdomain-rcu-credentials
    +  namespace: oamns
    +  resourceVersion: "29428242"
    +  uid: 1b81b6e0-fd7d-40b8-a060-454c8d23f4dc
    +type: Opaque
    +
  8. +
+
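
The secret data shown above is stored base64-encoded. If you want to double-check a stored value, a decoding sketch is:

    $ kubectl get secret accessdomain-credentials -n oamns -o jsonpath='{.data.username}' | base64 -d
    weblogic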

Create a Kubernetes persistent volume and persistent volume claim

+

As referenced in Prerequisites the nodes in the Kubernetes cluster must have access to a persistent volume such as a Network File System (NFS) mount or a shared file system.

+

A persistent volume is the equivalent of a disk mount, made available inside a container. A Kubernetes persistent volume is an arbitrary name (determined in this case by Oracle) that is mapped to a physical volume on a disk.

+

When a container is started, it needs to mount that volume. The physical volume should be on a shared disk accessible by all the Kubernetes worker nodes because it is not known on which worker node the container will be started. In the case of Identity and Access Management, the persistent volume does not get erased when a container stops. This enables persistent configurations.

+

The example below uses an NFS mounted volume (<persistent_volume>/accessdomainpv). Other volume types can also be used. See the official Kubernetes documentation for Volumes.

+

Note: The persistent volume directory needs to be accessible to both the master and worker node(s). In this example /scratch/shared/accessdomainpv is accessible from all nodes via NFS.

+
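
For reference, the PV and PVC generated later in this section are conceptually similar to the manifests below. This is an illustrative sketch only, using the example names, NFS server, and path from this section; the create-pv-pvc.sh script generates the actual files.

    apiVersion: v1
    kind: PersistentVolume
    metadata:
      name: accessdomain-domain-pv
      labels:
        weblogic.domainUID: accessdomain
    spec:
      storageClassName: accessdomain-domain-storage-class
      capacity:
        storage: 10Gi
      accessModes:
        - ReadWriteMany
      persistentVolumeReclaimPolicy: Retain
      nfs:
        server: mynfsserver
        path: /scratch/shared/accessdomainpv
    ---
    apiVersion: v1
    kind: PersistentVolumeClaim
    metadata:
      name: accessdomain-domain-pvc
      namespace: oamns
      labels:
        weblogic.domainUID: accessdomain
    spec:
      storageClassName: accessdomain-domain-storage-class
      accessModes:
        - ReadWriteMany
      resources:
        requests:
          storage: 10Gi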

To create a Kubernetes persistent volume, perform the following steps:

+
    +
  1. +

    Make a backup copy of the create-pv-pvc-inputs.yaml file and create required directories:

    +
    $ cd $WORKDIR/kubernetes/create-weblogic-domain-pv-pvc
    +$ cp create-pv-pvc-inputs.yaml create-pv-pvc-inputs.yaml.orig
    +$ mkdir output
    +$ mkdir -p <persistent_volume>/accessdomainpv
    +$ sudo chown -R 1000:0 <persistent_volume>/accessdomainpv
    +

    For example:

    +
    $ cd $WORKDIR/kubernetes/create-weblogic-domain-pv-pvc
    +$ cp create-pv-pvc-inputs.yaml create-pv-pvc-inputs.yaml.orig
    +$ mkdir output
    +$ mkdir -p /scratch/shared/accessdomainpv
    +$ sudo chown -R 1000:0 /scratch/shared/accessdomainpv
    +
  2. +
  3. +

    On the master node run the following command to ensure it is possible to read and write to the persistent volume:

    +
    cd <persistent_volume>/accessdomainpv
    +touch filemaster.txt
    +ls filemaster.txt
    +

    For example:

    +
    cd /scratch/shared/accessdomainpv
    +touch filemaster.txt
    +ls filemaster.txt
    +

    On the first worker node run the following to ensure it is possible to read and write to the persistent volume:

    +
    cd /scratch/shared/accessdomainpv
    +ls filemaster.txt
    +touch fileworker1.txt
    +ls fileworker1.txt
    +

    Repeat the above on any other worker nodes, for example creating fileworker2.txt, and so on. Once you have proven that it is possible to read and write to the persistent volume from each node, delete the files created.

    +
  4. +
  5. +

    Navigate to $WORKDIR/kubernetes/create-weblogic-domain-pv-pvc:

    +
    $ cd $WORKDIR/kubernetes/create-weblogic-domain-pv-pvc
    +

    and edit the create-pv-pvc-inputs.yaml file and update the following parameters to reflect your settings. Save the file when complete:

    +
    baseName: <domain>
    +domainUID: <domain_uid>
    +namespace: <domain_namespace>
    +weblogicDomainStorageType: NFS
    +weblogicDomainStorageNFSServer: <nfs_server>
    +weblogicDomainStoragePath: <physical_path_of_persistent_storage>
    +weblogicDomainStorageSize: 10Gi
    +

    For example:

    +
    	
    +# The base name of the pv and pvc
    +baseName: domain
    +
    +# Unique ID identifying a domain.
    +# If left empty, the generated pv can be shared by multiple domains
    +# This ID must not contain an underscore ("_"), and must be lowercase and unique across all domains in a Kubernetes cluster.
    +domainUID: accessdomain
    +	
    +# Name of the namespace for the persistent volume claim
    +namespace: oamns
    +...
    +# Persistent volume type for the persistent storage.
    +# The value must be 'HOST_PATH' or 'NFS'.
    +# If using 'NFS', weblogicDomainStorageNFSServer must be specified.
    +weblogicDomainStorageType: NFS
    +
    +# The server name or ip address of the NFS server to use for the persistent storage.
    +# The following line must be uncommented and customized if weblogicDomainStorageType is NFS:
    +weblogicDomainStorageNFSServer: mynfsserver
    +
    +# Physical path of the persistent storage.
    +# When weblogicDomainStorageType is set to HOST_PATH, this value should be set the to path to the
    +# domain storage on the Kubernetes host.
    +# When weblogicDomainStorageType is set to NFS, then weblogicDomainStorageNFSServer should be set
    +# to the IP address or name of the NFS server, and this value should be set to the exported path
    +# on that server.
    +# Note that the path where the domain is mounted in the WebLogic containers is not affected by this
    +# setting, that is determined when you create your domain.
    +# The following line must be uncommented and customized:
    +weblogicDomainStoragePath: /scratch/shared/accessdomainpv
    +   
    +# Reclaim policy of the persistent storage
    +# The valid values are: 'Retain', 'Delete', and 'Recycle'
    +weblogicDomainStorageReclaimPolicy: Retain
    +
    +# Total storage allocated to the persistent storage.
    +weblogicDomainStorageSize: 10Gi
    +
  6. +
  7. +

    Execute the create-pv-pvc.sh script to create the PV and PVC configuration files:

    +
    $ ./create-pv-pvc.sh -i create-pv-pvc-inputs.yaml -o output
    +

    The output will be similar to the following:

    +
    Input parameters being used
    +export version="create-weblogic-sample-domain-pv-pvc-inputs-v1"
    +export baseName="domain"
    +export domainUID="accessdomain"
    +export namespace="oamns"
    +export weblogicDomainStorageType="NFS"
    +export weblogicDomainStorageNFSServer="mynfsserver"
    +export weblogicDomainStoragePath="/scratch/shared/accessdomainpv"
    +export weblogicDomainStorageReclaimPolicy="Retain"
    +export weblogicDomainStorageSize="10Gi"
    +
    +Generating output/pv-pvcs/accessdomain-domain-pv.yaml
    +Generating output/pv-pvcs/accessdomain-domain-pvc.yaml
    +The following files were generated:
    +  output/pv-pvcs/accessdomain-domain-pv.yaml
    +  output/pv-pvcs/accessdomain-domain-pvc.yaml
    +
  8. +
  9. +

    Run the following to show the files are created:

    +
    $ ls output/pv-pvcs
    +accessdomain-domain-pv.yaml  accessdomain-domain-pvc.yaml  create-pv-pvc-inputs.yaml
    +
  10. +
  11. +

    Run the following kubectl command to create the PV and PVC in the domain namespace:

    +
    $ kubectl create -f output/pv-pvcs/accessdomain-domain-pv.yaml -n <domain_namespace>
    +$ kubectl create -f output/pv-pvcs/accessdomain-domain-pvc.yaml -n <domain_namespace>
    +

    For example:

    +
    $ kubectl create -f output/pv-pvcs/accessdomain-domain-pv.yaml -n oamns
    +$ kubectl create -f output/pv-pvcs/accessdomain-domain-pvc.yaml -n oamns
    +

    The output will look similar to the following:

    +
    persistentvolume/accessdomain-domain-pv created
    +persistentvolumeclaim/accessdomain-domain-pvc created
    +
  12. +
  13. +

    Run the following commands to verify the PV and PVC were created successfully:

    +
    $ kubectl describe pv <pv_name>
    +$ kubectl describe pvc <pvc_name> -n <domain_namespace>
    +

    For example:

    +
    $ kubectl describe pv accessdomain-domain-pv
    +$ kubectl describe pvc accessdomain-domain-pvc -n oamns
    +

    The output will look similar to the following:

    +
    $ kubectl describe pv accessdomain-domain-pv
    +   
    +Name:           accessdomain-domain-pv
    +Labels:         weblogic.domainUID=accessdomain
    +Annotations:    pv.kubernetes.io/bound-by-controller: yes
    +Finalizers:     [kubernetes.io/pv-protection]
    +StorageClass:   accessdomain-domain-storage-class
    +Status:         Bound
    +Claim:          oamns/accessdomain-domain-pvc
    +Reclaim Policy: Retain
    +Access Modes:   RWX
    +VolumeMode:     Filesystem
    +Capacity:       10Gi
    +Node Affinity:  <none>
    +Message:
    +Source:
    +    Type:      NFS (an NFS mount that lasts the lifetime of a pod)
    +    Server:    mynfsserver
    +    Path:      /scratch/shared/accessdomainpv
    +    ReadOnly:  false
    +Events: <none>
    +
    $ kubectl describe pvc accessdomain-domain-pvc -n oamns
    +   
    +Name:            accessdomain-domain-pvc
    +Namespace:       oamns
    +StorageClass:    accessdomain-domain-storage-class
    +Status:          Bound
    +Volume:          accessdomain-domain-pv
    +Labels:          weblogic.domainUID=accessdomain
    +Annotations:     pv.kubernetes.io/bind-completed: yes
    +                 pv.kubernetes.io/bound-by-controller: yes
    +Finalizers:     [kubernetes.io/pvc-protection]
    +Capacity:       10Gi
    +Access Modes:   RWX
    +VolumeMode:     Filesystem
    +Events:         <none>
    +Mounted By:     <none>
    +

    You are now ready to create the OAM domain as per Create OAM Domains.

    +
  14. +
+ + + + + +
+ +
+ + +
+ + +
+ + + +
+ +
+
+
+ + + + + + + + + + + + + + + + + + diff --git a/docs/23.4.1/idm-products/oam/prepare-your-environment/index.xml b/docs/23.4.1/idm-products/oam/prepare-your-environment/index.xml new file mode 100644 index 000000000..fd98f19d2 --- /dev/null +++ b/docs/23.4.1/idm-products/oam/prepare-your-environment/index.xml @@ -0,0 +1,14 @@ + + + + Prepare your environment on Oracle Fusion Middleware on Kubernetes + /fmw-kubernetes/23.4.1/idm-products/oam/prepare-your-environment/ + Recent content in Prepare your environment on Oracle Fusion Middleware on Kubernetes + Hugo -- gohugo.io + en-us + + + + + + \ No newline at end of file diff --git a/docs/23.4.1/idm-products/oam/prerequisites/index.html b/docs/23.4.1/idm-products/oam/prerequisites/index.html new file mode 100644 index 000000000..212465faf --- /dev/null +++ b/docs/23.4.1/idm-products/oam/prerequisites/index.html @@ -0,0 +1,3983 @@ + + + + + + + + + + + + Prerequisites :: Oracle Fusion Middleware on Kubernetes + + + + + + + + + + + + + + + + + + + + + + + + + +
+
+
+ +
+
+ + + + + + + +
+
+ +
+ +
+ +
+ +

+ + Prerequisites +

+ + + + + + + +

Introduction

+

This document provides information about the system requirements and limitations for deploying and running OAM domains with the WebLogic Kubernetes Operator 4.1.2.

+

System requirements for OAM domains

+
    +
  • +

    A running Kubernetes cluster that meets the following requirements:

    +
      +
    • The Kubernetes cluster must have sufficient nodes and resources.
    • +
    • An installation of Helm is required on the Kubernetes cluster. Helm is used to create and deploy the necessary resources and run the WebLogic Kubernetes Operator in a Kubernetes cluster
    • +
    • A supported container engine must be installed and running on the Kubernetes cluster.
    • +
    • The Kubernetes cluster and container engine must meet the minimum version requirements outlined in document ID 2723908.1 on My Oracle Support.
    • +
    • You must have the cluster-admin role to install the WebLogic Kubernetes Operator.
    • +
    • The nodes in the Kubernetes cluster must have access to a persistent volume such as a Network File System (NFS) mount or a shared file system.
    • +
    • The system clocks on each node of the Kubernetes cluster must be synchronized. Run the date command simultaneously on all the nodes in each cluster and then synchronize accordingly (a sketch of such a check follows this list).
    • +
    +
  • +
  • +

    A running Oracle Database 12.2.0.1 or later. The database must be a supported version for OAM as outlined in Oracle Fusion Middleware 12c certifications. It must meet the requirements as outlined in About Database Requirements for an Oracle Fusion Middleware Installation and in RCU Requirements for Oracle Databases. It is recommended that the database initialization parameters are set as per Minimum Initialization Parameters.

    +
  • +
+
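
For example, a simple way to compare clocks across nodes is shown below. This is a sketch only, assuming passwordless ssh from the master node and hypothetical node names.

    $ for node in master.example.com worker1.example.com worker2.example.com; do ssh ${node} date; done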

Note: This documentation does not tell you how to install a Kubernetes cluster, Helm, the container engine, or how to push container images to a container registry. Please refer to your vendor-specific documentation for this information. Also see Getting Started.

+

Limitations

+

Compared to running a WebLogic Server domain in Kubernetes using the operator, the following limitations currently exist for OAM domains:

+
    +
  • In this release, OAM domains are supported using the “domain on a persistent volume” model only, where the domain home is located in a persistent volume (PV). The “domain in image” model is not supported.
  • +
  • Only configured clusters are supported. Dynamic clusters are not supported for OAM domains. Note that you can still use all of the scaling features, but you need to define the maximum size of your cluster at domain creation time, using the parameter configuredManagedServerCount. For more details on this parameter, see Prepare the create domain script. It is recommended to configure your cluster slightly larger than the maximum size you plan to scale to, and to test rigorously at that maximum size to make sure your system scales as expected.
  • +
  • The WebLogic Monitoring Exporter currently supports the WebLogic MBean trees only. Support for JRF MBeans has not been added yet.
  • +
  • We do not currently support running OAM in non-Linux containers.
  • +
+ + + + + +
+ +
+ + +
+ + +
+ + + +
+ +
+
+
+ + + + + + + + + + + + + + + + + + diff --git a/docs/23.4.1/idm-products/oam/prerequisites/index.xml b/docs/23.4.1/idm-products/oam/prerequisites/index.xml new file mode 100644 index 000000000..daa751a03 --- /dev/null +++ b/docs/23.4.1/idm-products/oam/prerequisites/index.xml @@ -0,0 +1,14 @@ + + + + Prerequisites on Oracle Fusion Middleware on Kubernetes + /fmw-kubernetes/23.4.1/idm-products/oam/prerequisites/ + Recent content in Prerequisites on Oracle Fusion Middleware on Kubernetes + Hugo -- gohugo.io + en-us + + + + + + \ No newline at end of file diff --git a/docs/23.4.1/idm-products/oam/release-notes/index.html b/docs/23.4.1/idm-products/oam/release-notes/index.html new file mode 100644 index 000000000..9db30c920 --- /dev/null +++ b/docs/23.4.1/idm-products/oam/release-notes/index.html @@ -0,0 +1,4264 @@ + + + + + + + + + + + + Release Notes :: Oracle Fusion Middleware on Kubernetes + + + + + + + + + + + + + + + + + + + + + + + + + +
+
+
+ +
+
+ + + + +
+
+ +
+
+ + +
+
+ +
+ +
+ +
+ +

+ + Release Notes +

+ + + + + + + +

Review the latest changes and known issues for Oracle Access Management on Kubernetes.

+

Recent changes

+ + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + +
Date | Version | Change
October, 2023 | 23.4.1 | Supports Oracle Access Management 12.2.1.4 domain deployment using the October 2023 container image which contains the October Patch Set Update (PSU) and other fixes released with the Critical Patch Update (CPU) program.
This release contains the following changes:
+ Support for WebLogic Kubernetes Operator 4.1.2.
+ Ability to set resource requests and limits for CPU and memory on a cluster resource. See, Set the OAM server memory parameters.
+ Support for the Kubernetes Horizontal Pod Autoscaler (HPA). See, Kubernetes Horizontal Pod Autoscaler.
+ The default domain now only starts one OAM Managed Server (oam_server1) and one Policy Managed Server (policy_mgr1).
If upgrading to October 23 (23.4.1) from October 22 (22.4.1) or later, you must upgrade the following in order:
1. WebLogic Kubernetes Operator to 4.1.2
2. Patch the OAM container image to October 23
If upgrading to October 23 (23.4.1) from a release prior to October 22 (22.4.1), you must upgrade the following in order:
1. WebLogic Kubernetes Operator to 4.1.2
2. Patch the OAM container image to October 23
3. Upgrade the Ingress
4. Upgrade Elasticsearch and Kibana
See Patch and Upgrade for these instructions.
July, 2023 | 23.3.1 | Supports Oracle Access Management 12.2.1.4 domain deployment using the July 2023 container image which contains the July Patch Set Update (PSU) and other fixes released with the Critical Patch Update (CPU) program.
If upgrading to July 23 (23.3.1) from April 23 (23.2.1), upgrade as follows:
1. Patch the OAM container image to July 23
If upgrading to July 23 (23.3.1) from October 22 (22.4.1), or January 23 (23.1.1) release, you must upgrade the following in order:
1. WebLogic Kubernetes Operator to 4.0.4
2. Patch the OAM container image to July 23
If upgrading to July 23 (23.3.1) from a release prior to October 22 (22.4.1) release, you must upgrade the following in order:
1. WebLogic Kubernetes Operator to 4.0.4
2. Patch the OAM container image to July 23
3. Upgrade the Ingress
4. Upgrade Elasticsearch and Kibana
See Patch and Upgrade for these instructions.
April, 2023 | 23.2.1 | Supports Oracle Access Management 12.2.1.4 domain deployment using the April 2023 container image which contains the April Patch Set Update (PSU) and other fixes released with the Critical Patch Update (CPU) program.
Support for WebLogic Kubernetes Operator 4.0.4.
Changes to stopping/starting pods due to domain and cluster configuration being separated and parameter changes (IF_NEEDED, NEVER to IfNeeded, Never).
If upgrading to April 23 (23.2.1) from October 22 (22.4.1) or later, you must upgrade in the following order:
1. WebLogic Kubernetes Operator to 4.0.4
2. Patch the OAM container image to April 23
If upgrading to April 23 (23.2.1) from a release prior to October 22 (22.4.1) release, you must upgrade the following in order:
1. WebLogic Kubernetes Operator to 4.0.4
2. Patch the OAM container image to April 23
3. Upgrade the Ingress
4. Upgrade Elasticsearch and Kibana See Patch and Upgrade for these instructions.
January, 2023 | 23.1.1 | Supports Oracle Access Management 12.2.1.4 domain deployment using the January 2023 container image which contains the January Patch Set Update (PSU) and other fixes released with the Critical Patch Update (CPU) program.
If upgrading to January 23 (23.1.1) from October 22 (22.4.1) release, you only need to patch the OAM container image to January 23.
If upgrading to January 23 (23.1.1) from a release prior to October 22 (22.4.1) release, you must upgrade the following in order:
1. WebLogic Kubernetes Operator to 3.4.2
2. Patch the OAM container image to January 23
3. Upgrade the Ingress
4. Upgrade Elasticsearch and Kibana See Patch and Upgrade for these instructions.
October, 2022 | 22.4.1 | Supports Oracle Access Management 12.2.1.4 domain deployment using the October 2022 container image which contains the October Patch Set Update (PSU) and other fixes released with the Critical Patch Update (CPU) program.
Support for WebLogic Kubernetes Operator 3.4.2.
Additional Ingress mappings added.
Changes to deployment of Logging and Visualization with Elasticsearch and Kibana.
OAM container images are now only available from container-registry.oracle.com and are no longer available from My Oracle Support.
If upgrading to October 22 (22.4.1) from a previous release, you must upgrade the following in order:
1. WebLogic Kubernetes Operator to 3.4.2
2. Patch the OAM container image to October 22
3. Upgrade the Ingress
4. Upgrade Elasticsearch and Kibana See Patch and Upgrade for these instructions.
July, 2022 | 22.3.1 | Supports Oracle Access Management 12.2.1.4 domain deployment using the July 2022 container image which contains the July Patch Set Update (PSU) and other fixes released with the Critical Patch Update (CPU) program.
April, 2022 | 22.2.1 | Updated for CRI-O support.
November, 2021 | 21.4.2 | Supports Oracle Access Management domain deployment using WebLogic Kubernetes Operator 3.3.0. Voyager ingress removed as no longer supported.
October 2021 | 21.4.1 | A) References to supported Kubernetes, Helm and Docker versions removed and replaced with Support note reference. B) Namespace and domain names changed to be consistent with Enterprise Deployment Guide for Oracle Identity and Access Management in a Kubernetes Cluster. C) Additional post configuration tasks added. D) Upgrading a Kubernetes Cluster and Security Hardening removed as vendor specific.
November 2020 | 20.4.1 | Initial release of Oracle Access Management on Kubernetes.
+ + + + + +
+ +
+ + +
+ + +
+ + + +
+ +
+
+
+ + + + + + + + + + + + + + + + + + diff --git a/docs/23.4.1/idm-products/oam/release-notes/index.xml b/docs/23.4.1/idm-products/oam/release-notes/index.xml new file mode 100644 index 000000000..a70b239f8 --- /dev/null +++ b/docs/23.4.1/idm-products/oam/release-notes/index.xml @@ -0,0 +1,14 @@ + + + + Release Notes on Oracle Fusion Middleware on Kubernetes + /fmw-kubernetes/23.4.1/idm-products/oam/release-notes/ + Recent content in Release Notes on Oracle Fusion Middleware on Kubernetes + Hugo -- gohugo.io + en-us + + + + + + \ No newline at end of file diff --git a/docs/23.4.1/idm-products/oam/troubleshooting/index.html b/docs/23.4.1/idm-products/oam/troubleshooting/index.html new file mode 100644 index 000000000..a29b8f219 --- /dev/null +++ b/docs/23.4.1/idm-products/oam/troubleshooting/index.html @@ -0,0 +1,3979 @@ + + + + + + + + + + + + Troubleshooting :: Oracle Fusion Middleware on Kubernetes + + + + + + + + + + + + + + + + + + + + + + + + + +
+
+
+ +
+
+ + + + +
+
+ +
+
+ + +
+
+ +
+ +
+ +
+ +

+ + Troubleshooting +

+ + + + + + + +

Domain creation failure

+

If the OAM domain creation fails when running create-domain.sh, run the following to diagnose the issue:

+
    +
  1. +

    Run the following command to diagnose the create domain job:

    +
    $ kubectl logs <domain_job> -n <domain_namespace>
    +

    For example:

    +
    $ kubectl logs accessdomain-create-fmw-infra-sample-domain-job-c6vfb -n oamns
    +

    Also run:

    +
    $ kubectl describe pod <domain_job> -n <domain_namespace>
    +

    For example:

    +
    $ kubectl describe pod accessdomain-create-fmw-infra-sample-domain-job-c6vfb -n oamns
    +

    Using the output you should be able to diagnose the problem and resolve the issue.

    +

    Clean down the failed domain creation by following steps 1-3 in Delete the OAM domain home. Then follow RCU schema creation onwards to recreate the RCU schema, the Kubernetes secrets for the domain and RCU, the persistent volume, and the persistent volume claim. Then execute the OAM domain creation steps again.

    +
  2. +
  3. +

    If any of the above commands return the following error:

    +
    Failed to start container "create-fmw-infra-sample-domain-job": Error response from daemon: error while creating mount source path
    +'/scratch/shared/accessdomainpv ': mkdir /scratch/shared/accessdomainpv : permission denied
    +

    then there is a permissions error on the directory for the PV and PVC and the following should be checked:

    +

    a) The directory has 777 permissions: chmod -R 777 <persistent_volume>/accessdomainpv.

    +

    b) If it does have the permissions, check if an oracle user exists and the uid is 1000 and gid is 0.

    +

    Create the oracle user if it doesn’t exist and set the uid to 1000 and gid to 0.

    +
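
A sketch of checking for, and if necessary creating, the oracle user (assumes root or sudo access on the node):

    # Check whether the user exists with the expected uid/gid:
    $ id oracle
    uid=1000(oracle) gid=0(root) groups=0(root)
    # If the user does not exist, create it:
    $ sudo useradd -u 1000 -g 0 oracle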

    c) Edit the $WORKDIR/kubernetes/create-weblogic-domain-pv-pvc/create-pv-pvc-inputs.yaml and add a slash to the end of the directory for the weblogicDomainStoragePath parameter:

    +
    weblogicDomainStoragePath: /scratch/shared/accessdomainpv/
    +

    Clean down the failed domain creation by following steps 1-3 in Delete the OAM domain home. Then follow RCU schema creation onwards to recreate the RCU schema, the Kubernetes secrets for the domain and RCU, the persistent volume, and the persistent volume claim. Then execute the OAM domain creation steps again.

    +
  4. +
+ + + + + +
+ +
+ + +
+ + +
+ + + +
+ +
+
+
+ + + + + + + + + + + + + + + + + + diff --git a/docs/23.4.1/idm-products/oam/troubleshooting/index.xml b/docs/23.4.1/idm-products/oam/troubleshooting/index.xml new file mode 100644 index 000000000..f1624e5e0 --- /dev/null +++ b/docs/23.4.1/idm-products/oam/troubleshooting/index.xml @@ -0,0 +1,14 @@ + + + + Troubleshooting on Oracle Fusion Middleware on Kubernetes + /fmw-kubernetes/23.4.1/idm-products/oam/troubleshooting/ + Recent content in Troubleshooting on Oracle Fusion Middleware on Kubernetes + Hugo -- gohugo.io + en-us + + + + + + \ No newline at end of file diff --git a/docs/23.4.1/idm-products/oam/validate-domain-urls/index.html b/docs/23.4.1/idm-products/oam/validate-domain-urls/index.html new file mode 100644 index 000000000..78c6b5f02 --- /dev/null +++ b/docs/23.4.1/idm-products/oam/validate-domain-urls/index.html @@ -0,0 +1,3989 @@ + + + + + + + + + + + + Validate Domain URLs :: Oracle Fusion Middleware on Kubernetes + + + + + + + + + + + + + + + + + + + + + + + + + +
+
+
+ +
+
+ + + + +
+
+ +
+
+ + +
+
+ +
+ +
+ +
+ +

+ + Validate Domain URLs +

+ + + + + + + +

In this section you validate the OAM domain URLs are accessible via the NGINX ingress.

+

Make sure you know the master hostname and ingress port for NGINX before proceeding.

+

Validate the OAM domain URLs via the Ingress

+

Launch a browser and access the following URLs. Log in with the weblogic username and password (weblogic/<password>). A command line reachability check is also sketched after the table below.

+

Note: If using a load balancer for your ingress replace ${MASTERNODE-HOSTNAME}:${MASTERNODE-PORT} with ${LOADBALANCER-HOSTNAME}:${LOADBALANCER-PORT}.

+ + + + + + + + + + + + + + + + + + + + + + + + + + + + + +
Console or Page | URL
WebLogic Administration Console | https://${MASTERNODE-HOSTNAME}:${MASTERNODE-PORT}/console
Oracle Enterprise Manager Console | https://${MASTERNODE-HOSTNAME}:${MASTERNODE-PORT}/em
Oracle Access Management Console | https://${MASTERNODE-HOSTNAME}:${MASTERNODE-PORT}/oamconsole
Oracle Access Management Console | https://${MASTERNODE-HOSTNAME}:${MASTERNODE-PORT}/access
Logout URL | https://${MASTERNODE-HOSTNAME}:${MASTERNODE-PORT}/oam/server/logout
+
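
If you prefer a command line check of reachability before using a browser, a sketch using curl is shown below; the hostname and port are hypothetical and should be replaced with your master node hostname and NGINX ingress port. An HTTP 200, or a redirect to the login page, indicates the URL is being served through the ingress.

    $ curl -k -s -o /dev/null -w "%{http_code}\n" https://masternode.example.com:31443/console
    $ curl -k -s -o /dev/null -w "%{http_code}\n" https://masternode.example.com:31443/oamconsole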

Note: WebLogic Administration Console and Oracle Enterprise Manager Console should only be used to monitor the servers in the OAM domain. To control the Administration Server and OAM Managed Servers (start/stop) you must use Kubernetes. See Domain Life Cycle for more information.

+

The browser will give certificate errors if you used a self-signed certificate and have not imported it into the browser's Certificate Authority store. If this occurs, you can proceed with the connection and ignore the errors.

+

After validating the URLs, proceed to Post Install Configuration.

+ + + + + +
+ +
+ + +
+ + +
+ + + +
+ +
+
+
+ + + + + + + + + + + + + + + + + + diff --git a/docs/23.4.1/idm-products/oam/validate-domain-urls/index.xml b/docs/23.4.1/idm-products/oam/validate-domain-urls/index.xml new file mode 100644 index 000000000..c7929d8cb --- /dev/null +++ b/docs/23.4.1/idm-products/oam/validate-domain-urls/index.xml @@ -0,0 +1,14 @@ + + + + Validate Domain URLs on Oracle Fusion Middleware on Kubernetes + /fmw-kubernetes/23.4.1/idm-products/oam/validate-domain-urls/ + Recent content in Validate Domain URLs on Oracle Fusion Middleware on Kubernetes + Hugo -- gohugo.io + en-us + + + + + + \ No newline at end of file diff --git a/docs/23.4.1/idm-products/oam/validate-sso-using-webgate/index.html b/docs/23.4.1/idm-products/oam/validate-sso-using-webgate/index.html new file mode 100644 index 000000000..feb68d1cd --- /dev/null +++ b/docs/23.4.1/idm-products/oam/validate-sso-using-webgate/index.html @@ -0,0 +1,4059 @@ + + + + + + + + + + + + Validate a Basic SSO Flow using WebGate Registration :: Oracle Fusion Middleware on Kubernetes + + + + + + + + + + + + + + + + + + + + + + + + + +
+
+
+ +
+
+ + + + +
+
+ +
+
+ + +
+
+ +
+ +
+ +
+ +

+ + Validate a Basic SSO Flow using WebGate Registration +

+ + + + + + + +

In this section you validate that single sign-on works to the OAM Kubernetes cluster via Oracle WebGate. The instructions below assume you have a running Oracle HTTP Server (for example ohs_k8s) and Oracle WebGate installed on an independent server. The instructions also assume basic knowledge of how to register a WebGate agent.

+

Note: At present Oracle HTTP Server and Oracle WebGate are not supported on a Kubernetes cluster.

+

Update the OAM Hostname and Port for the Loadbalancer

+

If using an NGINX ingress with no load balancer, change ${LOADBALANCER-HOSTNAME}:${LOADBALANCER-PORT} to ${MASTERNODE-HOSTNAME}:${MASTERNODE-PORT} when referenced below.

+
    +
  1. +

    Launch a browser and access the OAM console (https://${LOADBALANCER-HOSTNAME}:${LOADBALANCER-PORT}/oamconsole). Login with the weblogic username and password (weblogic/<password>)

    +
  2. +
  3. +

    Navigate to Configuration → Settings ( View ) → Access Manager.

    +
  4. +
  5. +

    Under Load Balancing modify the OAM Server Host and OAM Server Port, to point to the Loadbalancer HTTP endpoint (e.g loadbalancer.example.com and <port> respectively). In the OAM Server Protocol drop down list select https.

    +
  6. +
  7. +

    Under WebGate Traffic Load Balancer modify the OAM Server Host and OAM Server Port, to point to the Loadbalancer HTTP endpoint (e.g. loadbalancer.example.com and <port> respectively). In the OAM Server Protocol drop down list select https.

    +
  8. +
  9. +

    Click Apply.

    +
  10. +
+

Register a WebGate Agent

+

In all the examples below, change the directory path as appropriate for your installation.

+
    +
  1. +

    Run the following command on the server with Oracle HTTP Server and WebGate installed:

    +
    $ cd <OHS_ORACLE_HOME>/webgate/ohs/tools/deployWebGate
    +
    +$ ./deployWebGateInstance.sh -w <OHS_DOMAIN_HOME>/config/fmwconfig/components/OHS/ohs_k8s -oh <OHS_ORACLE_HOME> -ws ohs
    +

    The output will look similar to the following:

    +
    Copying files from WebGate Oracle Home to WebGate Instancedir
    +
  2. +
  3. +

    Run the following command to update the OHS configuration files appropriately:

    +
    $ export LD_LIBRARY_PATH=$LD_LIBRARY_PATH:<OHS_ORACLE_HOME>/lib
    +$ cd <OHS_ORACLE_HOME>/webgate/ohs/tools/setup/InstallTools/
    +$ ./EditHttpConf -w <OHS_DOMAIN_HOME>/config/fmwconfig/components/OHS/ohs_k8s -oh <OHS_ORACLE_HOME>
    +

    The output will look similar to the following:

    +
    The web server configuration file was successfully updated
    +<OHS_DOMAIN_HOME>/config/fmwconfig/components/OHS/ohs_k8s/httpd.conf has been backed up as <OHS_DOMAIN_HOME>/config/fmwconfig/components/OHS/ohs_k8s/httpd.conf.ORIG   
    +
  4. +
  5. +

    Launch a browser, and access the OAM console. Navigate to Application Security → Quick Start Wizards → SSO Agent Registration. Register the agent in the usual way, download the configuration zip file and copy it to the OHS WebGate server, for example: <OHS_DOMAIN_HOME>/config/fmwconfig/components/OHS/ohs_k8/webgate/config. Extract the zip file.

    +
  6. +
  7. +

    Copy the Certificate Authority (CA) certificate (cacert.pem) for the load balancer/ingress certificate to the same directory e.g: <OHS_DOMAIN_HOME>/config/fmwconfig/components/OHS/ohs_k8/webgate/config.

    +

    If you used a self-signed certificate for the ingress, instead copy the self-signed certificate (e.g. /scratch/ssl/tls.crt) to the above directory. Rename the certificate to cacert.pem.

    +
  8. +
  9. +

    Restart Oracle HTTP Server.

    +
  10. +
  11. +

    Access the configured OHS, for example http://ohs.example.com:7778, and check that you are redirected to the SSO login page. Log in and make sure you are redirected successfully to the home page.

    +
  12. +
+

Changing WebGate agent to use OAP

+

Note: This section should only be followed if you need to change the OAM/WebGate Agent communication from HTTPS to OAP.

+

To change the WebGate agent to use OAP:

+
    +
  1. +

    In the OAM Console click Application Security and then Agents.

    +
  2. +
  3. +

    Search for the agent you want to modify and select it.

    +
  4. +
  5. +

    In the User Defined Parameters change:

    +

    a) OAMServerCommunicationMode from HTTPS to OAP. For example OAMServerCommunicationMode=OAP

    +

    b) OAMRestEndPointHostName=<hostname> to the {$MASTERNODE-HOSTNAME}. For example OAMRestEndPointHostName=masternode.example.com

    +
  6. +
  7. +

    In the Server Lists section click Add to add a new server with the following values:

    +
      +
    • Access Server: Other
    • +
    • Host Name: <{$MASTERNODE-HOSTNAME}>
    • +
    • Host Port: <oamoap-service NodePort>
    • +
    +

    Note: To find the value for Host Port run the following:

    +
    $ kubectl describe svc accessdomain-oamoap-service -n oamns
    +

    The output will look similar to the following:

    +
    Name:                     accessdomain-oamoap-service
    +Namespace:                oamns
    +Labels:                   <none>
    +Annotations:              <none>
    +Selector:                 weblogic.clusterName=oam_cluster
    +Type:                     NodePort
    +IP Families:              <none>
    +IP:                       10.100.202.44
    +IPs:                      10.100.202.44
    +Port:                     <unset>  5575/TCP
    +TargetPort:               5575/TCP
    +NodePort:                 <unset>  30540/TCP
    +Endpoints:                10.244.5.21:5575,10.244.6.76:5575
    +Session Affinity:         None
    +External Traffic Policy:  Cluster
    +Events:                   <none>
    +

    In the example above the NodePort is 30540.

    +
  8. +
  9. +

    Delete all servers in Server Lists except for the one just created, and click Apply.

    +
  10. +
  11. +

    Click Download to download the webgate zip file. Copy the zip file to the desired WebGate.

    +
  12. +
  13. +

    Delete the cache from <OHS_DOMAIN_HOME>/servers/ohs1/cache and restart Oracle HTTP Server.

    +
  14. +
+ + + + + +
+ +
+ + +
+ + +
+ + + +
+ +
+
+
+ + + + + + + + + + + + + + + + + + diff --git a/docs/23.4.1/idm-products/oam/validate-sso-using-webgate/index.xml b/docs/23.4.1/idm-products/oam/validate-sso-using-webgate/index.xml new file mode 100644 index 000000000..561fc9771 --- /dev/null +++ b/docs/23.4.1/idm-products/oam/validate-sso-using-webgate/index.xml @@ -0,0 +1,14 @@ + + + + Validate a Basic SSO Flow using WebGate Registration on Oracle Fusion Middleware on Kubernetes + /fmw-kubernetes/23.4.1/idm-products/oam/validate-sso-using-webgate/ + Recent content in Validate a Basic SSO Flow using WebGate Registration on Oracle Fusion Middleware on Kubernetes + Hugo -- gohugo.io + en-us + + + + + + \ No newline at end of file diff --git a/docs/23.4.1/idm-products/oid/index.html b/docs/23.4.1/idm-products/oid/index.html new file mode 100644 index 000000000..4ec19b30b --- /dev/null +++ b/docs/23.4.1/idm-products/oid/index.html @@ -0,0 +1,3948 @@ + + + + + + + + + + + + Oracle Internet Directory :: Oracle Fusion Middleware on Kubernetes + + + + + + + + + + + + + + + + + + + + + + + + + +
+
+
+ +
+
+ + + + +
+ +
+ + +
+
+ +
+ +
+ +
+ +

+ + Oracle Internet Directory +

+ + + + + + + +

As of July 2022, container support has been removed for Oracle Internet Directory. Refer to document ID 2723908.1 on My Oracle Support for more details.

+

Documentation for earlier releases

+

To view documentation for previous releases, see:

+ + + + + + +
+ +
+ + +
+ + +
+ + + +
+ +
+
+
+ + + + + + + + + + + + + + + + + + diff --git a/docs/23.4.1/idm-products/oid/index.xml b/docs/23.4.1/idm-products/oid/index.xml new file mode 100644 index 000000000..0f1a42894 --- /dev/null +++ b/docs/23.4.1/idm-products/oid/index.xml @@ -0,0 +1,24 @@ + + + + Oracle Internet Directory on Oracle Fusion Middleware on Kubernetes + /fmw-kubernetes/23.4.1/idm-products/oid/ + Recent content in Oracle Internet Directory on Oracle Fusion Middleware on Kubernetes + Hugo -- gohugo.io + en-us + + + + + + Release Notes + /fmw-kubernetes/23.4.1/idm-products/oid/release-notes/ + Mon, 01 Jan 0001 00:00:00 +0000 + + /fmw-kubernetes/23.4.1/idm-products/oid/release-notes/ + Review the latest changes and known issues for Oracle Internet Directory on Kubernetes. +Recent changes Date Version Change July, 2022 22.3.1 As of July 2022, Container support has been removed for Oracle Internet Directory. Refer to document ID 2723908.1 on My Oracle Support for more details. April, 2022 22.2.1 Updated for CRI-O support. October, 2021 21.4.1 Initial release of Oracle Identity Directory on Kubernetes. + + + + \ No newline at end of file diff --git a/docs/23.4.1/idm-products/oid/release-notes/index.html b/docs/23.4.1/idm-products/oid/release-notes/index.html new file mode 100644 index 000000000..4135e1b52 --- /dev/null +++ b/docs/23.4.1/idm-products/oid/release-notes/index.html @@ -0,0 +1,3974 @@ + + + + + + + + + + + + Release Notes :: Oracle Fusion Middleware on Kubernetes + + + + + + + + + + + + + + + + + + + + + + + + + +
+
+
+ +
+
+ + + + +
+
+ +
+
+ + +
+
+ +
+ +
+ +
+ +

+ + Release Notes +

+ + + + + + +

Review the latest changes and known issues for Oracle Internet Directory on Kubernetes.

+

Recent changes

+ + + + + + + + + + + + + + + + + + + + + + + + + +
Date | Version | Change
July, 2022 | 22.3.1 | As of July 2022, Container support has been removed for Oracle Internet Directory. Refer to document ID 2723908.1 on My Oracle Support for more details.
April, 2022 | 22.2.1 | Updated for CRI-O support.
October, 2021 | 21.4.1 | Initial release of Oracle Internet Directory on Kubernetes.
+ + +
+ +
+ + + +
+ + +
+ + + +
+ +
+
+
+ + + + + + + + + + + + + + + + + + + diff --git a/docs/23.4.1/idm-products/oig/configure-design-console/index.html b/docs/23.4.1/idm-products/oig/configure-design-console/index.html new file mode 100644 index 000000000..f59b515e3 --- /dev/null +++ b/docs/23.4.1/idm-products/oig/configure-design-console/index.html @@ -0,0 +1,4012 @@ + + + + + + + + + + + + Configure Design Console :: Oracle Fusion Middleware on Kubernetes + + + + + + + + + + + + + + + + + + + + + + + + + +
+
+
+ +
+
+ + + + +
+
+ +
+
+ + +
+
+ +
+ +
+ +
+ +

+ + Configure Design Console +

+ + + + + + + +

Configure an Ingress to allow Design Console to connect to your Kubernetes cluster.

+ + + + + + + + + + + + + + + + + +
+ +
+ + +
+ + +
+ + + +
+ +
+
+
+ + + + + + + + + + + + + + + + + + diff --git a/docs/23.4.1/idm-products/oig/configure-design-console/index.xml b/docs/23.4.1/idm-products/oig/configure-design-console/index.xml new file mode 100644 index 000000000..fbc303137 --- /dev/null +++ b/docs/23.4.1/idm-products/oig/configure-design-console/index.xml @@ -0,0 +1,50 @@ + + + + Configure Design Console on Oracle Fusion Middleware on Kubernetes + /fmw-kubernetes/23.4.1/idm-products/oig/configure-design-console/ + Recent content in Configure Design Console on Oracle Fusion Middleware on Kubernetes + Hugo -- gohugo.io + en-us + + + + + + a. Using Design Console with NGINX(non-SSL) + /fmw-kubernetes/23.4.1/idm-products/oig/configure-design-console/using-the-design-console-with-nginx-non-ssl/ + Mon, 01 Jan 0001 00:00:00 +0000 + + /fmw-kubernetes/23.4.1/idm-products/oig/configure-design-console/using-the-design-console-with-nginx-non-ssl/ + Configure an NGINX ingress (non-SSL) to allow Design Console to connect to your Kubernetes cluster. + Prerequisites + Setup routing rules for the Design Console ingress + Create the ingress + Update the T3 channel + Restart the OIG Managed Server + Design Console client +a. Using an on-premises installed Design Console +b. Using a container image for Design Console + Login to the Design Console + + + + b. Using Design Console with NGINX(SSL) + /fmw-kubernetes/23.4.1/idm-products/oig/configure-design-console/using-the-design-console-with-nginx-ssl/ + Mon, 01 Jan 0001 00:00:00 +0000 + + /fmw-kubernetes/23.4.1/idm-products/oig/configure-design-console/using-the-design-console-with-nginx-ssl/ + Configure an NGINX ingress (SSL) to allow Design Console to connect to your Kubernetes cluster. + Prerequisites + Setup routing rules for the Design Console ingress + Create the ingress + Update the T3 channel + Restart the OIG Managed Server + Design Console client +a. Using an on-premises installed Design Console +b. Using a container image for Design Console + Login to the Design Console + + + + \ No newline at end of file diff --git a/docs/23.4.1/idm-products/oig/configure-design-console/using-the-design-console-with-nginx-non-ssl/index.html b/docs/23.4.1/idm-products/oig/configure-design-console/using-the-design-console-with-nginx-non-ssl/index.html new file mode 100644 index 000000000..7910b4748 --- /dev/null +++ b/docs/23.4.1/idm-products/oig/configure-design-console/using-the-design-console-with-nginx-non-ssl/index.html @@ -0,0 +1,4237 @@ + + + + + + + + + + + + a. Using Design Console with NGINX(non-SSL) :: Oracle Fusion Middleware on Kubernetes + + + + + + + + + + + + + + + + + + + + + + + + + +
+
+
+ +
+ +
+ +
+ +
+ +
+ +

+ + a. Using Design Console with NGINX(non-SSL) +

+ + + + + + +

Configure an NGINX ingress (non-SSL) to allow Design Console to connect to your Kubernetes cluster.

+
    +
  1. +

    Prerequisites

    +
  2. +
  3. +

    Setup routing rules for the Design Console ingress

    +
  4. +
  5. +

    Create the ingress

    +
  6. +
  7. +

    Update the T3 channel

    +
  8. +
  9. +

    Restart the OIG Managed Server

    +
  10. +
  11. +

    Design Console client

    +

    a. Using an on-premises installed Design Console

    +

    b. Using a container image for Design Console

    +
  12. +
  13. +

    Login to the Design Console

    +
  14. +
+

Prerequisites

+

If you haven’t already configured an NGINX ingress controller (Non-SSL) for OIG, follow Using an Ingress with NGINX (non-SSL).

+

Make sure you know the master hostname and ingress port for NGINX before proceeding, e.g. http://${MASTERNODE-HOSTNAME}:${MASTERNODE-PORT}.

+

Note: In all the steps below, if you are using a load balancer for your ingress instead of NodePort, replace ${MASTERNODE-HOSTNAME}:${MASTERNODE-PORT} with ${LOADBALANCER-HOSTNAME}:${LOADBALANCER-PORT}.

+
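
If you need to look the port up again, the NGINX node port can be retrieved with a command such as the following (a sketch that assumes NGINX was installed into the nginx namespace with the release name nginx-ingress, as described in Using an Ingress with NGINX (non-SSL)):

+
# prints the HTTP node port of the NGINX ingress controller service
+$ kubectl --namespace nginx get services -o jsonpath="{.spec.ports[0].nodePort}" nginx-ingress-ingress-nginx-controller
+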

Setup routing rules for the Design Console ingress

+
    +
  1. +

    Setup routing rules by running the following commands:

    +
    $ cd $WORKDIR/kubernetes/design-console-ingress
    +

    Edit values.yaml and ensure that tls: NONSSL and domainUID: governancedomain are set, for example:

    +
    # Load balancer type.  Supported values are: NGINX
    +type: NGINX
    +# Type of Configuration Supported Values are : NONSSL,SSL
    +# tls: NONSSL
    +tls: NONSSL
    +# TLS secret name if the mode is SSL
    +secretName: dc-tls-cert
    +
    +
    +# WLS domain as backend to the load balancer
    +wlsDomain:
    +  domainUID: governancedomain
    +  oimClusterName: oim_cluster
    +  oimServerT3Port: 14002
    +
  2. +
+

Create the ingress

+
    +
  1. +

    Run the following command to create the ingress:

    +
    $ cd $WORKDIR
    +$ helm install governancedomain-nginx-designconsole kubernetes/design-console-ingress  --namespace oigns  --values kubernetes/design-console-ingress/values.yaml
    +


    The output will look similar to the following:

    +
    NAME: governancedomain-nginx-designconsole
    +LAST DEPLOYED: <DATE>
    +NAMESPACE: oigns
    +STATUS: deployed
    +REVISION: 1
    +TEST SUITE: None
    +
  2. +
  3. +

    Run the following command to show the ingress is created successfully:

    +
    $ kubectl describe ing governancedomain-nginx-designconsole -n <domain_namespace>
    +

    For example:

    +
    $ kubectl describe ing governancedomain-nginx-designconsole -n oigns
    +

    The output will look similar to the following:

    +
    Name:             governancedomain-nginx-designconsole
    +Namespace:        oigns
    +Address:
    +Default backend:  default-http-backend:80 (<error: endpoints "default-http-backend" not found>)
    +Rules:
    +  Host        Path  Backends
    +  ----        ----  --------
    +  *
    +                 governancedomain-cluster-oim-cluster:14002 (10.244.1.25:14002)
    +Annotations:  kubernetes.io/ingress.class: nginx
    +              meta.helm.sh/release-name: governancedomain-nginx-designconsole
    +              meta.helm.sh/release-namespace: oigns
    +              nginx.ingress.kubernetes.io/affinity: cookie
    +              nginx.ingress.kubernetes.io/enable-access-log: false
    +Events:
    +  Type    Reason  Age   From                      Message
    +  ----    ------  ----  ----                      -------
    +  Normal  Sync    13s   nginx-ingress-controller  Scheduled for sync
    +
  4. +
+

Update the T3 channel

+
    +
  1. +

    Log in to the WebLogic Console using http://${MASTERNODE-HOSTNAME}:${MASTERNODE-PORT}/console.

    +
  2. +
  3. +

    Navigate to Environment, click Servers, and then select oim_server1.

    +
  4. +
  5. +

    Click Protocols, and then Channels.

    +
  6. +
  7. +

    Click the default T3 channel called T3Channel.

    +
  8. +
  9. +

    Click Lock and Edit.

    +
  10. +
  11. +

    Set the External Listen Address to the ingress controller hostname ${MASTERNODE-HOSTNAME}.

    +
  12. +
  13. +

    Set the External Listen Port to the ingress controller port ${MASTERNODE-PORT}.

    +
  14. +
  15. +

    Click Save.

    +
  16. +
  17. +

    Click Activate Changes.

    +
  18. +
+

Restart the OIG Managed Server

+

Restart the OIG Managed Server for the above changes to take effect:

+
$ cd $WORKDIR/kubernetes/domain-lifecycle
+$ ./restartServer.sh -s oim_server1 -d <domain_uid> -n <domain_namespace>
+

For example:

+
$ cd $WORKDIR/kubernetes/domain-lifecycle
+./restartServer.sh -s oim_server1 -d governancedomain -n oigns
+

Make sure the <domain_uid>-oim-server1 has a READY status of 1/1 before continuing:

+
$ kubectl get pods -n oigns | grep oim-server1   
+

The output will look similar to the following:

+
governancedomain-oim-server1                                1/1     Running     0          8m
+
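
Alternatively, rather than polling with kubectl get pods, you can wait for the pod to report Ready, for example (a sketch that assumes the domain UID governancedomain and the namespace oigns used in this guide):

+
# waits up to 10 minutes for the OIG Managed Server pod to become Ready
+$ kubectl wait --for=condition=Ready pod/governancedomain-oim-server1 -n oigns --timeout=600s
+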

Design Console client

+

It is possible to use Design Console from an on-premises install, or from a container image.

+

Using an on-premises installed Design Console

+
    +
  1. +

    Install Design Console on an on-premises machine

    +
  2. +
  3. +

    Follow Login to the Design Console.

    +
  4. +
+

Using a container image for Design Console

+
Using Docker
+

The Design Console can be run from a container using X windows emulation.

+
    +
  1. +

    On the parent machine where the Design Console is to be displayed, run xhost +.

    +
  2. +
  3. +

    Find which worker node the <domain>-oim-server1 pod is running on. For example:

    +
    $ kubectl get pods -n oigns -o wide | grep governancedomain-oim-server1 
    +

    The output will look similar to the following:

    +
    governancedomain-oim-server1                                1/1     Running     0          31m     10.244.2.98   worker-node2   <none>           <none>
    +
  4. +
  5. +

    On the worker node returned above, e.g. worker-node2, execute the following command to find the OIG container image name:

    +
    $ docker images
    +

    Then execute the following command to start a container to run Design Console:

    +
    $ docker run -u root --name oigdcbase -it <image> bash
    +

    For example:

    +
    $ docker run -u root -it --name oigdcbase container-registry.oracle.com/middleware/oig_cpu:12.2.1.4-jdk8-ol7-<January'23> bash
    +

    This will take you into a bash shell inside the container:

    +
    bash-4.2#
    +
  6. +
  7. +

    Inside the container set the proxy, for example:

    +
    bash-4.2# export https_proxy=http://proxy.example.com:80
    +
  8. +
  9. +

    Install the relevant X windows packages in the container:

    +
    bash-4.2# yum install libXext libXrender libXtst
    +
  10. +
  11. +

    Execute the following outside the container to create a new Design Console image from the container:

    +
    $ docker commit <container_name> <design_console_image_name>
    +

    For example:

    +
    $ docker commit oigdcbase oigdc
    +
  12. +
  13. +

    Exit the container bash session:

    +
    bash-4.2# exit
    +
  14. +
  15. +

    Start a new container using the Design Console image:

    +
    $ docker run --name oigdc -it oigdc /bin/bash
    +

    This will take you into a bash shell for the container:

    +
    bash-4.2#
    +
  16. +
  17. +

    In the container run the following to export the DISPLAY:

    +
    $ export DISPLAY=<parent_machine_hostname:1>
    +
  18. +
  19. +

    Start the Design Console from the container:

    +
    bash-4.2# cd idm/designconsole
    +bash-4.2# sh xlclient.sh
    +

    The Design Console login should be displayed. Now follow Login to the Design Console.

    +
  20. +
+
Using podman
+
    +
  1. +

    On the parent machine where the Design Console is to be displayed, run xhost +.

    +
  2. +
  3. +

    Find which worker node the <domain>-oim-server1 pod is running on. For example:

    +
    $ kubectl get pods -n oigns -o wide | grep governancedomain-oim-server1 
    +

    The output will look similar to the following:

    +
    governancedomain-oim-server1                                1/1     Running     0          31m     10.244.2.98   worker-node2   <none>           <none>
    +
  4. +
  5. +

    On the worker node returned above, e.g. worker-node2, execute the following command to find the OIG container image name:

    +
    $ podman images
    +

    Then execute the following command to start a container to run Design Console:

    +
    $ podman run -u root --name oigdcbase -it <image> bash
    +

    For example:

    +
    $ podman run -u root -it --name oigdcbase container-registry.oracle.com/middleware/oig_cpu:12.2.1.4-jdk8-ol7-<January'23> bash
    +

    This will take you into a bash shell inside the container:

    +
    bash-4.2#
    +
  6. +
  7. +

    Inside the container set the proxy, for example:

    +
    bash-4.2# export https_proxy=http://proxy.example.com:80
    +
  8. +
  9. +

    Install the relevant X windows packages in the container:

    +
    bash-4.2# yum install libXext libXrender libXtst
    +
  10. +
  11. +

    Execute the following outside the container to create a new Design Console image from the container:

    +
    $ podman commit <container_name> <design_console_image_name>
    +

    For example:

    +
    $ podman commit oigdcbase oigdc
    +
  12. +
  13. +

    Exit the container bash session:

    +
    bash-4.2# exit
    +
  14. +
  15. +

    Start a new container using the Design Console image:

    +
    $ podman run --name oigdc -it oigdc /bin/bash
    +

    This will take you into a bash shell for the container:

    +
    bash-4.2#
    +
  16. +
  17. +

    In the container run the following to export the DISPLAY:

    +
    $ export DISPLAY=<parent_machine_hostname:1>
    +
  18. +
  19. +

    Start the Design Console from the container:

    +
    bash-4.2# cd idm/designconsole
    +bash-4.2# sh xlclient.sh
    +

    The Design Console login should be displayed. Now follow Login to the Design Console.

    +
  20. +
+

Login to the Design Console

+
    +
  1. +

    Launch the Design Console. The Oracle Identity Manager Design Console login page is displayed.

    +

    Enter the following details and click Login:

    +
      +
    • Server URL: <url>
    • +
    • User ID: xelsysadm
    • +
    • Password: <password>.
    • +
    +

    where <url> is http://${MASTERNODE-HOSTNAME}:${MASTERNODE-PORT}

    +
  2. +
  3. +

    If successful the Design Console will be displayed.

    +
  4. +
+ + +
+ +
+ + + +
+ + +
+ + + +
+ +
+
+
+ + + + + + + + + + + + + + + + + + + diff --git a/docs/23.4.1/idm-products/oig/configure-design-console/using-the-design-console-with-nginx-ssl/index.html b/docs/23.4.1/idm-products/oig/configure-design-console/using-the-design-console-with-nginx-ssl/index.html new file mode 100644 index 000000000..f89ae8c89 --- /dev/null +++ b/docs/23.4.1/idm-products/oig/configure-design-console/using-the-design-console-with-nginx-ssl/index.html @@ -0,0 +1,4284 @@ + + + + + + + + + + + + b. Using Design Console with NGINX(SSL) :: Oracle Fusion Middleware on Kubernetes + + + + + + + + + + + + + + + + + + + + + + + + + +
+
+
+ +
+
+ + + + + + + +
+
+ +
+ +
+ +
+ +

+ + b. Using Design Console with NGINX(SSL) +

+ + + + + + +

Configure an NGINX ingress (SSL) to allow Design Console to connect to your Kubernetes cluster.

+
    +
  1. +

    Prerequisites

    +
  2. +
  3. +

    Setup routing rules for the Design Console ingress

    +
  4. +
  5. +

    Create the ingress

    +
  6. +
  7. +

    Update the T3 channel

    +
  8. +
  9. +

    Restart the OIG Managed Server

    +
  10. +
  11. +

    Design Console client

    +

    a. Using an on-premises installed Design Console

    +

    b. Using a container image for Design Console

    +
  12. +
  13. +

    Login to the Design Console

    +
  14. +
+

Prerequisites

+

If you haven’t already configured an NGINX ingress controller (SSL) for OIG, follow Using an Ingress with NGINX (SSL).

+

Make sure you know the master hostname and ingress port for NGINX before proceeding, e.g. https://${MASTERNODE-HOSTNAME}:${MASTERNODE-PORT}. Also make sure you know the Kubernetes secret for SSL that was generated, e.g. governancedomain-tls-cert.

+
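
If you need to confirm these values, the HTTPS node port and the SSL secret can be checked with commands such as the following (a sketch that assumes NGINX was installed into the nginxssl namespace with the release name nginx-ingress, and that the secret governancedomain-tls-cert was created in the oigns namespace, as described in Using an Ingress with NGINX (SSL)):

+
# prints the HTTPS node port of the NGINX ingress controller service
+$ kubectl --namespace nginxssl get services -o jsonpath="{.spec.ports[1].nodePort}" nginx-ingress-ingress-nginx-controller
+# confirms the SSL secret exists in the domain namespace
+$ kubectl get secret governancedomain-tls-cert -n oigns
+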

Setup routing rules for the Design Console ingress

+
    +
  1. +

    Setup routing rules by running the following commands:

    +
    $ cd $WORKDIR/kubernetes/design-console-ingress
    +

    Edit values.yaml and ensure that tls: SSL is set. Change domainUID: and secretName: to match the values for your <domain_uid> and your SSL Kubernetes secret, for example:

    +
    # Load balancer type.  Supported values are: NGINX
    +type: NGINX
    +# Type of Configuration Supported Values are : NONSSL,SSL
    +# tls: NONSSL
    +tls: SSL
    +# TLS secret name if the mode is SSL
    +secretName: governancedomain-tls-cert
    +
    +
    +# WLS domain as backend to the load balancer
    +wlsDomain:
    +  domainUID: governancedomain
    +  oimClusterName: oim_cluster
    +  oimServerT3Port: 14002
    +
  2. +
+

Create the ingress

+
    +
  1. +

    Run the following command to create the ingress:

    +
    $ cd $WORKDIR
    +$ helm install governancedomain-nginx-designconsole kubernetes/design-console-ingress  --namespace oigns  --values kubernetes/design-console-ingress/values.yaml
    +

    The output will look similar to the following:

    +
    NAME: governancedomain-nginx-designconsole
    +LAST DEPLOYED: <DATE>
    +NAMESPACE: oigns
    +STATUS: deployed
    +REVISION: 1
    +TEST SUITE: None
    +
  2. +
  3. +

    Run the following command to show the ingress is created successfully:

    +
    $ kubectl describe ing governancedomain-nginx-designconsole -n <domain_namespace>
    +

    For example:

    +
    $ kubectl describe ing governancedomain-nginx-designconsole -n oigns
    +

    The output will look similar to the following:

    +
    Name:             governancedomain-nginx-designconsole
    +Namespace:        oigns
    +Address:
    +Default backend:  default-http-backend:80 (<error: endpoints "default-http-backend" not found>)
    +Rules:
    +  Host        Path  Backends
    +  ----        ----  --------
    +  *
    +                 governancedomain-cluster-oim-cluster:14002 (10.244.2.103:14002)
    +Annotations:  kubernetes.io/ingress.class: nginx
    +              meta.helm.sh/release-name: governancedomain-nginx-designconsole
    +              meta.helm.sh/release-namespace: oigns
    +              nginx.ingress.kubernetes.io/affinity: cookie
    +              nginx.ingress.kubernetes.io/configuration-snippet:
    +                more_set_input_headers "X-Forwarded-Proto: https";
    +                more_set_input_headers "WL-Proxy-SSL: true";
    +              nginx.ingress.kubernetes.io/enable-access-log: false
    +              nginx.ingress.kubernetes.io/ingress.allow-http: false
    +              nginx.ingress.kubernetes.io/proxy-buffer-size: 2000k
    +Events:
    +  Type    Reason  Age   From                      Message
    +  ----    ------  ----  ----                      -------
    +  Normal  Sync    6s    nginx-ingress-controller  Scheduled for sync
    +
  4. +
+

Update the T3 channel

+
    +
  1. +

    Log in to the WebLogic Console using https://${MASTERNODE-HOSTNAME}:${MASTERNODE-PORT}/console.

    +
  2. +
  3. +

    Navigate to Environment, click Servers, and then select oim_server1.

    +
  4. +
  5. +

    Click Protocols, and then Channels.

    +
  6. +
  7. +

    Click the default T3 channel called T3Channel.

    +
  8. +
  9. +

    Click Lock and Edit.

    +
  10. +
  11. +

    Set the External Listen Address to the ingress controller hostname ${MASTERNODE-HOSTNAME}.

    +
  12. +
  13. +

    Set the External Listen Port to the ingress controller port ${MASTERNODE-PORT}.

    +
  14. +
  15. +

    Click Save.

    +
  16. +
  17. +

    Click Activate Changes.

    +
  18. +
+

Restart the OIG Managed Server

+

Restart the OIG Managed Server for the above changes to take effect:

+
$ cd $WORKDIR/kubernetes/domain-lifecycle
+$ ./restartServer.sh -s oim_server1 -d <domain_uid> -n <domain_namespace>
+

For example:

+
$ cd $WORKDIR/kubernetes/domain-lifecycle
+./restartServer.sh -s oim_server1 -d governancedomain -n oigns
+

Make sure the <domain_uid>-oim-server1 has a READY status of 1/1 before continuing:

+
$ kubectl get pods -n oigns | grep oim-server1   
+

The output will look similar to the following:

+
governancedomain-oim-server1                                1/1     Running     0          8m
+

Design Console client

+

It is possible to use Design Console from an on-premises install, or from a container image.

+

Using an on-premises installed Design Console

+

The instructions below should be performed on the client where Design Console is installed.

+
    +
  1. +

    Import the CA certificate into the java keystore

    +

    If in Generate SSL Certificate you requested a certificate from a Certificate Authority (CA), then you must import the CA certificate (e.g. cacert.crt) that signed your certificate into the Java truststore used by Design Console.

    +

    If in Generate SSL Certificate you generated a self-signed certificate (e.g. tls.crt), you must import the self-signed certificate into the Java truststore used by Design Console.

    +

    Import the certificate using the following command:

    +
    $ keytool -import -trustcacerts -alias dc -file <certificate> -keystore $JAVA_HOME/jre/lib/security/cacerts
    +

    where <certificate> is the CA certificate, or self-signed certificate. A quick way to confirm the import is shown in the sketch after this list.

    +
  2. +
  3. +

    Once complete follow Login to the Design Console.

    +
  4. +
+
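
To confirm the certificate was imported, the alias can be listed, for example (a sketch that assumes the alias dc used above and the default cacerts keystore password changeit):

+
# -storepass changeit assumes the default JDK cacerts password; adjust if your keystore password differs
+$ keytool -list -keystore $JAVA_HOME/jre/lib/security/cacerts -alias dc -storepass changeit
+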

Using a container image for Design Console

+
Using Docker
+

The Design Console can be run from a container using X windows emulation.

+
    +
  1. +

    On the parent machine where the Design Console is to be displayed, run xhost +.

    +
  2. +
  3. +

    Find which worker node the <domain>-oim-server1 pod is running on. For example:

    +
    $ kubectl get pods -n oigns -o wide | grep governancedomain-oim-server1 
    +

    The output will look similar to the following:

    +
    governancedomain-oim-server1                                1/1     Running     0          31m     10.244.2.98   worker-node2   
    +
  4. +
  5. +

    On the worker node returned above, e.g. worker-node2, execute the following command to find the OIG container image name:

    +
    $ docker images
    +

    Then execute the following command to start a container to run Design Console:

    +
    $ docker run -u root --name oigdcbase -it <image> bash
    +

    For example:

    +
    $ docker run -u root -it --name oigdcbase container-registry.oracle.com/middleware/oig_cpu:12.2.1.4-jdk8-ol7-<January'23> bash
    +

    This will take you into a bash shell inside the container:

    +
    bash-4.2#
    +
  6. +
  7. +

    Inside the container set the proxy, for example:

    +
    bash-4.2# export https_proxy=http://proxy.example.com:80
    +
  8. +
  9. +

    Install the relevant X windows packages in the container:

    +
    bash-4.2# yum install libXext libXrender libXtst
    +
  10. +
  11. +

    Execute the following outside the container to create a new Design Console image from the container:

    +
    $ docker commit <container_name> <design_console_image_name>
    +

    For example:

    +
    $ docker commit oigdcbase oigdc
    +
  12. +
  13. +

    Exit the container bash session:

    +
    bash-4.2# exit
    +
  14. +
  15. +

    Start a new container using the Design Console image:

    +
    $ docker run --name oigdc -it oigdc /bin/bash
    +

    This will take you into a bash shell for the container:

    +
    bash-4.2#
    +
  16. +
  17. +

    Copy the Ingress CA certificate into the container

    +

    If in Generate SSL Certificate you requested a certificate from a Certificate Authority (CA), then you must copy the CA certificate (e.g. cacert.crt) that signed your certificate into the container.

    +

    If in Generate SSL Certificate you generated a self-signed certificate (e.g. tls.crt), you must copy the self-signed certificate into the container.

    +

    Note: You will have to copy the certificate over to the worker node where the oigdc image is created before running the following.

    +

    Run the following command outside the container:

    +
    $ cd <workdir>/ssl
    +$ docker cp <certificate> <container_name>:/u01/jdk/jre/lib/security/<certificate>
    +

    For example:

    +
    $ cd /scratch/OIGK8S/ssl
    +$ docker cp tls.crt oigdc:/u01/jdk/jre/lib/security/tls.crt
    +
  18. +
  19. +

    Inside the container, import the certificate using the following command:

    +
    bash-4.2# /u01/jdk/bin/keytool -import -trustcacerts -alias dc -file /u01/jdk/jre/lib/security/<certificate> -keystore /u01/jdk/jre/lib/security/cacerts
    +

    For example:

    +
    bash-4.2# /u01/jdk/bin/keytool -import -trustcacerts -alias dc -file /u01/jdk/jre/lib/security/tls.crt -keystore /u01/jdk/jre/lib/security/cacerts
    +
  20. +
  21. +

    In the container run the following to export the DISPLAY:

    +
    $ export DISPLAY=<parent_machine_hostname:1>
    +
  22. +
  23. +

    Start the Design Console from the container:

    +
    bash-4.2# cd idm/designconsole
    +bash-4.2# sh xlclient.sh
    +

    The Design Console login should be displayed. Now follow Login to the Design Console.

    +
  24. +
+
Using podman
+
    +
  1. +

    On the parent machine where the Design Console is to be displayed, run xhost +.

    +
  2. +
  3. +

    Find which worker node the <domain>-oim-server1 pod is running on. For example:

    +
    $ kubectl get pods -n oigns -o wide | grep governancedomain-oim-server1 
    +

    The output will look similar to the following:

    +
    governancedomain-oim-server1                                1/1     Running     0          19h   10.244.2.55   worker-node2   <none> 
    +
  4. +
  5. +

    On the worker node returned above, e.g. worker-node2, execute the following command to find the OIG container image name:

    +
    $ podman images
    +

    Then execute the following command to start a container to run Design Console:

    +
    $ podman run -u root --name oigdcbase -it <image> bash
    +

    For example:

    +
    $ podman run -u root -it --name oigdcbase container-registry.oracle.com/middleware/oig_cpu:12.2.1.4-jdk8-ol7-<January'23> bash
    +

    This will take you into a bash shell inside the container:

    +
    bash-4.2#
    +
  6. +
  7. +

    Inside the container set the proxy, for example:

    +
    bash-4.2# export https_proxy=http://proxy.example.com:80
    +
  8. +
  9. +

    Install the relevant X windows packages in the container:

    +
    bash-4.2# yum install libXext libXrender libXtst
    +
  10. +
  11. +

    Execute the following outside the container to create a new Design Console image from the container:

    +
    $ podman commit <container_name> <design_console_image_name>
    +

    For example:

    +
    $ podman commit oigdcbase oigdc
    +
  12. +
  13. +

    Exit the container bash session:

    +
    bash-4.2# exit
    +
  14. +
  15. +

    Start a new container using the Design Console image:

    +
    $ podman run --name oigdc -it oigdc /bin/bash
    +

    This will take you into a bash shell for the container:

    +
    bash-4.2#
    +
  16. +
  17. +

    Copy the Ingress CA certificate into the container

    +

    If in Generate SSL Certificate you requested a certificate from a Certificate Authority (CA), then you must copy the CA certificate (e.g. cacert.crt) that signed your certificate into the container.

    +

    If in Generate SSL Certificate you generated a self-signed certificate (e.g. tls.crt), you must copy the self-signed certificate into the container.

    +

    Note: You will have to copy the certificate over to the worker node where the oigdc image is created before running the following.

    +

    Run the following command outside the container:

    +
    $ cd <workdir>/ssl
    +$ podman cp <certificate> <container_name>:/u01/jdk/jre/lib/security/<certificate>
    +

    For example:

    +
    $ cd /scratch/OIGK8S/ssl
    +$ podman cp tls.crt oigdc:/u01/jdk/jre/lib/security/tls.crt
    +
  18. +
  19. +

    Inside the container, import the certificate using the following command:

    +
    bash-4.2# /u01/jdk/bin/keytool -import -trustcacerts -alias dc -file /u01/jdk/jre/lib/security/<certificate> -keystore /u01/jdk/jre/lib/security/cacerts
    +

    For example:

    +
    bash-4.2# /u01/jdk/bin/keytool -import -trustcacerts -alias dc -file /u01/jdk/jre/lib/security/tls.crt -keystore /u01/jdk/jre/lib/security/cacerts
    +
  20. +
  21. +

    In the container run the following to export the DISPLAY:

    +
    $ export DISPLAY=<parent_machine_hostname:1>
    +
  22. +
  23. +

    Start the Design Console from the container:

    +
    bash-4.2# cd idm/designconsole
    +bash-4.2# sh xlclient.sh
    +

    The Design Console login should be displayed. Now follow Login to the Design Console.

    +
  24. +
+

Login to the Design Console

+
    +
  1. +

    Launch the Design Console. The Oracle Identity Manager Design Console login page is displayed.

    +

    Enter the following details and click Login:

    +
      +
    • Server URL: <url>
    • +
    • User ID: xelsysadm
    • +
    • Password: <password>.
    • +
    +

    where <url> is https://${MASTERNODE-HOSTNAME}:${MASTERNODE-PORT}.

    +
  2. +
  3. +

    If successful the Design Console will be displayed.

    +
  4. +
+ + +
+ +
+ + + +
+ + +
+ + + +
+ +
+
+
+ + + + + + + + + + + + + + + + + + + diff --git a/docs/23.4.1/idm-products/oig/configure-ingress/index.html b/docs/23.4.1/idm-products/oig/configure-ingress/index.html new file mode 100644 index 000000000..93bc7bc4a --- /dev/null +++ b/docs/23.4.1/idm-products/oig/configure-ingress/index.html @@ -0,0 +1,4012 @@ + + + + + + + + + + + + Configure an ingress for an OIG domain :: Oracle Fusion Middleware on Kubernetes + + + + + + + + + + + + + + + + + + + + + + + + + +
+
+
+ +
+
+ + + + +
+
+ +
+
+ + +
+
+ +
+ +
+ +
+ +

+ + Configure an ingress for an OIG domain +

+ + + + + + + +

Choose one of the following supported methods to configure an Ingress to direct traffic for your OIG domain.

+ + + + + + + + + + + + + + + + + +
+ +
+ + +
+ + +
+ + + +
+ +
+
+
+ + + + + + + + + + + + + + + + + + diff --git a/docs/23.4.1/idm-products/oig/configure-ingress/index.xml b/docs/23.4.1/idm-products/oig/configure-ingress/index.xml new file mode 100644 index 000000000..ec115c3ab --- /dev/null +++ b/docs/23.4.1/idm-products/oig/configure-ingress/index.xml @@ -0,0 +1,46 @@ + + + + Configure an ingress for an OIG domain on Oracle Fusion Middleware on Kubernetes + /fmw-kubernetes/23.4.1/idm-products/oig/configure-ingress/ + Recent content in Configure an ingress for an OIG domain on Oracle Fusion Middleware on Kubernetes + Hugo -- gohugo.io + en-us + + + + + + a. Using an Ingress with NGINX (non-SSL) + /fmw-kubernetes/23.4.1/idm-products/oig/configure-ingress/ingress-nginx-setup-for-oig-domain-setup-on-k8s/ + Mon, 01 Jan 0001 00:00:00 +0000 + + /fmw-kubernetes/23.4.1/idm-products/oig/configure-ingress/ingress-nginx-setup-for-oig-domain-setup-on-k8s/ + Setting up an ingress for NGINX for the OIG domain on Kubernetes (non-SSL) The instructions below explain how to set up NGINX as an ingress for the OIG domain with non-SSL termination. +Note: All the steps below should be performed on the master node. + Install NGINX +a. Configure the repository +b. Create a namespace +c. Install NGINX using helm +d. Setup routing rules for the domain + Create an ingress for the domain + + + + b. Using an Ingress with NGINX (SSL) + /fmw-kubernetes/23.4.1/idm-products/oig/configure-ingress/ingress-nginx-setup-for-oig-domain-setup-on-k8s-ssl/ + Mon, 01 Jan 0001 00:00:00 +0000 + + /fmw-kubernetes/23.4.1/idm-products/oig/configure-ingress/ingress-nginx-setup-for-oig-domain-setup-on-k8s-ssl/ + Setting up an ingress for NGINX for the OIG domain on Kubernetes The instructions below explain how to set up NGINX as an ingress for the OIG domain with SSL termination. +Note: All the steps below should be performed on the master node. + Create a SSL certificate +a. Generate SSL certificate +b. Create a Kubernetes secret for SSL + Install NGINX +a. Configure the repository +b. Create a namespace + + + + \ No newline at end of file diff --git a/docs/23.4.1/idm-products/oig/configure-ingress/ingress-nginx-setup-for-oig-domain-setup-on-k8s-ssl/index.html b/docs/23.4.1/idm-products/oig/configure-ingress/ingress-nginx-setup-for-oig-domain-setup-on-k8s-ssl/index.html new file mode 100644 index 000000000..ee4bd1655 --- /dev/null +++ b/docs/23.4.1/idm-products/oig/configure-ingress/ingress-nginx-setup-for-oig-domain-setup-on-k8s-ssl/index.html @@ -0,0 +1,4349 @@ + + + + + + + + + + + + b. Using an Ingress with NGINX (SSL) :: Oracle Fusion Middleware on Kubernetes + + + + + + + + + + + + + + + + + + + + + + + + + +
+
+
+ +
+
+ + + + + + + +
+
+ +
+ +
+ +
+ +

+ + b. Using an Ingress with NGINX (SSL) +

+ + + + + + +

Setting up an ingress for NGINX for the OIG domain on Kubernetes (SSL)

+

The instructions below explain how to set up NGINX as an ingress for the OIG domain with SSL termination.

+

Note: All the steps below should be performed on the master node.

+
    +
  1. +

    Create a SSL certificate

    +

    a. Generate SSL certificate

    +

    b. Create a Kubernetes secret for SSL

    +
  2. +
  3. +

    Install NGINX

    +

    a. Configure the repository

    +

    b. Create a namespace

    +

    c. Install NGINX using helm

    +
  4. +
  5. +

    Create an ingress for the domain

    +
  6. +
  7. +

    Verify that you can access the domain URL

    +
  8. +
+

Create a SSL certificate

+

Generate SSL certificate

+
    +
  1. +

    Generate a private key and certificate signing request (CSR) using a tool of your choice. Send the CSR to your certificate authority (CA) to generate the certificate.

    +

    If you want to use a certificate for testing purposes, you can generate a self-signed certificate using openssl:

    +
    $ mkdir <workdir>/ssl
    +$ cd <workdir>/ssl
    +$ openssl req -x509 -nodes -days 365 -newkey rsa:2048 -keyout tls.key -out tls.crt -subj "/CN=<nginx-hostname>"
    +

    For example:

    +
    $ mkdir /scratch/OIGK8S/ssl
    +$ cd /scratch/OIGK8S/ssl
    +$ openssl req -x509 -nodes -days 365 -newkey rsa:2048 -keyout tls.key -out tls.crt -subj "/CN=masternode.example.com"
    +

    Note: The CN should match the host.domain of the master node in order to prevent hostname problems during certificate verification. A quick way to confirm the CN is shown in the sketch after this list.

    +

    The output will look similar to the following:

    +
    Generating a 2048 bit RSA private key
    +..........................................+++
    +.......................................................................................................+++
    +writing new private key to 'tls.key'
    +-----
    +
  2. +
+
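
To confirm the CN and expiry date of the generated certificate, a check such as the following can be used (a sketch that assumes the tls.crt generated above):

+
# prints the subject (CN) and expiry date of the certificate
+$ openssl x509 -in tls.crt -noout -subject -enddate
+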

Create a Kubernetes secret for SSL

+
    +
  1. +

    Create a secret for SSL containing the SSL certificate by running the following command:

    +
    $ kubectl -n oigns create secret tls <domain_uid>-tls-cert --key <workdir>/tls.key --cert <workdir>/tls.crt
    +

    For example:

    +
    $ kubectl -n oigns create secret tls governancedomain-tls-cert --key /scratch/OIGK8S/ssl/tls.key --cert /scratch/OIGK8S/ssl/tls.crt
    +

    The output will look similar to the following:

    +
    secret/governancedomain-tls-cert created
    +
  2. +
  3. +

    Confirm that the secret is created by running the following command:

    +
    $ kubectl get secret <domain_uid>-tls-cert -o yaml -n oigns
    +

    For example:

    +
    $ kubectl get secret governancedomain-tls-cert -o yaml -n oigns
    +

    The output will look similar to the following:

    +
    apiVersion: v1
    +data:
    +  tls.crt: LS0tLS1CRUdJTiBDRVJUSUZJQ0FURS0tLS0tCk1JSURGVENDQWYyZ0F3SUJBZ0lKQUl3ZjVRMWVxZnljTUEwR0NTcUdTSWIzRFFFQkN3VUFNQ0V4SHpBZEJnTlYKQkFNTUZtUmxiakF4WlhadkxuVnpMbTl5WVdOc1pTNWpiMjB3SGhjTk1qQXdPREV3TVRReE9UUXpXaGNOTWpFdwpPREV3TVRReE9UUXpXakFoTVI4d0hRWURWUVFEREJaa1pXNHdNV1YyYnk1MWN5NXZjbUZqYkdVdVkyOXRNSUlCCklqQU5CZ2txaGtpRzl3MEJBUUVGQUFPQ0FROEFNSUlCQ2dLQ0FRRUEyY0lpVUhwcTRVZzBhaGR6aXkycHY2cHQKSVIza2s5REd2eVRNY0syaWZQQ2dtUU5CdHV6VXNFN0l4c294eldITmU5RFpXRXJTSjVON3Ym1lTzJkMVd2NQp1aFhzbkFTbnkwY1N9xVDNQSlpDVk1MK0llZVFKdnhaVjZaWWU4V2FFL1NQSGJzczRjYy9wcG1mc3pxCnErUi83cXEyMm9ueHNHaE9vQ1h1TlQvMFF2WXVzMnNucGtueWRKRHUxelhGbDREYkFIZGMvamNVK0NPWWROeS8KT3Iza2JIV0FaTkR4OWxaZUREOTRmNXZLcUF2V0FkSVJZa2UrSmpNTHg0VHo2ZlM0VXoxbzdBSTVuSApPQ1ZMblV5U0JkaGVuWTNGNEdFU0wwbnorVlhFWjRWVjRucWNjRmo5cnJ0Q29pT1BBNlgvNGdxMEZJbi9Qd0lECkFRQUJvMUF3VGpBZEJnTlZIUTRFRmdRVWw1VnVpVDBDT0xGTzcxMFBlcHRxSC9DRWZyY3dId1lEVlIwakJCZ3cKRm9BVWw1VnVpVDBDT0xGTzcxMFBlcHRxSC9DRWZyY3dEQVlEVlIwVEJBVXdBd0VCL3pBTkJna3Foa2lHOXcwQgpBUXNGQUFPQ0FRRUFXdEN4b2ZmNGgrWXZEcVVpTFFtUnpqQkVBMHJCOUMwL1FWOG9JQzJ3d1hzYi9KaVNuMHdOCjNMdHppejc0aStEbk1yQytoNFQ3enRaSkc3NVluSGRKcmxQajgzVWdDLzhYTlFCSUNDbTFUa3RlVU1jWG0reG4KTEZEMHpReFhpVzV0N1FHcWtvK2FjeTlhUnUvN3JRMXlNSE9HdVVkTTZETzErNXF4cTdFNXFMamhyNEdKejV5OAoraW8zK25UcUVKMHFQOVRocG96RXhBMW80OEY0ZHJybWdqd3ROUldEQVpBYmYyV1JNMXFKWXhxTTJqdU1FQWNsCnFMek1TdEZUQ2o1UGFTQ0NUV1VEK3ZlSWtsRWRpaFdpRm02dzk3Y1diZ0lGMlhlNGk4L2szMmF1N2xUTDEvd28KU3Q2dHpsa20yV25uUFlVMzBnRURnVTQ4OU02Z1dybklpZz09Ci0tLS0tRU5EIENFUlRJRklDQVRFLS0tLS0K
    +  tls.key: LS0tLS1CRUdJTiBQUklWQVRFIEtFWS0tLS0tCk1JSUV1d0lCQURBTkJna3Foa2lHOXcwQkFRRUZBQVNDQktVd2dnU2hBZ0VBQW9JQkFRRFp3aUpRZW1yaFNEUnEKRjNPTExhbS9xbTBoSGVTVDBNYS9KTXh3cmFKODhLQ1pBMEcyN05Td1Rzakd5akhOWWMxNzBObFlTdEluazN1cApkdVo0N1ozVmEvbTZGZXljQktmTFJ4SW84NnIwSmhQYzhsa0pVd3Y0aDU1QW0vRmxYcGxoN3hab1Q5SThkdXl6Cmh4eittbVorek9xcjVIL3VxcmJhaWZHd2FFNmdKZTQxUC9SQzlpNnpheWVtU2ZKMGtPN1hOY1dYZ05zQWQxeisKTnhUNEk1aDAzTDg2dmVSc2RZQmswUEgyVmw0TVAzaC9tOHFWdW5mK1NvQzlZQjBoRmlSNzRtTXd2SGhQUHA5TApoVFBXanNBam1jYzRKVXVkVEpJRjJGNmRqY1hnWVJJdlNmUDVWY1JuaFZYaWVweHdXUDJ1dTBLaUk0OERwZi9pCkNyUVVpZjgvQWdNQkFBRUNnZjl6cnE2TUVueTFNYWFtdGM2c0laWU1QSDI5R2lSVVlwVXk5bG1sZ3BqUHh3V0sKUkRDay9Td0FmZG9yd1Q2ejNVRk1oYWJ4UU01a04vVjZFYkJlamQxT15bjdvWTVEQWJRRTR3RG9SZWlrVApONndWU0FrVC92Z1RXc1RqRlY1bXFKMCt6U2ppOWtySkZQNVNRN1F2cUswQ3BHRlNhVjY2dW8ycktiNmJWSkJYCkxPZmZPMytlS0tVazBaTnE1Q1NVQk9mbnFoNVFJSGdpaDNiMTRlNjB6bndrNWhaMHBHZE9BQm9aTkoKZ21lanUyTEdzVWxXTjBLOVdsUy9lcUllQzVzQm9jaWlocmxMVUpGWnpPRUV6LzErT2cyemhmT29yTE9rMTIrTgpjQnV0cTJWQ2I4ZFJDaFg1ZzJ0WnBrdzgzcXN5RSt3M09zYlQxa0VDZ1lFQTdxUnRLWGFONUx1SENvWlM1VWhNCm1WcnYxTEg0eGNhaDJIZnMksrMHJqQkJONGpkZkFDMmF3R3ZzU1EyR0lYRzVGYmYyK0pwL1kxbktKOEgKZU80MzNLWVgwTDE4NlNNLzFVay9HSEdTek1CWS9KdGR6WkRrbTA4UnBwaTl4bExTeDBWUWtFNVJVcnJJcTRJVwplZzBOM2RVTHZhTVl1UTBrR2dncUFETUNnWUVBNlpqWCtjU2VMZ1BVajJENWRpUGJ1TmVFd2RMeFNPZDFZMUFjCkUzQ01YTWozK2JxQ3BGUVIrTldYWWVuVmM1QiszajlSdHVnQ0YyTkNSdVdkZWowalBpL243UExIRHdCZVY0bVIKM3VQVHJmamRJbFovSFgzQ2NjVE94TmlaajU4VitFdkRHNHNHOGxtRTRieStYRExIYTJyMWxmUk9sUVRMSyswVgpyTU93eU1VQ2dZRUF1dm14WGM4NWxZRW9hU0tkU0cvQk9kMWlYSUtmc2VDZHRNT2M1elJ0UXRsSDQwS0RscE54CmxYcXBjbVc3MWpyYzk1RzVKNmE1ZG5xTE9OSFZoWW8wUEpmSXhPU052RXI2MTE5NjRBMm5sZXRHYlk0M0twUkEKaHBPRHlmdkZoSllmK29kaUJpZFUyL3ZBMCtUczNSUHJzRzBSOUVDOEZqVDNaZVhaNTF1R0xPa0NnWUFpTmU0NwplQjRxWXdrNFRsMTZmZG5xQWpaQkpLR05xY2c1V1R3alpMSkp6R3owdCtuMkl4SFd2WUZFSjdqSkNmcHFsaDlqCmlDcjJQZVV3K09QTlNUTG1JcUgydzc5L1pQQnNKWXVsZHZ4RFdGVWFlRXg1aHpkNDdmZlNRRjZNK0NHQmthYnIKVzdzU3R5V000ZFdITHpDaGZMS20yWGJBd0VqNUQrbkN1WTRrZVFLQmdFSkRHb0puM1NCRXcra2xXTE85N09aOApnc3lYQm9mUW1lRktIS2NHNzFZUFhJbTRlV1kyUi9KOCt5anc5b1FJQ3o5NlRidkdSZEN5QlJhbWhoTmFGUzVyCk9MZUc0ejVENE4zdThUc0dNem9QcU13KzBGSXJiQ3FzTnpGWTg3ekZweEdVaXZvRWZLNE82YkdERTZjNHFqNGEKNmlmK0RSRSt1TWRMWTQyYTA3ekoKLS0tLS1FTkQgUFJJVkFURSBLRVktLS0tLQo=
    +kind: Secret
    +metadata:
    +  creationTimestamp: "<DATE>"
    +  name: governancedomain-tls-cert
    +  namespace: oigns
    +  resourceVersion: "3319899"
    +  uid: 274cc960-281a-494c-a3e3-d93c3abd051f
    +type: kubernetes.io/tls
    +
    +
  4. +
+

Install NGINX

+

Use helm to install NGINX.

+

Configure the repository

+
    +
  1. +

    Add the Helm chart repository for installing NGINX using the following command:

    +
    $ helm repo add stable https://kubernetes.github.io/ingress-nginx
    +

    The output will look similar to the following:

    +
    "stable" has been added to your repositories
    +
  2. +
  3. +

    Update the repository using the following command:

    +
    $ helm repo update
    +

    The output will look similar to the following:

    +
    Hang tight while we grab the latest from your chart repositories...
    +...Successfully got an update from the "stable" chart repository
    +Update Complete. Happy Helming!
    +
  4. +
+

Create a namespace

+
    +
  1. +

    Create a Kubernetes namespace for NGINX:

    +
    $ kubectl create namespace nginxssl
    +

    The output will look similar to the following:

    +
    namespace/nginxssl created
    +
  2. +
+

Install NGINX using helm

+

If you can connect directly to the master node IP address from a browser, then install NGINX with the --set controller.service.type=NodePort parameter.

+

If you are using a Managed Service for your Kubernetes cluster, for example Oracle Kubernetes Engine (OKE) on Oracle Cloud Infrastructure (OCI), and connect from a browser to the Load Balancer IP address, then use the --set controller.service.type=LoadBalancer parameter. This instructs the Managed Service to set up a Load Balancer to direct traffic to the NGINX ingress.

+
    +
  1. +

    To install NGINX, use the following helm command depending on whether you are using NodePort or LoadBalancer:

    +

    a) Using NodePort

    +
    $ helm install nginx-ingress -n nginxssl --set controller.extraArgs.default-ssl-certificate=oigns/governancedomain-tls-cert  --set controller.service.type=NodePort --set controller.admissionWebhooks.enabled=false stable/ingress-nginx
    +

    The output will look similar to the following:

    +
    $ helm install nginx-ingress -n nginxssl --set controller.extraArgs.default-ssl-certificate=oigns/governancedomain-tls-cert  --set controller.service.type=NodePort --set controller.admissionWebhooks.enabled=false stable/ingress-nginx
    +NAME: nginx-ingress
    +LAST DEPLOYED: <DATE>
    +NAMESPACE: nginxssl
    +STATUS: deployed
    +REVISION: 1
    +TEST SUITE: None
    +NOTES:
    +The nginx-ingress controller has been installed.
    +Get the application URL by running these commands:
    +  export HTTP_NODE_PORT=$(kubectl --namespace nginxssl get services -o jsonpath="{.spec.ports[0].nodePort}" nginx-ingress-controller)
    +  export HTTPS_NODE_PORT=$(kubectl --namespace nginxssl get services -o jsonpath="{.spec.ports[1].nodePort}" nginx-ingress-controller)
    +  export NODE_IP=$(kubectl --namespace nginxssl get nodes -o jsonpath="{.items[0].status.addresses[1].address}")
    +
    +  echo "Visit http://$NODE_IP:$HTTP_NODE_PORT to access your application via HTTP."
    +  echo "Visit https://$NODE_IP:$HTTPS_NODE_PORT to access your application via HTTPS."
    +
    +An example Ingress that makes use of the controller:
    +
    +  apiVersion: networking.k8s.io/v1
    +  kind: Ingress
    +  metadata:
    +    annotations:
    +      kubernetes.io/ingress.class: nginx
    +    name: example
    +    namespace: foo
    +  spec:
    +    ingressClassName: example-class
    +    rules:
    +      - host: www.example.com
    +        http:
    +          paths:
    +            - path: /
    +              pathType: Prefix
    +              backend:
    +                serviceName: exampleService
    +                servicePort: 80
    +              path: /
    +    # This section is only required if TLS is to be enabled for the Ingress
    +    tls:
    +        - hosts:
    +            - www.example.com
    +          secretName: example-tls
    +
    +If TLS is enabled for the Ingress, a Secret containing the certificate and key must also be provided:
    +
    +  apiVersion: v1
    +  kind: Secret
    +  metadata:
    +    name: example-tls
    +    namespace: foo
    +  data:
    +    tls.crt: <base64 encoded cert>
    +    tls.key: <base64 encoded key>
    +  type: kubernetes.io/tls
    +

    b) Using LoadBalancer

    +
    $ helm install nginx-ingress -n nginxssl --set controller.extraArgs.default-ssl-certificate=oigns/governancedomain-tls-cert  --set controller.service.type=LoadBalancer --set controller.admissionWebhooks.enabled=false stable/ingress-nginx
    +

    The output will look similar to the following:

    +
    NAME: nginx-ingress
    +LAST DEPLOYED: <DATE>
    +NAMESPACE: nginxssl
    +STATUS: deployed
    +REVISION: 1
    +TEST SUITE: None
    +NOTES:
    +The ingress-nginx controller has been installed.
    +It may take a few minutes for the LoadBalancer IP to be available.
    +You can watch the status by running 'kubectl --namespace nginxssl get services -o wide -w nginx-ingress-ingress-nginx-controller'
    +
    +An example Ingress that makes use of the controller:
    +
    +  apiVersion: networking.k8s.io/v1
    +  kind: Ingress
    +  metadata:
    +    annotations:
    +      kubernetes.io/ingress.class: nginx
    +    name: example
    +    namespace: foo
    +  spec:
    +    rules:
    +      - host: www.example.com
    +        http:
    +          paths:
    +            - path: /
    +              pathType: Prefix
    +              backend:
    +                service:
    +                name: exampleService
    +                port: 80
    +
    +    # This section is only required if TLS is to be enabled for the Ingress
    +    tls:
    +        - hosts:
    +            - www.example.com
    +          secretName: example-tls
    +
    +If TLS is enabled for the Ingress, a Secret containing the certificate and key must also be provided:
    +
    +  apiVersion: v1
    +  kind: Secret
    +  metadata:
    +    name: example-tls
    +    namespace: foo
    +  data:
    +    tls.crt: <base64 encoded cert>
    +    tls.key: <base64 encoded key>
    +  type: kubernetes.io/tls
    +
  2. +
+
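
Once the install completes, the service type and assigned ports can be confirmed with a command such as the following (a sketch that assumes the nginxssl namespace and the nginx-ingress release name used above):

+
# shows whether the controller service is NodePort or LoadBalancer, and its ports
+$ kubectl --namespace nginxssl get services nginx-ingress-ingress-nginx-controller
+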

Setup routing rules for the domain

+
    +
  1. +

    Setup routing rules by running the following commands:

    +
    $ cd $WORKDIR/kubernetes/charts/ingress-per-domain
    +

    Edit values.yaml and change the domainUID parameter to match your domainUID, for example domainUID: governancedomain. Change sslType to SSL. The file should look as follows:

    +
    # Load balancer type. Supported values are: NGINX
    +type: NGINX
    +
    +# SSL configuration Type. Supported Values are : NONSSL,SSL
    +sslType: SSL
    +
    +# domainType. Supported values are: oim
    +domainType: oim
    +
    +#WLS domain as backend to the load balancer
    +wlsDomain:
    +  domainUID: governancedomain
    +  adminServerName: AdminServer
    +  adminServerPort: 7001
    +  adminServerSSLPort:
    +  soaClusterName: soa_cluster
    +  soaManagedServerPort: 8001
    +  soaManagedServerSSLPort:
    +  oimClusterName: oim_cluster
    +  oimManagedServerPort: 14000
    +  oimManagedServerSSLPort:
    +
    +# Host  specific values
    +hostName:
    +  enabled: false
    +  admin:
    +  runtime:
    +  internal:
    +
    +# NGINX specific values
    +nginx:
    +  nginxTimeOut: 180
    +
  2. +
+

Create an ingress for the domain

+
    +
  1. +

    Create an Ingress for the domain (governancedomain-nginx), in the domain namespace by using the sample Helm chart:

    +
    $ cd $WORKDIR
    +$ helm install governancedomain-nginx kubernetes/charts/ingress-per-domain --namespace oigns --values kubernetes/charts/ingress-per-domain/values.yaml
    +

    Note: The $WORKDIR/kubernetes/charts/ingress-per-domain/templates/nginx-ingress-ssl.yaml file has nginx.ingress.kubernetes.io/enable-access-log set to false. If you want to enable access logs, set this value to true before executing the command. Enabling access logs can cause disk space issues if the logs are not regularly maintained.

    +

    For example:

    +
    $ cd $WORKDIR
    +$ helm install governancedomain-nginx kubernetes/charts/ingress-per-domain --namespace oigns --values kubernetes/charts/ingress-per-domain/values.yaml
    +

    The output will look similar to the following:

    +
    NAME: governancedomain-nginx
    +LAST DEPLOYED:  <DATE>
    +NAMESPACE: oigns
    +STATUS: deployed
    +REVISION: 1
    +TEST SUITE: None
    +
  2. +
  3. +

    Run the following command to show the ingress is created successfully:

    +
    $ kubectl get ing -n <namespace>
    +

    For example:

    +
    $ kubectl get ing -n oigns
    +

    The output will look similar to the following:

    +
    NAME                     CLASS    HOSTS   ADDRESS   PORTS   AGE
    +governancedomain-nginx   <none>   *       x.x.x.x   80      49s
    +
  4. +
  5. +

    Find the node port of NGINX using the following command:

    +
    $ kubectl get services -n nginxssl -o jsonpath="{.spec.ports[1].nodePort}" nginx-ingress-ingress-nginx-controller
    +

    The output will look similar to the following:

    +
    32033
    +
  6. +
  7. +

    Run the following command to check the ingress:

    +
    $ kubectl describe ing governancedomain-nginx -n <namespace>
    +

    For example:

    +
    $ kubectl describe ing governancedomain-nginx -n oigns
    +

    The output will look similar to the following:

    +
    Name:             governancedomain-nginx
    +Namespace:        oigns
    +Address:          10.111.175.104
    +Default backend:  default-http-backend:80 (<error: endpoints "default-http-backend" not found>)
    +Rules:
    +  Host        Path  Backends
    +  ----        ----  --------
    +  *
    +              /console                        governancedomain-adminserver:7001 (10.244.2.50:7001)
    +              /consolehelp                    governancedomain-adminserver:7001 (10.244.2.50:7001)
    +              /em                             governancedomain-adminserver:7001 (10.244.2.50:7001)
    +              /ws_utc                         governancedomain-cluster-soa-cluster:8001 (10.244.2.51:8001)
    +              /soa                            governancedomain-cluster-soa-cluster:8001 (10.244.2.51:8001)
    +              /integration                    governancedomain-cluster-soa-cluster:8001 (10.244.2.51:8001)
    +              /soa-infra                      governancedomain-cluster-soa-cluster:8001 (10.244.2.51:8001)
    +              /identity                       governancedomain-cluster-oim-cluster:14000 (10.244.2.52:14000)
    +              /admin                          governancedomain-cluster-oim-cluster:14000 (10.244.2.52:14000)
    +              /oim                            governancedomain-cluster-oim-cluster:14000 (10.244.2.52:14000)
    +              /sysadmin                       governancedomain-cluster-oim-cluster:14000 (10.244.2.52:14000)
    +              /workflowservice                governancedomain-cluster-oim-cluster:14000 (10.244.2.52:14000)
    +              /callbackResponseService        governancedomain-cluster-oim-cluster:14000 (10.244.2.52:14000)
    +              /spml-xsd                       governancedomain-cluster-oim-cluster:14000 (10.244.2.52:14000)
    +              /HTTPClnt                       governancedomain-cluster-oim-cluster:14000 (10.244.2.52:14000)
    +              /reqsvc                         governancedomain-cluster-oim-cluster:14000 (10.244.2.52:14000)
    +              /iam                            governancedomain-cluster-oim-cluster:14000 (10.244.2.52:14000)
    +              /provisioning-callback          governancedomain-cluster-oim-cluster:14000 (10.244.2.52:14000)
    +              /CertificationCallbackService   governancedomain-cluster-oim-cluster:14000 (10.244.2.52:14000)
    +              /ucs                            governancedomain-cluster-oim-cluster:14000 (10.244.2.52:14000)
    +              /FacadeWebApp                   governancedomain-cluster-oim-cluster:14000 (10.244.2.52:14000)
    +              /OIGUI                          governancedomain-cluster-oim-cluster:14000 (10.244.2.52:14000)
    +              /weblogic                       governancedomain-cluster-oim-cluster:14000 (10.244.2.52:14000)
    + Annotations:  kubernetes.io/ingress.class: nginx
    +              meta.helm.sh/release-name: governancedomain-nginx
    +              meta.helm.sh/release-namespace: oigns
    +              nginx.ingress.kubernetes.io/affinity: cookie
    +              nginx.ingress.kubernetes.io/affinity-mode: persistent
    +              nginx.ingress.kubernetes.io/configuration-snippet:
    +                more_clear_input_headers "WL-Proxy-Client-IP" "WL-Proxy-SSL";
    +                more_set_input_headers "X-Forwarded-Proto: https";
    +                more_set_input_headers "WL-Proxy-SSL: true";
    +              nginx.ingress.kubernetes.io/enable-access-log: false
    +              nginx.ingress.kubernetes.io/ingress.allow-http: false
    +              nginx.ingress.kubernetes.io/proxy-buffer-size: 2000k
    +              nginx.ingress.kubernetes.io/proxy-read-timeout: 180
    +              nginx.ingress.kubernetes.io/proxy-send-timeout: 180
    +              nginx.ingress.kubernetes.io/session-cookie-name: sticky
    +Events:
    +  Type    Reason  Age                From                      Message
    +  ----    ------  ----               ----                      -------
    +  Normal  Sync    18s (x2 over 38s)  nginx-ingress-controller  Scheduled for sync
    +
  8. +
  9. +

    To confirm that the new Ingress is successfully routing to the domain’s server pods, run the following command to send a request to the URL for the WebLogic ReadyApp framework:

    +

    Note: If using a load balancer for your ingress replace ${MASTERNODE-HOSTNAME}:${MASTERNODE-PORT} with ${LOADBALANCER-HOSTNAME}:${LOADBALANCER-PORT}.

    +
    $ curl -v -k https://${MASTERNODE-HOSTNAME}:${MASTERNODE-PORT}/weblogic/ready
    +

    For example:

    +
    $ curl -v -k  https://masternode.example.com:32033/weblogic/ready
    +

    The output will look similar to the following:

    +
    $ curl -v -k https://masternode.example.com:32033/weblogic/ready
    +* About to connect() to X.X.X.X port 32033 (#0)
    +*   Trying X.X.X.X...
    +* Connected to masternode.example.com (X.X.X.X) port 32033 (#0)
    +* Initializing NSS with certpath: sql:/etc/pki/nssdb
    +* skipping SSL peer certificate verification
    +* SSL connection using TLS_ECDHE_RSA_WITH_AES_128_GCM_SHA256
    +* Server certificate:
    +*       subject: CN=masternode.example.com
    +*       start date: <DATE>
    +*       expire date: <DATE>
    +*       common name: masternode.example.com
    +*       issuer: CN=masternode.example.com
    +> GET /weblogic/ready HTTP/1.1
    +> User-Agent: curl/7.29.0
    +> Host: X.X.X.X:32033
    +> Accept: */*
    +>
    +< HTTP/1.1 200 OK
    +< Server: nginx/1.19.1
    +< Date: <DATE>
    +< Content-Length: 0
    +< Connection: keep-alive
    +< Strict-Transport-Security: max-age=15724800; includeSubDomains
    +<
    +* Connection #0 to host X.X.X.X left intact
    +
  10. +
+

Verify that you can access the domain URL

+

After setting up the NGINX ingress, verify that the domain applications are accessible through the NGINX ingress port (for example, 32033) as per Validate Domain URLs.
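
+

As a quick sanity check before working through that page, you can curl one of the routed paths listed above, for example (a sketch that assumes the same ${MASTERNODE-HOSTNAME} and NGINX ingress port used earlier; expect an HTTP response or redirect rather than a connection error):

+
# /identity is one of the paths routed by the governancedomain-nginx ingress
+$ curl -k https://${MASTERNODE-HOSTNAME}:${MASTERNODE-PORT}/identity
+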

+ + +
+ +
+ + + +
+ + +
+ + + +
+ +
+
+
+ + + + + + + + + + + + + + + + + + + diff --git a/docs/23.4.1/idm-products/oig/configure-ingress/ingress-nginx-setup-for-oig-domain-setup-on-k8s/index.html b/docs/23.4.1/idm-products/oig/configure-ingress/ingress-nginx-setup-for-oig-domain-setup-on-k8s/index.html new file mode 100644 index 000000000..f95300117 --- /dev/null +++ b/docs/23.4.1/idm-products/oig/configure-ingress/ingress-nginx-setup-for-oig-domain-setup-on-k8s/index.html @@ -0,0 +1,4277 @@ + + + + + + + + + + + + a. Using an Ingress with NGINX (non-SSL) :: Oracle Fusion Middleware on Kubernetes + + + + + + + + + + + + + + + + + + + + + + + + + +
+
+
+ +
+ +
+ +
+ +
+ +
+ +

+ + a. Using an Ingress with NGINX (non-SSL) +

+ + + + + + +

Setting up an ingress for NGINX for the OIG domain on Kubernetes (non-SSL)

+

The instructions below explain how to set up NGINX as an ingress for the OIG domain with non-SSL termination.

+

Note: All the steps below should be performed on the master node.

+
    +
  1. +

    Install NGINX

    +

    a. Configure the repository

    +

    b. Create a namespace

    +

    c. Install NGINX using helm

    +

    d. Setup routing rules for the domain

    +
  2. +
  3. +

    Create an ingress for the domain

    +
  4. +
  5. +

    Verify that you can access the domain URL

    +
  6. +
+

Install NGINX

+

Use helm to install NGINX.

+

Configure the repository

+
    +
  1. +

    Add the Helm chart repository for NGINX using the following command:

    +
    $ helm repo add stable https://kubernetes.github.io/ingress-nginx
    +

    The output will look similar to the following:

    +
    "stable" has been added to your repositories
    +
  2. +
  3. +

    Update the repository using the following command:

    +
    $ helm repo update
    +

    The output will look similar to the following:

    +
    Hang tight while we grab the latest from your chart repositories...
    +...Successfully got an update from the "stable" chart repository
    +Update Complete. Happy Helming!
    +
  4. +
+

Create a namespace

+
    +
  1. +

    Create a Kubernetes namespace for NGINX by running the following command:

    +
    $ kubectl create namespace nginx
    +

    The output will look similar to the following:

    +
    namespace/nginx created
    +
  2. +
+

Install NGINX using helm

+

If you can connect directly to the master node IP address from a browser, then install NGINX with the --set controller.service.type=NodePort parameter.

+

If you are using a Managed Service for your Kubernetes cluster, for example Oracle Kubernetes Engine (OKE) on Oracle Cloud Infrastructure (OCI), and connect from a browser to the Load Balancer IP address, then use the --set controller.service.type=LoadBalancer parameter. This instructs the Managed Service to set up a Load Balancer to direct traffic to the NGINX ingress.

+
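
Whichever service type you choose, you can check what was actually created once the install below completes. A minimal sketch, assuming the release name nginx-ingress used in the commands that follow (for NodePort the PORT(S) column shows the assigned node ports; for LoadBalancer the EXTERNAL-IP column shows the load balancer address):

+
$ kubectl --namespace nginx get services -o wide nginx-ingress-ingress-nginx-controller
+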
    +
  1. +

    To install NGINX use the following helm command, depending on whether you are using NodePort or LoadBalancer:

    +

    a) Using NodePort

    +
    $ helm install nginx-ingress -n nginx --set controller.service.type=NodePort --set controller.admissionWebhooks.enabled=false stable/ingress-nginx
    +

    The output will look similar to the following:

    +
    NAME: nginx-ingress
    +LAST DEPLOYED: <DATE>
    +NAMESPACE: nginx
    +STATUS: deployed
    +REVISION: 1
    +TEST SUITE: None
    +NOTES:
    +The ingress-nginx controller has been installed.
    +Get the application URL by running these commands:
    +  export HTTP_NODE_PORT=$(kubectl --namespace nginx get services -o jsonpath="{.spec.ports[0].nodePort}" nginx-ingress-ingress-nginx-controller)
    +  export HTTPS_NODE_PORT=$(kubectl --namespace nginx get services -o jsonpath="{.spec.ports[1].nodePort}" nginx-ingress-ingress-nginx-controller)
    +  export NODE_IP=$(kubectl --namespace nginx get nodes -o jsonpath="{.items[0].status.addresses[1].address}")
    +
    +  echo "Visit http://$NODE_IP:$HTTP_NODE_PORT to access your application via HTTP."
    +  echo "Visit https://$NODE_IP:$HTTPS_NODE_PORT to access your application via HTTPS."
    +
    +An example Ingress that makes use of the controller:
    +
    +  apiVersion: networking.k8s.io/v1
    +  kind: Ingress
    +  metadata:
    +    annotations:
    +      kubernetes.io/ingress.class: nginx
    +    name: example
    +    namespace: foo
    +  spec:
    +    ingressClassName: example-class
    +    rules:
    +      - host: www.example.com
    +        http:
    +          paths:
    +            - path: /
    +              pathType: Prefix
    +              backend:
    +                service:
    +                  name: exampleService
    +                  port: 80
    +    # This section is only required if TLS is to be enabled for the Ingress
    +    tls:
    +      - hosts:
    +        - www.example.com
    +        secretName: example-tls
    +
    +If TLS is enabled for the Ingress, a Secret containing the certificate and key must also be provided:
    +
    +  apiVersion: v1
    +  kind: Secret
    +  metadata:
    +    name: example-tls
    +    namespace: foo
    +  data:
    +    tls.crt: <base64 encoded cert>
    +    tls.key: <base64 encoded key>
    +  type: kubernetes.io/tls
    +

    b) Using LoadBalancer

    +
    $ helm install nginx-ingress -n nginx --set controller.service.type=LoadBalancer --set controller.admissionWebhooks.enabled=false stable/ingress-nginx
    +

    The output will look similar to the following:

    +
    NAME: nginx-ingress
    +LAST DEPLOYED: <DATE>
    +NAMESPACE: nginx
    +STATUS: deployed
    +REVISION: 1
    +TEST SUITE: None
    +NOTES:
    +The nginx-ingress controller has been installed.
    +It may take a few minutes for the LoadBalancer IP to be available.
    +You can watch the status by running 'kubectl --namespace nginx get services -o wide -w nginx-ingress-controller'
    +
    +An example Ingress that makes use of the controller:
    +
    +  apiVersion: networking.k8s.io/v1
    +  kind: Ingress
    +  metadata:
    +    annotations:
    +      kubernetes.io/ingress.class: nginx
    +    name: example
    +    namespace: foo
    +  spec:
    +    ingressClassName: example-class
    +    rules:
    +      - host: www.example.com
    +        http:
    +          paths:
    +            - path: /
    +              pathType: Prefix
    +              backend:
    +                service:
    +                  name: exampleService
    +                  port: 80
    +    # This section is only required if TLS is to be enabled for the Ingress
    +    tls:
    +      - hosts:
    +        - www.example.com
    +        secretName: example-tls
    +
    +If TLS is enabled for the Ingress, a Secret containing the certificate and key must also be provided:
    +
    +  apiVersion: v1
    +  kind: Secret
    +  metadata:
    +    name: example-tls
    +    namespace: foo
    +  data:
    +    tls.crt: <base64 encoded cert>
    +    tls.key: <base64 encoded key>
    +  type: kubernetes.io/tls
    +
  2. +
+

Set up routing rules for the domain

+
    +
  1. +

    Set up routing rules by running the following commands:

    +
    $ cd $WORKDIR/kubernetes/charts/ingress-per-domain
    +

    Edit values.yaml and change the domainUID parameter to match your domainUID, for example domainUID: governancedomain. Also change sslType to NONSSL. The file should look as follows (a scripted way of making the same edits is sketched after these steps):

    +
    # Load balancer type. Supported values are: NGINX
    +type: NGINX
    +
    +# SSL configuration Type. Supported Values are : NONSSL,SSL
    +sslType: NONSSL
    +
    +# domainType. Supported values are: oim
    +domainType: oim
    +
    +#WLS domain as backend to the load balancer
    +wlsDomain:
    +  domainUID: governancedomain
    +  adminServerName: AdminServer
    +  adminServerPort: 7001
    +  adminServerSSLPort:
    +  soaClusterName: soa_cluster
    +  soaManagedServerPort: 8001
    +  soaManagedServerSSLPort:
    +  oimClusterName: oim_cluster
    +  oimManagedServerPort: 14000
    +  oimManagedServerSSLPort:
    +
    +# Host  specific values
    +hostName:
    +  enabled: false
    +  admin:
    +  runtime:
    +  internal:
    +
    +# Ngnix specific values
    +nginx:
    +  nginxTimeOut: 180
    +
  2. +
+
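
If you prefer to script the edits described above rather than change values.yaml by hand, the following sketch shows one possible way, assuming the default file layout shown above. Review the file afterwards before proceeding:

+
$ cd $WORKDIR/kubernetes/charts/ingress-per-domain
+$ sed -i 's/^sslType:.*/sslType: NONSSL/' values.yaml
+$ sed -i 's/^\(  domainUID:\).*/\1 governancedomain/' values.yaml
+$ grep -nE 'sslType|domainUID' values.yaml
+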

Create an ingress for the domain

+
    +
  1. +

    Create an Ingress for the domain (governancedomain-nginx), in the domain namespace by using the sample Helm chart:

    +
    $ cd $WORKDIR
    +$ helm install governancedomain-nginx kubernetes/charts/ingress-per-domain --namespace <namespace> --values kubernetes/charts/ingress-per-domain/values.yaml
    +

    Note: The <workdir>/kubernetes/charts/ingress-per-domain/templates/nginx-ingress-nonssl.yaml has nginx.ingress.kubernetes.io/enable-access-log set to false. If you want to enable access logs, set this value to true before executing the command. Note that enabling access logs can cause disk space issues if the logs are not regularly maintained.

    +

    For example:

    +
    $ cd $WORKDIR
    +$ helm install governancedomain-nginx kubernetes/charts/ingress-per-domain --namespace oigns --values kubernetes/charts/ingress-per-domain/values.yaml
    +

    The output will look similar to the following:

    +
    $ helm install governancedomain-nginx kubernetes/charts/ingress-per-domain --namespace oigns --values kubernetes/charts/ingress-per-domain/values.yaml
    +NAME: governancedomain-nginx
    +LAST DEPLOYED:  <DATE>
    +NAMESPACE: oigns
    +STATUS: deployed
    +REVISION: 1
    +TEST SUITE: None
    +
  2. +
  3. +

    Run the following command to show the ingress is created successfully:

    +
    $ kubectl get ing -n <domain_namespace>
    +

    For example:

    +
    $ kubectl get ing -n oigns
    +

    The output will look similar to the following:

    +
    NAME                     CLASS    HOSTS   ADDRESS   PORTS   AGE
    +governancedomain-nginx   <none>   *       x.x.x.x   80      47s
    +
  4. +
  5. +

    Find the NodePort of NGINX using the following command (only if you installed NGINX using NodePort):

    +
    $ kubectl get services -n nginx -o jsonpath="{.spec.ports[0].nodePort}" nginx-ingress-ingress-nginx-controller
    +

    The output will look similar to the following:

    +
    31530
    +
  6. +
  7. +

    Run the following command to check the ingress:

    +
    $ kubectl describe ing governancedomain-nginx -n <namespace>
    +

    For example:

    +
    $ kubectl describe ing governancedomain-nginx -n oigns
    +

    The output will look similar to the following:

    +
    Name:             governancedomain-nginx
    +Namespace:        oigns
    +Address:
    +Default backend:  default-http-backend:80 (<error: endpoints "default-http-backend" not found>)
    +Rules:
    +  Host        Path  Backends
    +  ----        ----  --------
    +  *
    +              /console                        governancedomain-adminserver:7001 (10.244.2.50:7001)
    +              /consolehelp                    governancedomain-adminserver:7001 (10.244.2.50:7001)
    +              /em                             governancedomain-adminserver:7001 (10.244.2.50:7001)
    +              /ws_utc                         governancedomain-cluster-soa-cluster:8001 (10.244.2.51:8001)
    +              /soa                            governancedomain-cluster-soa-cluster:8001 (10.244.2.51:8001)
    +              /integration                    governancedomain-cluster-soa-cluster:8001 (10.244.2.51:8001)
    +              /soa-infra                      governancedomain-cluster-soa-cluster:8001 (10.244.2.51:8001)
    +              /identity                       governancedomain-cluster-oim-cluster:14000 (10.244.2.52:14000)
    +              /admin                          governancedomain-cluster-oim-cluster:14000 (10.244.2.52:14000)
    +              /oim                            governancedomain-cluster-oim-cluster:14000 (10.244.2.52:14000)
    +              /sysadmin                       governancedomain-cluster-oim-cluster:14000 (10.244.2.52:14000)
    +              /workflowservice                governancedomain-cluster-oim-cluster:14000 (10.244.2.52:14000)
    +              /callbackResponseService        governancedomain-cluster-oim-cluster:14000 (10.244.2.52:14000)
    +              /spml-xsd                       governancedomain-cluster-oim-cluster:14000 (10.244.2.52:14000)
    +              /HTTPClnt                       governancedomain-cluster-oim-cluster:14000 (10.244.2.52:14000)
    +              /reqsvc                         governancedomain-cluster-oim-cluster:14000 (10.244.2.52:14000)
    +              /iam                            governancedomain-cluster-oim-cluster:14000 (10.244.2.52:14000)
    +              /provisioning-callback          governancedomain-cluster-oim-cluster:14000 (10.244.2.52:14000)
    +              /CertificationCallbackService   governancedomain-cluster-oim-cluster:14000 (10.244.2.52:14000)
    +              /ucs                            governancedomain-cluster-oim-cluster:14000 (10.244.2.52:14000)
    +              /FacadeWebApp                   governancedomain-cluster-oim-cluster:14000 (10.244.2.52:14000)
    +              /OIGUI                          governancedomain-cluster-oim-cluster:14000 (10.244.2.52:14000)
    +              /weblogic                       governancedomain-cluster-oim-cluster:14000 (10.244.2.52:14000)
    +Annotations:  kubernetes.io/ingress.class: nginx
    +              meta.helm.sh/release-name: governancedomain-nginx
    +              meta.helm.sh/release-namespace: oigns
    +              nginx.ingress.kubernetes.io/affinity: cookie
    +              nginx.ingress.kubernetes.io/affinity-mode: persistent
    +              nginx.ingress.kubernetes.io/enable-access-log: false
    +              nginx.ingress.kubernetes.io/proxy-read-timeout: 180
    +              nginx.ingress.kubernetes.io/proxy-send-timeout: 180
    +              nginx.ingress.kubernetes.io/session-cookie-name: sticky
    +Events:
    +  Type    Reason  Age   From                      Message
    +  ----    ------  ----  ----                      -------
    +  Normal  Sync    27s   nginx-ingress-controller  Scheduled for sync
    +
  8. +
  9. +

    To confirm that the new ingress is successfully routing to the domain’s server pods, run the following command to send a request to the URL for the WebLogic ReadyApp framework:

    +

    Note: If using a load balancer for your ingress replace ${MASTERNODE-HOSTNAME}:${MASTERNODE-PORT} with ${LOADBALANCER-HOSTNAME}:${LOADBALANCER-PORT}.

    +
    $ curl -v http://${MASTERNODE-HOSTNAME}:${MASTERNODE-PORT}/weblogic/ready
    +

    For example:

    +

    a) For NodePort

    +
    $ curl -v http://masternode.example.com:31530/weblogic/ready
    +

    b) For LoadBalancer

    +
    $ curl -v http://masternode.example.com:80/weblogic/ready
    +

    The output will look similar to the following:

    +
    $ curl -v http://masternode.example.com:31530/weblogic/ready
    +* About to connect() to masternode.example.com port 31530 (#0)
    +*   Trying X.X.X.X...
    +* Connected to masternode.example.com (X.X.X.X) port 31530 (#0)
    +> GET /weblogic/ready HTTP/1.1
    +> User-Agent: curl/7.29.0
    +> Host: masternode.example.com:31530
    +> Accept: */*
    +>
    +< HTTP/1.1 200 OK
    +< Server: nginx/1.19.2
    +< Date: <DATE>
    +< Content-Length: 0
    +< Connection: keep-alive
    +<
    +* Connection #0 to host masternode.example.com left intact
    +
  10. +
+

Verify that you can access the domain URL

+

After setting up the NGINX ingress, verify that the domain applications are accessible through the NGINX ingress port (for example 31530) as per Validate Domain URLs.
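
As an optional quick check before using a browser, you can probe a couple of the application paths through the ingress port with curl. This is only a sketch: the hostname and port are the example values used above, and the expected result is an HTTP response (for example 200 or a redirect) rather than a connection error.

+
$ curl -I http://masternode.example.com:31530/console
+$ curl -I http://masternode.example.com:31530/identity
+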

diff --git a/docs/23.4.1/idm-products/oig/create-oig-domains/index.html b/docs/23.4.1/idm-products/oig/create-oig-domains/index.html
new file mode 100644
index 000000000..c648ebc70
--- /dev/null
+++ b/docs/23.4.1/idm-products/oig/create-oig-domains/index.html
@@ -0,0 +1,4771 @@
+Create OIG domains :: Oracle Fusion Middleware on Kubernetes

+ + Create OIG domains +

    +
  1. +

    Introduction

    +
  2. +
  3. +

    Prerequisites

    +
  4. +
  5. +

    Prepare the create domain script

    +
  6. +
  7. +

    Run the create domain script

    +

    a. Generate the create domain script

    +

    b. Setting the OIM server memory parameters

    +

    c. Run the create domain scripts

    +
  8. +
  9. +

    Verify the results

    +

    a. Verify the domain, pods and services

    +

    b. Verify the domain

    +

    c. Verify the pods

    +
  10. +
+

Introduction

+

The OIG deployment scripts demonstrate the creation of an OIG domain home on an existing Kubernetes persistent volume (PV) and persistent volume claim (PVC). The scripts also generate the domain YAML file, which can then be used to start the Kubernetes artifacts of the corresponding domain.

+

Prerequisites

+

Before you begin, perform the following steps:

+
    +
  1. Review the Domain resource documentation.
  2. +
  3. Ensure that you have executed all the preliminary steps documented in Prepare your environment.
  4. +
  5. Ensure that the database is up and running (a basic reachability check is sketched after this list).
  6. +
+
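
One basic way to confirm the database listener is reachable from the master node is sketched below. The host and port are placeholders and must match the values you will use for rcuDatabaseURL; this only checks network connectivity to the listener, it does not verify that the database itself is open.

+
$ DB_HOST=mydatabasehost.example.com   # placeholder
+$ DB_PORT=1521                         # placeholder
+$ timeout 5 bash -c "cat < /dev/null > /dev/tcp/${DB_HOST}/${DB_PORT}" && echo "listener reachable" || echo "listener not reachable"
+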

Prepare the create domain script

+

The sample scripts for Oracle Identity Governance domain deployment are available at $WORKDIR/kubernetes/create-oim-domain.

+
    +
  1. +

    Make a copy of the create-domain-inputs.yaml file:

    +
    $ cd $WORKDIR/kubernetes/create-oim-domain/domain-home-on-pv
    +$ cp create-domain-inputs.yaml create-domain-inputs.yaml.orig   
    +
  2. +
  3. +

    Edit the create-domain-inputs.yaml and modify the following parameters. Save the file when complete:

    +

    Note: Do not edit any parameters other than the ones mentioned below.

    +
    domainUID: <domain_uid>
    +domainHome: /u01/oracle/user_projects/domains/<domain_uid>
    +image: <image_name>
    +imagePullSecretName: <container_registry_secret>
    +weblogicCredentialsSecretName: <kubernetes_domain_secret>
    +logHome: /u01/oracle/user_projects/domains/logs/<domain_uid>
    +namespace: <domain_namespace>
    +persistentVolumeClaimName: <pvc_name>
    +rcuSchemaPrefix: <rcu_prefix>
    +rcuDatabaseURL: <rcu_db_host>:<rcu_db_port>/<rcu_db_service_name>
    +rcuCredentialsSecret: <kubernetes_rcu_secret>
    +frontEndHost: <front_end_hostname>
    +frontEndPort: <front_end_port>
    +

    For example:

    +
    domainUID: governancedomain
    +domainHome: /u01/oracle/user_projects/domains/governancedomain
    +image: container-registry.oracle.com/middleware/oig_cpu:12.2.1.4-jdk8-ol7-<October'23>
    +imagePullSecretName: orclcred
    +weblogicCredentialsSecretName: oig-domain-credentials
    +logHome: /u01/oracle/user_projects/domains/logs/governancedomain
    +namespace: oigns
    +persistentVolumeClaimName: governancedomain-domain-pvc
    +rcuSchemaPrefix: OIGK8S
    +rcuDatabaseURL: mydatabasehost.example.com:1521/orcl.example.com
    +rcuCredentialsSecret: oig-rcu-credentials
    +frontEndHost: example.com
    +frontEndPort: 14100
    +

    Note: For now frontEndHost and frontEndPort should be set to example.com and 14100 respectively. These values will be changed to the correct values in post-installation tasks in Set OIMFrontendURL using MBeans.

    +
  4. +
+

A full list of parameters in the create-domain-inputs.yaml file is shown below:

+
Parameter | Definition | Default
adminPort | Port number for the Administration Server inside the Kubernetes cluster. | 7001
adminNodePort | Port number of the Administration Server outside the Kubernetes cluster. | 30701
adminServerName | Name of the Administration Server. | AdminServer
clusterName | Name of the WebLogic cluster instance to generate for the domain. By default the cluster name is oimcluster for the OIG domain. | oimcluster
configuredManagedServerCount | Number of Managed Server instances to generate for the domain. | 5
createDomainFilesDir | Directory on the host machine to locate all the files to create a WebLogic domain, including the script that is specified in the createDomainScriptName property. By default, this directory is set to the relative path wlst, and the create script will use the built-in WLST offline scripts in the wlst directory to create the WebLogic domain. It can also be set to the relative path wdt, and then the built-in WDT scripts will be used instead. An absolute path is also supported to point to an arbitrary directory in the file system. The built-in scripts can be replaced by the user-provided scripts or model files as long as those files are in the specified directory. Files in this directory are put into a Kubernetes config map, which in turn is mounted to the createDomainScriptsMountPath, so that the Kubernetes pod can use the scripts and supporting files to create a domain home. | wlst
createDomainScriptsMountPath | Mount path where the create domain scripts are located inside a pod. The create-domain.sh script creates a Kubernetes job to run the script (specified in the createDomainScriptName property) in a Kubernetes pod to create a domain home. Files in the createDomainFilesDir directory are mounted to this location in the pod, so that the Kubernetes pod can use the scripts and supporting files to create a domain home. | /u01/weblogic
createDomainScriptName | Script that the create domain script uses to create a WebLogic domain. The create-domain.sh script creates a Kubernetes job to run this script to create a domain home. The script is located in the in-pod directory that is specified in the createDomainScriptsMountPath property. If you need to provide your own scripts to create the domain home, instead of using the built-in scripts, you must use this property to set the name of the script that you want the create domain job to run. | create-domain-job.sh
domainHome | Home directory of the OIG domain. If not specified, the value is derived from the domainUID as /shared/domains/<domainUID>. | /u01/oracle/user_projects/domains/oimcluster
domainPVMountPath | Mount path of the domain persistent volume. | /u01/oracle/user_projects/domains
domainUID | Unique ID that will be used to identify this particular domain. Used as the name of the generated WebLogic domain as well as the name of the Kubernetes domain resource. This ID must be unique across all domains in a Kubernetes cluster. This ID cannot contain any character that is not valid in a Kubernetes service name. | oimcluster
exposeAdminNodePort | Boolean indicating if the Administration Server is exposed outside of the Kubernetes cluster. | false
exposeAdminT3Channel | Boolean indicating if the T3 administrative channel is exposed outside the Kubernetes cluster. | true
image | OIG container image. The operator requires OIG 12.2.1.4. Refer to OIG domains for details on how to obtain or create the image. | oracle/oig:12.2.1.4.0
imagePullPolicy | WebLogic container image pull policy. Legal values are IfNotPresent, Always, or Never | IfNotPresent
imagePullSecretName | Name of the Kubernetes secret to access the container registry to pull the OIG container image. The presence of the secret will be validated when this parameter is specified. | 
includeServerOutInPodLog | Boolean indicating whether to include the server .out to the pod’s stdout. | true
initialManagedServerReplicas | Number of Managed Servers to initially start for the domain. | 2
javaOptions | Java options for starting the Administration Server and Managed Servers. A Java option can have references to one or more of the following pre-defined variables to obtain WebLogic domain information: $(DOMAIN_NAME), $(DOMAIN_HOME), $(ADMIN_NAME), $(ADMIN_PORT), and $(SERVER_NAME). | -Dweblogic.StdoutDebugEnabled=false
logHome | The in-pod location for the domain log, server logs, server out, and Node Manager log files. If not specified, the value is derived from the domainUID as /shared/logs/<domainUID>. | /u01/oracle/user_projects/domains/logs/oimcluster
managedServerNameBase | Base string used to generate Managed Server names. | oim_server
managedServerPort | Port number for each Managed Server. | 8001
namespace | Kubernetes namespace in which to create the domain. | oimcluster
persistentVolumeClaimName | Name of the persistent volume claim created to host the domain home. If not specified, the value is derived from the domainUID as <domainUID>-weblogic-sample-pvc. | oimcluster-domain-pvc
productionModeEnabled | Boolean indicating if production mode is enabled for the domain. | true
serverStartPolicy | Determines which WebLogic Server instances will be started. Legal values are Never, IfNeeded, AdminOnly. | IfNeeded
t3ChannelPort | Port for the T3 channel of the NetworkAccessPoint. | 30012
t3PublicAddress | Public address for the T3 channel. This should be set to the public address of the Kubernetes cluster. This would typically be a load balancer address. For development environments only: In a single server (all-in-one) Kubernetes deployment, this may be set to the address of the master, or at the very least, it must be set to the address of one of the worker nodes. | If not provided, the script will attempt to set it to the IP address of the Kubernetes cluster
weblogicCredentialsSecretName | Name of the Kubernetes secret for the Administration Server’s user name and password. If not specified, then the value is derived from the domainUID as <domainUID>-weblogic-credentials. | oimcluster-domain-credentials
weblogicImagePullSecretName | Name of the Kubernetes secret for the container registry, used to pull the WebLogic Server image. | 
serverPodCpuRequest, serverPodMemoryRequest, serverPodCpuCLimit, serverPodMemoryLimit | The maximum amount of compute resources allowed, and minimum amount of compute resources required, for each server pod. Please refer to the Kubernetes documentation on Managing Compute Resources for Containers for details. | Resource requests and resource limits are not specified.
rcuSchemaPrefix | The schema prefix to use in the database, for example OIGK8S. You may wish to make this the same as the domainUID in order to simplify matching domains to their RCU schemas. | OIGK8S
rcuDatabaseURL | The database URL. | oracle-db.default.svc.cluster.local:1521/devpdb.k8s
rcuCredentialsSecret | The Kubernetes secret containing the database credentials. | oimcluster-rcu-credentials
frontEndHost | The entry point URL for the OIM. | Not set
frontEndPort | The entry point port for the OIM. | Not set
datasourceType | Type of JDBC datasource applicable for the OIG domain. Legal values are agl and generic. Choose agl for Active GridLink datasource and generic for Generic datasource. For enterprise deployments, Oracle recommends that you use GridLink data sources to connect to Oracle RAC databases. See the Enterprise Deployment Guide for further details. | generic
+

Note that the names of the Kubernetes resources in the generated YAML files may be formed with the value of some of the properties specified in the create-domain-inputs.yaml file. Those properties include the adminServerName, clusterName and managedServerNameBase. If those values contain any characters that are invalid in a Kubernetes service name, those characters are converted to valid values in the generated YAML files. For example, an uppercase letter is converted to a lowercase letter and an underscore ("_") is converted to a hyphen ("-").

+
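
For illustration only, the same conversion can be reproduced in the shell. The names below are just example values from the table above; the actual generated names come from the create domain script:

+
$ echo "AdminServer oim_server1" | tr '[:upper:]' '[:lower:]' | tr '_' '-'
+adminserver oim-server1
+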

The sample demonstrates how to create an OIG domain home and associated Kubernetes resources for a domain that has one cluster only. In addition, the sample provides the capability for users to supply their own scripts to create the domain home for other use cases. The generated domain YAML file could also be modified to cover more use cases.

+

Run the create domain script

+

Generate the create domain script

+
    +
  1. +

    Run the create domain script, specifying your inputs file and an output directory to store the generated artifacts:

    +
    $ cd $WORKDIR/kubernetes/create-oim-domain/domain-home-on-pv
    +$ mkdir output
    +$ ./create-domain.sh -i create-domain-inputs.yaml -o /<path to output-directory>
    +

    For example:

    +
    $ cd $WORKDIR/kubernetes/create-oim-domain/domain-home-on-pv
    +$ mkdir output
    +$ ./create-domain.sh -i create-domain-inputs.yaml -o output
    +

    The output will look similar to the following:

    +
    Input parameters being used
    +export version="create-weblogic-sample-domain-inputs-v1"
    +export adminPort="7001"
    +export adminServerName="AdminServer"
    +export domainUID="governancedomain"
    +export domainHome="/u01/oracle/user_projects/domains/governancedomain"
    +export serverStartPolicy="IfNeeded"
    +export clusterName="oim_cluster"
    +export configuredManagedServerCount="5"
    +export initialManagedServerReplicas="1"
    +export managedServerNameBase="oim_server"
    +export managedServerPort="14000"
    +export image="container-registry.oracle.com/middleware/oig_cpu:12.2.1.4-jdk8-ol7-<October'23>"
    +export imagePullPolicy="IfNotPresent"
    +export imagePullSecretName="orclcred"
    +export productionModeEnabled="true"
    +export weblogicCredentialsSecretName="oig-domain-credentials"
    +export includeServerOutInPodLog="true"
    +export logHome="/u01/oracle/user_projects/domains/logs/governancedomain"
    +export t3ChannelPort="30012"
    +export exposeAdminT3Channel="false"
    +export adminNodePort="30701"
    +export exposeAdminNodePort="false"
    +export namespace="oigns"
    +javaOptions=-Dweblogic.StdoutDebugEnabled=false
    +export persistentVolumeClaimName="governancedomain-domain-pvc"
    +export domainPVMountPath="/u01/oracle/user_projects/domains"
    +export createDomainScriptsMountPath="/u01/weblogic"
    +export createDomainScriptName="create-domain-job.sh"
    +export createDomainFilesDir="wlst"
    +export rcuSchemaPrefix="OIGK8S"
    +export rcuDatabaseURL="mydatabasehost.example.com:1521/orcl.example.com"
    +export rcuCredentialsSecret="oig-rcu-credentials"
    +export frontEndHost="example.com"
    +export frontEndPort="14100"
    +export datasourceType="generic"
    +
    +validateWlsDomainName called with governancedomain
    +createFiles - valuesInputFile is create-domain-inputs.yaml
    +createDomainScriptName is create-domain-job.sh
    +Generating output/weblogic-domains/governancedomain/create-domain-job.yaml
    +Generating output/weblogic-domains/governancedomain/delete-domain-job.yaml
    +Generating output/weblogic-domains/governancedomain/domain.yaml
    +Checking to see if the secret governancedomain-domain-credentials exists in namespace oigns
    +configmap/governancedomain-create-fmw-infra-sample-domain-job-cm created
    +Checking the configmap governancedomain-create-fmw-infra-sample-domain-job-cm was created
    +configmap/governancedomain-create-fmw-infra-sample-domain-job-cm labeled
    +Checking if object type job with name governancedomain-create-fmw-infra-sample-domain-job exists
    +No resources found in oigns namespace.
    +Creating the domain by creating the job output/weblogic-domains/governancedomain/create-domain-job.yaml
    +job.batch/governancedomain-create-fmw-infra-sample-domain-job created
    +Waiting for the job to complete...
    +status on iteration 1 of 40
    +pod governancedomain-create-fmw-infra-sample-domain-job-8cww8 status is Running
    +status on iteration 2 of 40
    +pod governancedomain-create-fmw-infra-sample-domain-job-8cww8 status is Running
    +status on iteration 3 of 40
    +pod governancedomain-create-fmw-infra-sample-domain-job-8cww8 status is Running
    +status on iteration 4 of 40
    +pod governancedomain-create-fmw-infra-sample-domain-job-8cww8 status is Running
    +status on iteration 5 of 40
    +pod governancedomain-create-fmw-infra-sample-domain-job-8cww8 status is Running
    +status on iteration 6 of 40
    +pod governancedomain-create-fmw-infra-sample-domain-job-8cww8 status is Running
    +status on iteration 7 of 40
    +pod governancedomain-create-fmw-infra-sample-domain-job-8cww8 status is Running
    +status on iteration 8 of 40
    +pod governancedomain-create-fmw-infra-sample-domain-job-8cww8 status is Running
    +status on iteration 9 of 40
    +pod governancedomain-create-fmw-infra-sample-domain-job-8cww8 status is Running
    +status on iteration 10 of 40
    +pod governancedomain-create-fmw-infra-sample-domain-job-8cww8 status is Running
    +status on iteration 11 of 40
    +pod governancedomain-create-fmw-infra-sample-domain-job-8cww8 status is Completed
    +
    +Domain governancedomain was created and will be started by the WebLogic Kubernetes Operator
    +
    +The following files were generated:
    +  output/weblogic-domains/governancedomain/create-domain-inputs.yaml
    +  output/weblogic-domains/governancedomain/create-domain-job.yaml
    +  output/weblogic-domains/governancedomain/domain.yaml
    +sed
    +
    +Completed
    +$
    +

    Note: If the create domain script creation fails, refer to the Troubleshooting section.

    +
  2. +
+

Setting the OIM server memory parameters

+
    +
  1. +

    Navigate to the /output/weblogic-domains/<domain_uid> directory:

    +
    $ cd $WORKDIR/kubernetes/create-oim-domain/domain-home-on-pv/output/weblogic-domains/<domain_uid>
    +

    For example:

    +
    $ cd $WORKDIR/kubernetes/create-oim-domain/domain-home-on-pv/output/weblogic-domains/governancedomain
    +
  2. +
  3. +

    Edit the domain.yaml and locate the section of the file starting with: - clusterName: oim_cluster under governancedomain-oim-cluster. Add the following lines:

    +
    serverPod:
    + env:
    + - name: USER_MEM_ARGS
    +   value: "-Djava.security.egd=file:/dev/./urandom -Xms2408m -Xmx8192m"
    + resources:
    +   limits:
    +     cpu: "2"
    +     memory: "8Gi"
    +   requests:
    +     cpu: "1000m"
    +     memory: "4Gi"
    +

    The file should look as follows:

    +
    ...
    +apiVersion: weblogic.oracle/v1
    +kind: Cluster
    +metadata:
    +  name: governancedomain-oim-cluster
    +  namespace: oigns
    +spec:
    +  clusterName: oim_cluster
    +  serverService:
    +    precreateService: true
    +  replicas: 0
    +  serverPod:
    +    env:
    +    - name: USER_MEM_ARGS
    +      value: "-XX:+UseContainerSupport -Djava.security.egd=file:/dev/./urandom -Xms8192m -Xmx8192m"
    +    resources:
    +      limits:
    +        cpu: "2"
    +        memory: "8Gi"
    +      requests:
    +        cpu: "1000m"
    +        memory: "4Gi"
    +...
    +

    Note: The above CPU and memory values are for development environments only. For Enterprise Deployments, please review the performance recommendations and sizing requirements in the Enterprise Deployment Guide for Oracle Identity and Access Management in a Kubernetes Cluster.

    +

    Note: Limits and requests for CPU resources are measured in CPU units. One CPU in Kubernetes is equivalent to 1 vCPU/Core for cloud providers, and 1 hyperthread on bare-metal Intel processors. An “m” suffix in a CPU attribute indicates ‘milli-CPU’, so 500m is 50% of a CPU. Memory can be expressed in various units, where one Mi is one IEC unit mega-byte (1024^2), and one Gi is one IEC unit giga-byte (1024^3). For more information, see Resource Management for Pods and Containers, Assign Memory Resources to Containers and Pods, and Assign CPU Resources to Containers and Pods. A quick arithmetic illustration of these units is sketched after these steps.

    +

    Note: The parameters above are also utilized by the Kubernetes Horizontal Pod Autoscaler (HPA). For more details on HPA, see Kubernetes Horizontal Pod Autoscaler.

    +

    Note: If required you can also set the same resources and limits for the governancedomain-soa-cluster.

    +
  4. +
+
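
As a quick illustration of the units described in the notes above (the values shown are simply the ones used in this example):

+
$ echo "1000m CPU  = $((1000 / 1000)) full CPU"
+$ echo "500m CPU   = half of one CPU"
+$ echo "8Gi memory = $((8 * 1024 * 1024 * 1024)) bytes"
+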

Run the create domain scripts

+
    +
  1. +

    Create the Kubernetes resource using the following command:

    +
    $ cd $WORKDIR/kubernetes/create-oim-domain/domain-home-on-pv/output/weblogic-domains/<domain_uid>
    +$ kubectl apply -f domain.yaml
    +

    For example:

    +
    $ cd $WORKDIR/kubernetes/create-oim-domain/domain-home-on-pv/output/weblogic-domains/governancedomain
    +$ kubectl apply -f domain.yaml
    +

    The output will look similar to the following:

    +
    domain.weblogic.oracle/governancedomain unchanged
    +cluster.weblogic.oracle/governancedomain-oim-cluster created
    +cluster.weblogic.oracle/governancedomain-soa-cluster created
    +
  2. +
  3. +

    Run the following command to view the status of the OIG pods:

    +
    $ kubectl get pods -n oigns
    +

    The output will initially look similar to the following:

    +
    NAME                                                        READY   STATUS      RESTARTS   AGE
    +governancedomain-create-fmw-infra-sample-domain-job-8cww8   0/1     Completed   0          27m
    +governancedomain-introspect-domain-job-p4brt                1/1     Running     0          6s
    +helper                                                      1/1     Running     0          3h30m
    +

    The introspect-domain-job pod will be displayed first. Run the command again after several minutes and check to see that the Administration Server and SOA Server are both started. When started they should have STATUS = Running and READY = 1/1.

    +
    NAME                                                        READY   STATUS      RESTARTS   AGE
    +governancedomain-adminserver                                1/1     Running     0          7m30s
    +governancedomain-create-fmw-infra-sample-domain-job-8cww8   0/1     Completed   0          35m
    +governancedomain-soa-server1                                1/1     Running     0          4m
    +helper                                                      1/1     Running     0          3h38m
    +

    Note: It will take several minutes before all the pods listed above show. When a pod shows 0/1 in the READY column, the pod has started but the OIG server associated with it is still starting. While the pods are starting you can check the startup status in the pod logs, by running the following commands:

    +
    $ kubectl logs governancedomain-adminserver -n oigns
    +$ kubectl logs governancedomain-soa-server1 -n oigns
    +
  4. +
  5. +

    Check the clusters using the following command:

    +
    $ kubectl get cluster -n oigns
    +

    The output will look similar to the following:

    +
    NAME                           AGE
    +governancedomain-oim-cluster   9m
    +governancedomain-soa-cluster   9m
    +
  6. +
  7. +

    Start the OIM server using the following command:

    +
    $ kubectl patch cluster -n <namespace> <OIMClusterName> --type=merge -p '{"spec":{"replicas":<initialManagedServerReplicas>}}'
    +

    For example:

    +
    $ kubectl patch cluster -n oigns governancedomain-oim-cluster --type=merge -p '{"spec":{"replicas":1}}'
    +

    The output will look similar to the following:

    +
    cluster.weblogic.oracle/governancedomain-oim-cluster patched
    +
  8. +
  9. +

    Run the following command to view the status of the OIG pods:

    +
    $ kubectl get pods -n oigns
    +

    The output will initially look similar to the following:

    +
    NAME                                                        READY   STATUS      RESTARTS   AGE
    +governancedomain-adminserver                                1/1     Running     0          7m30s
    +governancedomain-create-fmw-infra-sample-domain-job-8cww8   0/1     Completed   0          35m
    +governancedomain-oim-server1                                1/1     Running     0          4m25s
    +governancedomain-soa-server1                                1/1     Running     0          4m
    +helper                                                      1/1     Running     0          3h38m
    +

    Note: It will take several minutes before the governancedomain-oim-server1 pod has a STATUS of 1/1. While the pod is starting you can check the startup status in the pod log, by running the following command:

    +
    $ kubectl logs governancedomain-oim-server1 -n oigns
    +
  10. +
+

Verify the results

+

Verify the domain, pods and services

+
    +
  1. +

    Verify that the domain, server pods and services have been created and that the pods are READY (1/1), by running the following command:

    +
    $ kubectl get all,domains -n <domain_namespace>
    +

    For example:

    +
    $ kubectl get all,domains -n oigns
    +

    The output will look similar to the following:

    +
    NAME                                                            READY   STATUS      RESTARTS   AGE
    +pod/governancedomain-adminserver                                1/1     Running     0          19m30s
    +pod/governancedomain-create-fmw-infra-sample-domain-job-8cww8   0/1     Completed   0          47m
    +pod/governancedomain-oim-server1                                1/1     Running     0          16m25s
    +pod/governancedomain-soa-server1                                1/1     Running     0          16m
    +pod/helper                                                      1/1     Running     0          3h50m
    +
    +NAME                                           TYPE        CLUSTER-IP       EXTERNAL-IP   PORT(S)               AGE
    +service/governancedomain-adminserver           ClusterIP   None             <none>        7001/TCP              28m
    +service/governancedomain-cluster-oim-cluster   ClusterIP   10.106.198.40    <none>        14002/TCP,14000/TCP   25m
    +service/governancedomain-cluster-soa-cluster   ClusterIP   10.102.218.11    <none>        8001/TCP              25m
    +service/governancedomain-oim-server1           ClusterIP   None             <none>        14002/TCP,14000/TCP   16m24s
    +service/governancedomain-oim-server2           ClusterIP   10.97.32.112     <none>        14002/TCP,14000/TCP   25m
    +service/governancedomain-oim-server3           ClusterIP   10.100.233.109   <none>        14002/TCP,14000/TCP   25m
    +service/governancedomain-oim-server4           ClusterIP   10.96.154.17     <none>        14002/TCP,14000/TCP   25m
    +service/governancedomain-oim-server5           ClusterIP   10.103.222.213   <none>        14002/TCP,14000/TCP   25m
    +service/governancedomain-soa-server1           ClusterIP   None             <none>        8001/TCP              25m
    +service/governancedomain-soa-server2           ClusterIP   10.104.43.118    <none>        8001/TCP              25m
    +service/governancedomain-soa-server3           ClusterIP   10.110.180.120   <none>        8001/TCP              25m
    +service/governancedomain-soa-server4           ClusterIP   10.99.161.73     <none>        8001/TCP              25m
    +service/governancedomain-soa-server5           ClusterIP   10.97.67.196     <none>        8001/TCP              25m
    +
    +NAME                                                            COMPLETIONS   DURATION   AGE
    +job.batch/governancedomain-create-fmw-infra-sample-domain-job   1/1           3m6s       125m
    +
    +NAME                                      AGE
    +domain.weblogic.oracle/governancedomain   24m
    +
    +NAME                                                   AGE
    +cluster.weblogic.oracle/governancedomain-oim-cluster   23m
    +cluster.weblogic.oracle/governancedomain-soa-cluster   23m
    +
  2. +
+

The default domain created by the script has the following characteristics:

+
    +
  • An Administration Server named AdminServer listening on port 7001.
  • +
  • A configured OIG cluster named oim_cluster of size 5.
  • +
  • A configured SOA cluster named soa_cluster of size 5.
  • +
  • One started OIG Managed Server, named oim_server1, listening on port 14000.
  • +
  • One started SOA Managed Server, named soa_server1, listening on port 8001.
  • +
  • Log files that are located in <persistent_volume>/logs/<domainUID> (ways to view them are sketched after this list).
  • +
+
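
Two ways to look at those logs are sketched below. The first reads the pod logs directly; the second assumes you have a pod that mounts the domain persistent volume (for example the helper pod created earlier, if it was started with the PV mounted) and that logHome is the path used in this example:

+
$ kubectl logs governancedomain-oim-server1 -n oigns
+$ kubectl exec -it helper -n oigns -- ls /u01/oracle/user_projects/domains/logs/governancedomain
+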

Verify the domain

+
    +
  1. +

    Run the following command to describe the domain:

    +
    $ kubectl describe domain <domain_uid> -n <namespace>
    +

    For example:

    +
    $ kubectl describe domain governancedomain -n oigns
    +

    The output will look similar to the following:

    +
    Name:         governancedomain
    +Namespace:    oigns
    +Labels:       weblogic.domainUID=governancedomain
    +Annotations:  <none>
    +API Version:  weblogic.oracle/v9
    +Kind:         Domain
    +Metadata:
    +  Creation Timestamp:  <DATE>
    +  Generation:          1
    +  Managed Fields:
    +    API Version:  weblogic.oracle/v9
    +    Fields Type:  FieldsV1
    +    fieldsV1:
    +      f:metadata:
    +        f:annotations:
    +          .:
    +          f:kubectl.kubernetes.io/last-applied-configuration:
    +        f:labels:
    +          .:
    +          f:weblogic.domainUID:
    +      f:spec:
    +        .:
    +        f:adminServer:
    +          .:
    +          f:adminChannelPortForwardingEnabled:
    +          f:serverPod:
    +            .:
    +            f:env:
    +          f:serverStartPolicy:
    +        f:clusters:
    +        f:dataHome:
    +        f:domainHome:
    +        f:domainHomeSourceType:
    +        f:failureRetryIntervalSeconds:
    +        f:failureRetryLimitMinutes:
    +        f:httpAccessLogInLogHome:
    +        f:image:
    +        f:imagePullPolicy:
    +        f:imagePullSecrets:
    +        f:includeServerOutInPodLog:
    +        f:logHome:
    +        f:logHomeEnabled:
    +        f:logHomeLayout:
    +        f:maxClusterConcurrentShutdown:
    +        f:maxClusterConcurrentStartup:
    +        f:maxClusterUnavailable:
    +        f:replicas:
    +        f:serverPod:
    +          .:
    +          f:env:
    +          f:volumeMounts:
    +          f:volumes:
    +        f:serverStartPolicy:
    +        f:webLogicCredentialsSecret:
    +          .:
    +          f:name:
    +    Manager:      kubectl-client-side-apply
    +    Operation:    Update
    +    Time:         <DATE>
    +    API Version:  weblogic.oracle/v9
    +    Fields Type:  FieldsV1
    +    fieldsV1:
    +      f:status:
    +        .:
    +        f:clusters:
    +        f:conditions:
    +        f:observedGeneration:
    +        f:servers:
    +        f:startTime:
    +    Manager:         Kubernetes Java Client
    +    Operation:       Update
    +    Subresource:     status
    +    Time:            <DATE>
    +  Resource Version:  1247307
    +  UID:               4933be73-df97-493f-a20c-bf1e24f6b3f2
    +Spec:
    +  Admin Server:
    +    Admin Channel Port Forwarding Enabled:  true
    +    Server Pod:
    +      Env:
    +        Name:             USER_MEM_ARGS
    +        Value:            -Djava.security.egd=file:/dev/./urandom -Xms512m -Xmx1024m
    +   Server Start Policy:  IfNeeded
    +  Clusters:
    +    Name:                          governancedomain-oim-cluster
    +    Name:                          governancedomain-soa-cluster
    +  Data Home:
    +  Domain Home:                     /u01/oracle/user_projects/domains/governancedomain
    +  Domain Home Source Type:         PersistentVolume
    +  Failure Retry Interval Seconds:  120
    +  Failure Retry Limit Minutes:     1440
    +  Http Access Log In Log Home:     true
    +  Image:                           container-registry.oracle.com/middleware/oig_cpu:12.2.1.4-jdk8-ol7-<October'23>
    +  Image Pull Policy:               IfNotPresent
    +  Image Pull Secrets:
    +    Name:                           orclcred
    +  Include Server Out In Pod Log:    true
    +  Log Home:                         /u01/oracle/user_projects/domains/logs/governancedomain
    +  Log Home Enabled:                 true
    +  Log Home Layout:                  ByServers
    +  Max Cluster Concurrent Shutdown:  1
    +  Max Cluster Concurrent Startup:   0
    +  Max Cluster Unavailable:          1
    +  Replicas:                         1
    +  Server Pod:
    +    Env:
    +      Name:   JAVA_OPTIONS
    +      Value:  -Dweblogic.StdoutDebugEnabled=false
    +      Name:   USER_MEM_ARGS
    +      Value:  -Djava.security.egd=file:/dev/./urandom -Xms256m -Xmx1024m
    +    Volume Mounts:
    +      Mount Path:  /u01/oracle/user_projects/domains
    +      Name:        weblogic-domain-storage-volume
    +    Volumes:
    +      Name:  weblogic-domain-storage-volume
    +      Persistent Volume Claim:
    +        Claim Name:     governancedomain-domain-pvc
    +  Server Start Policy:  IfNeeded
    +  Web Logic Credentials Secret:
    +    Name:  oig-domain-credentials
    +Status:
    +  Clusters:
    +    Cluster Name:  oim_cluster
    +    Conditions:
    +      Last Transition Time:  <DATE>
    +      Status:                True
    +      Type:                  Available
    +      Last Transition Time:  <DATE>
    +      Status:                True
    +      Type:                  Completed
    +    Label Selector:          weblogic.domainUID=governancedomain,weblogic.clusterName=oim_cluster
    +    Maximum Replicas:        5
    +    Minimum Replicas:        0
    +    Observed Generation:     2
    +    Ready Replicas:          1
    +    Replicas:                1
    +    Replicas Goal:           1
    +    Cluster Name:            soa_cluster
    +    Conditions:
    +      Last Transition Time:  <DATE>
    +      Status:                True
    +      Type:                  Available
    +      Last Transition Time:  <DATE>
    +      Status:                True
    +      Type:                  Completed
    +    Label Selector:          weblogic.domainUID=governancedomain,weblogic.clusterName=soa_cluster
    +    Maximum Replicas:        5
    +    Minimum Replicas:        0
    +    Observed Generation:     1
    +    Ready Replicas:          1
    +    Replicas:                1
    +    Replicas Goal:           1
    +  Conditions:
    +    Last Transition Time:  <DATE>
    +    Status:                True
    +    Type:                  Available
    +    Last Transition Time:  <DATE>
    +    Status:                True
    +    Type:                  Completed
    +  Observed Generation:     1
    +  Servers:
    +   Health:
    +      Activation Time:  <DATE>
    +      Overall Health:   ok
    +      Subsystems:
    +        Subsystem Name:  ServerRuntime
    +        Symptoms:
    +    Node Name:     worker-node2
    +    Pod Phase:     Running
    +    Pod Ready:     True
    +    Server Name:   AdminServer
    +    State:         RUNNING
    +    State Goal:    RUNNING
    +    Cluster Name:  oim_cluster
    +    Health:
    +      Activation Time:  <DATE>
    +      Overall Health:   ok
    +      Subsystems:
    +        Subsystem Name:  ServerRuntime
    +        Symptoms:
    +    Node Name:     worker-node1
    +    Pod Phase:     Running
    +    Pod Ready:     True
    +    Server Name:   oim_server1
    +    State:         RUNNING
    +    State Goal:    RUNNING
    +    Cluster Name:  oim_cluster
    +    Server Name:   oim_server2
    +    State:         SHUTDOWN
    +    State Goal:    SHUTDOWN
    +    Cluster Name:  oim_cluster
    +    Server Name:   oim_server3
    +    State:         SHUTDOWN
    +    State Goal:    SHUTDOWN
    +    Cluster Name:  oim_cluster
    +    Server Name:   oim_server4
    +    State:         SHUTDOWN
    +    State Goal:    SHUTDOWN
    +    Cluster Name:  oim_cluster
    +    Server Name:   oim_server5
    +    State:         SHUTDOWN
    +    State Goal:    SHUTDOWN
    +    Cluster Name:  soa_cluster
    +    Health:
    +      Activation Time:  <DATE>
    +      Overall Health:   ok
    +      Subsystems:
    +        Subsystem Name:  ServerRuntime
    +        Symptoms:
    +    Node Name:     worker-node1
    +    Pod Phase:     Running
    +    Pod Ready:     True
    +    Server Name:   soa_server1
    +    State:         RUNNING
    +    State Goal:    RUNNING
    +    Cluster Name:  soa_cluster
    +    Server Name:   soa_server2
    +    State:         SHUTDOWN
    +    State Goal:    SHUTDOWN
    +    Cluster Name:  soa_cluster
    +    Server Name:   soa_server3
    +    State:         SHUTDOWN
    +    State Goal:    SHUTDOWN
    +    Cluster Name:  soa_cluster
    +    Server Name:   soa_server4
    +    State:         SHUTDOWN
    +    State Goal:    SHUTDOWN
    +    Cluster Name:  soa_cluster
    +    Server Name:   soa_server5
    +    State:         SHUTDOWN
    +    State Goal:    SHUTDOWN
    +  Start Time:      <DATE>
    +Events:
    +  Type     Reason   Age                   From               Message
    +  ----     ------   ----                  ----               -------
    +  Normal   Created  35m                   weblogic.operator  Domain governancedomain was created.
    +  Normal   Changed  34m (x1127 over 35m)  weblogic.operator  Domain governancedomain was changed.
    +  Warning  Failed   34m (x227 over 35m)   weblogic.operator  Domain governancedomain failed due to 'Domain validation error': Cluster resource 'governancedomain-oim-cluster' not found in namespace 'oigns'
    +   Cluster resource 'governancedomain-soa-cluster' not found in namespace 'oigns'. Update the domain resource to correct the validation error.
    +  Warning  Unavailable  17m                weblogic.operator  Domain governancedomain is unavailable: an insufficient number of its servers that are expected to be running are ready.";
    +  Warning  Incomplete   17m                weblogic.operator  Domain governancedomain is incomplete for one or more of the following reasons: there are failures detected, there are pending server shutdowns, or not all servers expected to be running are ready and at their target image, auxiliary images, restart version, and introspect version.
    +  Normal   Completed    13m (x2 over 26m)  weblogic.operator  Domain governancedomain is complete because all of the following are true: there is no failure detected, there are no pending server shutdowns, and all servers expected to be running are ready and at their target image, auxiliary images, restart version, and introspect version.
    +  Normal   Available    13m (x2 over 26m)  weblogic.operator  Domain governancedomain is available: a sufficient number of its servers have reached the ready state.
    +

    In the Status section of the output, the available servers and clusters are listed (a jsonpath query that extracts just this information is sketched after this step).

    +
  2. +
+
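
If you only want the cluster status rather than the full describe output, a jsonpath query such as the following can be used. The field names are inferred from the Status section shown above, so treat this as a sketch:

+
$ kubectl get domain governancedomain -n oigns -o jsonpath='{range .status.clusters[*]}{.clusterName}{" ready="}{.readyReplicas}{"/"}{.replicas}{"\n"}{end}'
+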

Verify the pods

+
    +
  1. +

    Run the following command to see the pods running the servers and which nodes they are running on:

    +
    $ kubectl get pods -n <namespace> -o wide
    +

    For example:

    +
    $ kubectl get pods -n oigns -o wide
    +

    The output will look similar to the following:

    +
    NAME                                                        READY   STATUS      RESTARTS   AGE     IP              NODE           NOMINATED NODE   READINESS GATES
    +governancedomain-adminserver                                1/1     Running     0          24m     10.244.1.42   worker-node2   <none>           <none>
    +governancedomain-create-fmw-infra-sample-domain-job-8cww8   0/1     Completed   0          52m     10.244.1.40   worker-node2   <none>           <none>
    +governancedomain-oim-server1                                1/1     Running     0          52m     10.244.1.44   worker-node2   <none>           <none>
    +governancedomain-soa-server1                                1/1     Running     0          21m     10.244.1.43   worker-node2   <none>           <none>
    +helper                                                      1/1     Running     0          3h55m   10.244.1.39   worker-node2   <none>           <none>
    +

    You are now ready to configure an Ingress to direct traffic for your OIG domain as per Configure an ingress for an OIG domain.

    +
  2. +
diff --git a/docs/23.4.1/idm-products/oig/create-oig-domains/index.xml b/docs/23.4.1/idm-products/oig/create-oig-domains/index.xml
new file mode 100644
index 000000000..67fb1ba49
--- /dev/null
+++ b/docs/23.4.1/idm-products/oig/create-oig-domains/index.xml
@@ -0,0 +1,14 @@
+Create OIG domains on Oracle Fusion Middleware on Kubernetes
+/fmw-kubernetes/23.4.1/idm-products/oig/create-oig-domains/
+Recent content in Create OIG domains on Oracle Fusion Middleware on Kubernetes
+Hugo -- gohugo.io
+en-us
diff --git a/docs/23.4.1/idm-products/oig/create-or-update-image/index.html b/docs/23.4.1/idm-products/oig/create-or-update-image/index.html
new file mode 100644
index 000000000..a5060035f
--- /dev/null
+++ b/docs/23.4.1/idm-products/oig/create-or-update-image/index.html
@@ -0,0 +1,4250 @@
+Create or update an image :: Oracle Fusion Middleware on Kubernetes

+ + Create or update an image +


As described in Prepare Your Environment you can create your own OIG container image. If you have access to My Oracle Support (MOS) and need to build a new image with an interim or one-off patch, it is recommended to use the WebLogic Image Tool to build an Oracle Identity Governance image for production deployments.

+

Create or update an Oracle Identity Governance image using the WebLogic Image Tool

+

Using the WebLogic Image Tool, you can create a new Oracle Identity Governance image with PSUs (patch set updates) and interim patches, or update an existing image with one or more interim patches.

+
+

Recommendations:

+
    +
  • Use create for creating a new Oracle Identity Governance image containing the Oracle Identity Governance binaries, bundle patch and interim patches. This is the recommended approach if you have access to the OIG patches because it optimizes the size of the image.
  • +
  • Use update for patching an existing Oracle Identity Governance image with a single interim patch. Note that the patched image size may increase considerably due to additional image layers introduced by the patch application tool.
  • +
+
+

Create an image

+

Set up the WebLogic Image Tool

+ +
Prerequisites
+

Verify that your environment meets the following prerequisites:

+
    +
  • Docker client and daemon on the build machine, with minimum Docker version 18.03.1.ce.
  • +
  • Bash version 4.0 or later, to enable the command complete feature.
  • +
  • JAVA_HOME environment variable set to the appropriate JDK location, for example: /scratch/export/oracle/product/jdk
  • +
+
Set up the WebLogic Image Tool
+

To set up the WebLogic Image Tool:

+
    +
  1. +

    Create a working directory and change to it:

    +
    $ mkdir <workdir>
    +$ cd <workdir>
    +

    For example:

    +
    $ mkdir /scratch/imagetool-setup
    +$ cd /scratch/imagetool-setup
    +
  2. +
  3. +

    Download the latest version of the WebLogic Image Tool from the releases page.

    +
    $ wget https://github.com/oracle/weblogic-image-tool/releases/download/release-X.X.X/imagetool.zip
    +

    where X.X.X is the latest release referenced on the releases page.

    +
  4. +
  5. +

    Unzip the release ZIP file in the imagetool-setup directory.

    +
    $ unzip imagetool.zip
    +
  6. +
  7. +

    Execute the following commands to set up the WebLogic Image Tool:

    +
    $ cd <workdir>/imagetool-setup/imagetool/bin
    +$ source setup.sh
    +

    For example:

    +
    $ cd /scratch/imagetool-setup/imagetool/bin
    +$ source setup.sh
    +
  8. +
+
Validate setup
+

To validate the setup of the WebLogic Image Tool:

+
    +
  1. +

    Enter the following command to retrieve the version of the WebLogic Image Tool:

    +
    $ imagetool --version
    +
  2. +
  3. +

    Enter imagetool then press the Tab key to display the available imagetool commands:

    +
    $ imagetool <TAB>
    +cache   create  help    rebase  update
    +
  4. +
+
WebLogic Image Tool build directory
+

The WebLogic Image Tool creates a temporary Docker context directory, prefixed by wlsimgbuilder_temp, every time the tool runs. Under normal circumstances, this context directory will be deleted. However, if the process is aborted or the tool is unable to remove the directory, it is safe for you to delete it manually. By default, the WebLogic Image Tool creates the Docker context directory under the user’s home directory. If you prefer to use a different directory for the temporary context, set the environment variable WLSIMG_BLDDIR:

+
$ export WLSIMG_BLDDIR="/path/to/build/dir"
+
WebLogic Image Tool cache
+

The WebLogic Image Tool maintains a local file cache store. This store is used to look up where the Java, WebLogic Server installers, and WebLogic Server patches reside in the local file system. By default, the cache store is located in the user’s $HOME/cache directory. Under this directory, the lookup information is stored in the .metadata file. All automatically downloaded patches also reside in this directory. You can change the default cache store location by setting the environment variable WLSIMG_CACHEDIR:

+
$ export WLSIMG_CACHEDIR="/path/to/cachedir"
+
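To check what the tool currently knows about, you can list the cache contents at any time. This is an optional sketch using the imagetool cache listItems command provided by the WebLogic Image Tool:

$ imagetool cache listItems

This prints each cache key and the local file path it points to, which is useful for confirming that installers and patches were added correctly before running a build.
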
Set up additional build scripts
+

Creating an Oracle Identity Governance container image using the WebLogic Image Tool requires additional container scripts for Oracle Identity Governance domains.

+
    +
  1. +

    Clone the docker-images repository to set up those scripts. In these steps, this directory is DOCKER_REPO:

    +
    $ cd <workdir>/imagetool-setup
    +$ git clone https://github.com/oracle/docker-images.git
    +

    For example:

    +
    $ cd /scratch/imagetool-setup
    +$ git clone https://github.com/oracle/docker-images.git
    +
  2. +
+
+

Note: If you want to create a new image, continue with the following steps. Otherwise, to update an existing image, see Update an image.

+
+

Create an image

+

After setting up the WebLogic Image Tool, follow these steps to use the WebLogic Image Tool to create a new Oracle Identity Governance image.

+
Download the Oracle Identity Governance installation binaries and patches
+

You must download the required Oracle Identity Governance installation binaries and patches as listed below from the Oracle Software Delivery Cloud and save them in a directory of your choice.

+

The installation binaries and patches required are:

+
  • Oracle Identity and Access Management 12.2.1.4.0
    • fmw_12.2.1.4.0_idm.jar
  • Oracle Fusion Middleware 12c Infrastructure 12.2.1.4.0
    • fmw_12.2.1.4.0_infrastructure.jar
  • Oracle SOA Suite for Oracle Middleware 12.2.1.4.0
    • fmw_12.2.1.4.0_soa.jar
  • Oracle Service Bus 12.2.1.4.0
    • fmw_12.2.1.4.0_osb.jar
  • OIG and FMW Infrastructure Patches:
    • View document ID 2723908.1 on My Oracle Support. In the Container Image Download/Patch Details section, locate the Oracle Identity Governance (OIG) table. For the latest PSU click the README link in the Documentation column. In the README, locate the “Installed Software” section. All the patch numbers to be downloaded are listed here. Download all these individual patches from My Oracle Support.
  • Oracle JDK v8
    • jdk-8uXXX-linux-x64.tar.gz where XXX is the JDK version referenced in the README above.

Update required build files
+

The following files in the code repository location <imagetool-setup-location>/docker-images/OracleIdentityGovernance/imagetool/12.2.1.4.0 are used for creating the image:

+
    +
  • additionalBuildCmds.txt
  • +
  • buildArgs
  • +
+

Edit the <workdir>/imagetool-setup/docker-images/OracleIdentityGovernance/imagetool/12.2.1.4.0/buildArgs file and change %DOCKER_REPO%, %JDK_VERSION% and %BUILDTAG% appropriately.

+

For example:

+
create
+--jdkVersion=8u311
+--type oig
+--chown oracle:root
+--version=12.2.1.4.0
+--tag=oig-latestpsu:12.2.1.4.0
+--pull
+--installerResponseFile /scratch/imagetool-setup/docker-images/OracleFMWInfrastructure/dockerfiles/12.2.1.4/install.file,/scratch/imagetool-setup/docker-images/OracleSOASuite/dockerfiles/12.2.1.4.0/install/soasuite.response,/scratch/imagetool-setup/docker-images/OracleSOASuite/dockerfiles/12.2.1.4.0/install/osb.response,/scratch/imagetool-setup/docker-images/OracleIdentityGovernance/dockerfiles/12.2.1.4.0/idmqs.response
+--additionalBuildCommands /scratch/imagetool-setup/docker-images/OracleIdentityGovernance/imagetool/12.2.1.4.0/additionalBuildCmds.txt
+--additionalBuildFiles /scratch/imagetool-setup/docker-images/OracleIdentityGovernance/dockerfiles/12.2.1.4.0/container-scripts
+
    +
  1. +

    Edit the <workdir>/imagetool-setup/docker-images/OracleFMWInfrastructure/dockerfiles/12.2.1.4.0/install.file and, under the [GENERIC] section, add the line INSTALL_TYPE="Fusion Middleware Infrastructure". For example:

    +
    [GENERIC]
    +INSTALL_TYPE="Fusion Middleware Infrastructure"
    +DECLINE_SECURITY_UPDATES=true
    +SECURITY_UPDATES_VIA_MYORACLESUPPORT=false
    +
  2. +
+
Create the image
+
    +
  1. +

    Add a JDK package to the WebLogic Image Tool cache. For example:

    +
    $ imagetool cache addInstaller --type jdk --version 8uXXX --path <download location>/jdk-8uXXX-linux-x64.tar.gz
    +

    where XXX is the JDK version downloaded

    +
  2. +
  3. +

    Add the downloaded installation binaries to the WebLogic Image Tool cache. For example:

    +
    $ imagetool cache addInstaller --type fmw --version 12.2.1.4.0 --path <download location>/fmw_12.2.1.4.0_infrastructure.jar
    +   
    +$ imagetool cache addInstaller --type soa --version 12.2.1.4.0 --path <download location>/fmw_12.2.1.4.0_soa.jar
    +   
    +$ imagetool cache addInstaller --type osb --version 12.2.1.4.0 --path <download location>/fmw_12.2.1.4.0_osb.jar
    +   
    +$ imagetool cache addInstaller --type idm --version 12.2.1.4.0 --path <download location>/fmw_12.2.1.4.0_idm.jar
    +
  4. +
  5. +

    Add the downloaded OPatch patch to the WebLogic Image Tool cache. For example:

    +
    $ imagetool cache addEntry --key 28186730_13.9.4.2.8 --value <download location>/p28186730_139428_Generic.zip
    +
  6. +
  7. +

    Add the rest of the downloaded product patches to the WebLogic Image Tool cache:

    +
    $ imagetool cache addEntry --key <patch>_12.2.1.4.0 --value <download location>/p<patch>_122140_Generic.zip
    +

    For example:

    +
    $ imagetool cache addEntry --key 33416868_12.2.1.4.0 --value <download location>/p33416868_122140_Generic.zip
    +$ imagetool cache addEntry --key 33453703_12.2.1.4.0 --value <download location>/p33453703_122140_Generic.zip
    +$ imagetool cache addEntry --key 32999272_12.2.1.4.0 --value <download location>/p32999272_122140_Generic.zip
    +$ imagetool cache addEntry --key 33093748_12.2.1.4.0 --value <download location>/p33093748_122140_Generic.zip
    +$ imagetool cache addEntry --key 33281560_12.2.1.4.0 --value <download location>/p33281560_122140_Generic.zip
    +$ imagetool cache addEntry --key 31544353_12.2.1.4.0 --value <download location>/p31544353_122140_Linux-x86-64.zip
    +$ imagetool cache addEntry --key 33313802_12.2.1.4.0 --value <download location>/p33313802_122140_Generic.zip
    +$ imagetool cache addEntry --key 33408307_12.2.1.4.0 --value <download location>/p33408307_122140_Generic.zip
    +$ imagetool cache addEntry --key 33286160_12.2.1.4.0 --value <download location>/p33286160_122140_Generic.zip
    +$ imagetool cache addEntry --key 32880070_12.2.1.4.0 --value <download location>/p32880070_122140_Generic.zip
    +$ imagetool cache addEntry --key 32905339_12.2.1.4.0 --value <download location>/p32905339_122140_Generic.zip
    +$ imagetool cache addEntry --key 32784652_12.2.1.4.0 --value <download location>/p32784652_122140_Generic.zip
    +
  8. +
  9. +

    Edit the <workdir>/imagetool-setup/docker-images/OracleIdentityGovernance/imagetool/12.2.1.4.0/buildArgs file and append the product patches and OPatch patch as follows:

    +
    --patches 33416868_12.2.1.4.0,33453703_12.2.1.4.0,32999272_12.2.1.4.0,33093748_12.2.1.4.0,33281560_12.2.1.4.0,31544353_12.2.1.4.0,33313802_12.2.1.4.0,33408307_12.2.1.4.0,33286160_12.2.1.4.0,32880070_12.2.1.4.0,32905339_12.2.1.4.0,32784652_12.2.1.4.0
    +--opatchBugNumber=28186730_13.9.4.2.8
    +

    An example buildArgs file is now as follows:

    +
    create
    +--jdkVersion=8u301
    +--type oig
    +--version=12.2.1.4.0
    +--tag=oig-latestpsu:12.2.1.4.0
    +--pull
    +--installerResponseFile /scratch/imagetool-setup/docker-images/OracleFMWInfrastructure/dockerfiles/12.2.1.4/install.file,/scratch/docker-images/OracleSOASuite/dockerfiles/12.2.1.4.0/install/soasuite.response,/scratch/docker-images/OracleSOASuite/dockerfiles/12.2.1.4.0/install/osb.response,/scratch/docker-images/OracleIdentityGovernance/dockerfiles/12.2.1.4.0/idmqs.response
    +--additionalBuildCommands /scratch/imagetool-setup/docker-images/OracleIdentityGovernance/imagetool/12.2.1.4.0/additionalBuildCmds.txt
    +--additionalBuildFiles /scratch/imagetool-setup/docker-images/OracleIdentityGovernance/dockerfiles/12.2.1.4.0/container-scripts
    +--patches 33416868_12.2.1.4.0,33453703_12.2.1.4.0,32999272_12.2.1.4.0,33093748_12.2.1.4.0,33281560_12.2.1.4.0,31544353_12.2.1.4.0,33313802_12.2.1.4.0,33408307_12.2.1.4.0,33286160_12.2.1.4.0,32880070_12.2.1.4.0,32905339_12.2.1.4.0,32784652_12.2.1.4.0
    +--opatchBugNumber=28186730_13.9.4.2.8
    +
    +

    Note: In the buildArgs file:

    +
      +
    • --jdkVersion value must match the --version value used in the imagetool cache addInstaller command for --type jdk.
    • +
    • --version value must match the --version value used in the imagetool cache addInstaller command for --type idm.
    • +
    +
    +

    Refer to this page for the complete list of options available with the WebLogic Image Tool create command.

    +
  10. +
  11. +

    Create the Oracle Identity Governance image:

    +
    $ imagetool @<absolute path to buildargs file> --fromImage ghcr.io/oracle/oraclelinux:7-slim
    +
    +

    Note: Make sure that the absolute path to the buildargs file is prepended with a @ character, as shown in the example above.

    +
    +

    For example:

    +
    $ imagetool @<imagetool-setup-location>/docker-images/OracleIdentityGovernance/imagetool/12.2.1.4.0/buildArgs --fromImage ghcr.io/oracle/oraclelinux:7-slim
    +
  12. +
  13. +

    Check the created image using the docker images command:

    +
    $ docker images | grep oig
    +

    The output will look similar to the following:

    +
    oig-latestpsu                                    12.2.1.4.0                     e391ed154bcb        50 seconds ago      4.43GB
    +
  14. +
  15. +

    Run the following command to save the container image to a tar file:

    +
    $ docker save -o <path>/<file>.tar <image>
    +

    For example:

    +
    $ docker save -o $WORKDIR/oig-latestpsu.tar oig-latestpsu:12.2.1.4.0
    +
  16. +
+
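If your Kubernetes worker nodes pull images from a container registry rather than loading tar files directly, the newly built image can be tagged and pushed instead. A hedged sketch, assuming a hypothetical registry host container-registry.example.com that you have already logged in to with docker login:

$ docker tag oig-latestpsu:12.2.1.4.0 container-registry.example.com/oracle/oig-latestpsu:12.2.1.4.0
$ docker push container-registry.example.com/oracle/oig-latestpsu:12.2.1.4.0

The image reference used later in the domain resource would then point at the registry path rather than the local tag.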

Update an image

+

The steps below show how to update an existing Oracle Identity Governance image with an interim patch.

+

The container image to be patched must be loaded in the local docker images repository before attempting these steps.

+
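If the image was previously saved to a tar file with docker save (as shown in the create steps above), it can be loaded back into the local repository first. A minimal sketch, assuming a hypothetical file name oig.tar:

$ docker load -i <path>/oig.tar
$ docker images | grep oig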

In the examples below the image oracle/oig:12.2.1.4.0 is updated with an interim patch.

+
$ docker images
+
+REPOSITORY     TAG          IMAGE ID          CREATED             SIZE
+oracle/oig     12.2.1.4.0   298fdb98e79c      3 months ago        4.42GB
+
    +
  1. +

    Set up the WebLogic Image Tool.

    +
  2. +
  3. +

    Download the required interim patch and the latest OPatch (28186730) from My Oracle Support and save them in a directory of your choice.

    +
  4. +
  5. +

    Add the OPatch patch to the WebLogic Image Tool cache, for example:

    +
    $ imagetool cache addEntry --key 28186730_13.9.4.2.8 --value <downloaded-patches-location>/p28186730_139428_Generic.zip
    +
  6. +
  7. +

    Execute the imagetool cache addEntry command for each patch to add the required patch(es) to the WebLogic Image Tool cache. For example, to add patch p33165837_12214210708_Generic.zip:

    +
    $ imagetool cache addEntry --key=33165837_12.2.1.4.210708 --value <downloaded-patches-location>/p33165837_12214210708_Generic.zip
    +
  8. +
  9. +

    Provide the following arguments to the WebLogic Image Tool update command:

    +
      +
    • --fromImage - Identify the image that needs to be updated. In the example below, the image to be updated is oracle/oig:12.2.1.4.0.
    • --patches - Multiple patches can be specified as a comma-separated list.
    • --tag - Specify the new tag to be applied for the image being built.
    +

    Refer here for the complete list of options available with the WebLogic Image Tool update command.

    +
    +

    Note: The WebLogic Image Tool cache should have the latest OPatch zip. The WebLogic Image Tool will update the OPatch if it is not already updated in the image.

    +
    +

    For example:

    +
    $ imagetool update --fromImage oracle/oig:12.2.1.4.0 --tag=oracle/oig-new:12.2.1.4.0 --patches=33165837_12.2.1.4.210708 --opatchBugNumber=28186730_13.9.4.2.8
    +
    +

    Note: If the command fails because the files in the image being upgraded are not owned by oracle:oracle, then add the parameter --chown <userid>:<groupid> to correspond with the values returned in the error (a sketch is shown after these steps).

    +
    +
  10. +
  11. +

    Check the built image using the docker images command:

    +
    $ docker images | grep oig
    +

    The output will look similar to the following:

    +
    REPOSITORY         TAG          IMAGE ID        CREATED             SIZE
    +oracle/oig-new     12.2.1.4.0   0c8381922e95    16 seconds ago      4.91GB
    +oracle/oig         12.2.1.4.0   298fdb98e79c    3 months ago        4.42GB
    +
  12. +
  13. +

    Run the following command to save the patched container image to a tar file:

    +
    $ docker save -o <path>/<file>.tar <image>
    +

    For example:

    +
    $ docker save -o $WORKDIR/oig-new.tar oracle/oig-new:12.2.1.4.0
    +
  14. +
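As mentioned in the note earlier in these steps, if the update fails because the files in the image are not owned by oracle:oracle, the same command can be re-run with the ownership parameter added. A hedged sketch (the user and group shown are an assumption; use the values reported in your error message):

$ imagetool update --fromImage oracle/oig:12.2.1.4.0 --tag=oracle/oig-new:12.2.1.4.0 --patches=33165837_12.2.1.4.210708 --opatchBugNumber=28186730_13.9.4.2.8 --chown oracle:root
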
diff --git a/docs/23.4.1/idm-products/oig/create-or-update-image/index.xml b/docs/23.4.1/idm-products/oig/create-or-update-image/index.xml new file mode 100644 index 000000000..37cddc4ca --- /dev/null +++ b/docs/23.4.1/idm-products/oig/create-or-update-image/index.xml @@ -0,0 +1,14 @@ (new Hugo RSS index for "Create or update an image on Oracle Fusion Middleware on Kubernetes")
diff --git a/docs/23.4.1/idm-products/oig/index.html b/docs/23.4.1/idm-products/oig/index.html new file mode 100644 index 000000000..f3aaa1c56 --- /dev/null +++ b/docs/23.4.1/idm-products/oig/index.html @@ -0,0 +1,4192 @@ (new page: "Oracle Identity Governance :: Oracle Fusion Middleware on Kubernetes")
Oracle Identity Governance

Oracle Identity Governance on Kubernetes

+

Oracle supports the deployment of Oracle Identity Governance on Kubernetes. See the following sections:

diff --git a/docs/23.4.1/idm-products/oig/index.xml b/docs/23.4.1/idm-products/oig/index.xml new file mode 100644 index 000000000..a79e37990 --- /dev/null +++ b/docs/23.4.1/idm-products/oig/index.xml @@ -0,0 +1,14 @@ (new Hugo RSS index for "Oracle Identity Governance on Oracle Fusion Middleware on Kubernetes")
diff --git a/docs/23.4.1/idm-products/oig/introduction/index.html b/docs/23.4.1/idm-products/oig/introduction/index.html new file mode 100644 index 000000000..6dba12cea --- /dev/null +++ b/docs/23.4.1/idm-products/oig/introduction/index.html @@ -0,0 +1,3994 @@ (new page: "Introduction :: Oracle Fusion Middleware on Kubernetes")
Introduction

The WebLogic Kubernetes Operator supports deployment of Oracle Identity Governance (OIG).

+

In this release, OIG domains are supported using the “domain on a persistent volume” model only, where the domain home is located in a persistent volume (PV).

+

The operator has several key features to assist you with deploying and managing OIG domains in a Kubernetes environment. You can:

+
  • Create OIG instances in a Kubernetes persistent volume. This persistent volume can reside in an NFS file system or other Kubernetes volume types.
  • Start servers based on declarative startup parameters and desired states.
  • Expose the OIG Services for external access.
  • Scale OIG domains by starting and stopping Managed Servers on demand.
  • Publish operator and WebLogic Server logs into Elasticsearch and interact with them in Kibana.
  • Monitor the OIG instance using Prometheus and Grafana.

Current production release

+

The current production release for the Oracle Identity Governance domain deployment on Kubernetes is 23.4.1. This release uses the WebLogic Kubernetes Operator version 4.1.2.

+

For 4.0.X WebLogic Kubernetes Operator refer to Version 23.3.1

+

For 3.4.X WebLogic Kubernetes Operator refer to Version 23.1.1

+

Recent changes and known issues

+

See the Release Notes for recent changes and known issues for Oracle Identity Governance domain deployment on Kubernetes.

+

Limitations

+

See here for limitations in this release.

+

Getting started

+

This documentation explains how to configure OIG on a Kubernetes cluster where no other Oracle Identity Management products will be deployed. For detailed information about this type of deployment, start at Prerequisites and follow this documentation sequentially. Please note that this documentation does not explain how to configure the Kubernetes cluster itself, given that the product can be deployed on any compliant Kubernetes vendor.

+

If you are deploying multiple Oracle Identity Management products on the same Kubernetes cluster, then you must follow the Enterprise Deployment Guide outlined in Enterprise Deployments. Please note, you also have the option to follow the Enterprise Deployment Guide even if you are only installing OIG and no other Oracle Identity Management products.

+

Note: If you need to understand how to configure a Kubernetes cluster ready for an Oracle Identity Governance deployment, you should follow the Enterprise Deployment Guide referenced in Enterprise Deployments. The Enterprise Deployment Automation section also contains details on automation scripts that can:

+
  • Automate the creation of a Kubernetes cluster on Oracle Cloud Infrastructure (OCI), ready for the deployment of Oracle Identity Management products.
  • Automate the deployment of Oracle Identity Management products on any compliant Kubernetes cluster.

Documentation for earlier releases

+

To view documentation for an earlier release, see:

diff --git a/docs/23.4.1/idm-products/oig/introduction/index.xml b/docs/23.4.1/idm-products/oig/introduction/index.xml new file mode 100644 index 000000000..367e81f35 --- /dev/null +++ b/docs/23.4.1/idm-products/oig/introduction/index.xml @@ -0,0 +1,14 @@ (new Hugo RSS index for "Introduction on Oracle Fusion Middleware on Kubernetes")
diff --git a/docs/23.4.1/idm-products/oig/manage-oig-domains/delete-domain-home/index.html b/docs/23.4.1/idm-products/oig/manage-oig-domains/delete-domain-home/index.html new file mode 100644 index 000000000..4bb33a577 --- /dev/null +++ b/docs/23.4.1/idm-products/oig/manage-oig-domains/delete-domain-home/index.html @@ -0,0 +1,4034 @@ (new page: "g. Delete the OIG domain home :: Oracle Fusion Middleware on Kubernetes")
g. Delete the OIG domain home

Sometimes in production, but most likely in testing environments, you might want to remove the domain home that is generated using the create-domain.sh script.

+
    +
  1. +

    Run the following command to delete the domain:

    +
    $ cd $WORKDIR/kubernetes/delete-domain
    +$ ./delete-weblogic-domain-resources.sh -d <domain_uid>
    +

    For example:

    +
    $ cd $WORKDIR/kubernetes/delete-domain
    +$ ./delete-weblogic-domain-resources.sh -d governancedomain
    +
  2. +
  3. +

    Drop the RCU schemas as follows:

    +
    $ kubectl exec -it helper -n <domain_namespace> -- /bin/bash
    +[oracle@helper ~]$
    +[oracle@helper ~]$ export CONNECTION_STRING=<db_host.domain>:<db_port>/<service_name>
    +[oracle@helper ~]$ export RCUPREFIX=<rcu_schema_prefix>
    +   
    +/u01/oracle/oracle_common/bin/rcu -silent -dropRepository -databaseType ORACLE -connectString $CONNECTION_STRING \
    +-dbUser sys -dbRole sysdba -selectDependentsForComponents true -schemaPrefix $RCUPREFIX \
    +-component MDS -component IAU -component IAU_APPEND -component IAU_VIEWER -component OPSS \
    +-component WLS -component STB -component OIM -component SOAINFRA -component UCSUMS -f < /tmp/pwd.txt
    +

    For example:

    +
    $ kubectl exec -it helper -n oigns -- /bin/bash
    +[oracle@helper ~]$ export CONNECTION_STRING=mydatabasehost.example.com:1521/orcl.example.com
    +[oracle@helper ~]$ export RCUPREFIX=OIGK8S
    +/u01/oracle/oracle_common/bin/rcu -silent -dropRepository -databaseType ORACLE -connectString $CONNECTION_STRING \
    +-dbUser sys -dbRole sysdba -selectDependentsForComponents true -schemaPrefix $RCUPREFIX \
    +-component MDS -component IAU -component IAU_APPEND -component IAU_VIEWER -component OPSS \
    +-component WLS -component STB -component OIM -component SOAINFRA -component UCSUMS -f < /tmp/pwd.txt
    +
  4. +
  5. +

    Delete the contents of the persistent volume:

    +
    $ rm -rf <persistent_volume>/governancedomainpv/*
    +

    For example:

    +
    $ rm -rf /scratch/shared/governancedomainpv/*
    +
  6. +
  7. +

    Delete the WebLogic Kubernetes Operator, by running the following command:

    +
    $ helm delete weblogic-kubernetes-operator -n opns
    +
  8. +
  9. +

    Delete the label from the OIG namespace:

    +
    $ kubectl label namespaces <domain_namespace> weblogic-operator-
    +

    For example:

    +
    $ kubectl label namespaces oigns weblogic-operator-
    +
  10. +
  11. +

    Delete the service account for the operator:

    +
    $ kubectl delete serviceaccount <sample-kubernetes-operator-sa> -n <domain_namespace>
    +

    For example:

    +
    $ kubectl delete serviceaccount op-sa -n opns
    +
  12. +
  13. +

    Delete the operator namespace:

    +
    $ kubectl delete namespace <sample-kubernetes-operator-ns>
    +

    For example:

    +
    $ kubectl delete namespace opns
    +
  14. +
  15. +

    To delete NGINX:

    +
    $ helm delete governancedomain-nginx-designconsole -n <domain_namespace>
    +

    For example:

    +
    $ helm delete governancedomain-nginx-designconsole -n oigns
    +

    Then run:

    +
    $ helm delete governancedomain-nginx -n <domain_namespace>
    +

    For example:

    +
    $ helm delete governancedomain-nginx -n oigns
    +

    Then run:

    +
    $ helm delete nginx-ingress -n <domain_namespace>
    +

    For example:

    +
    $ helm delete nginx-ingress -n nginxssl
    +

    Then delete the NGINX namespace:

    +
    $ kubectl delete namespace <namespace>
    +

    For example:

    +
    $ kubectl delete namespace nginxssl
    +
  16. +
  17. +

    Delete the OIG namespace:

    +
    $ kubectl delete namespace <domain_namespace>
    +

    For example:

    +
    $ kubectl delete namespace oigns
    +
  18. +
diff --git a/docs/23.4.1/idm-products/oig/manage-oig-domains/domain-lifecycle/index.html b/docs/23.4.1/idm-products/oig/manage-oig-domains/domain-lifecycle/index.html new file mode 100644 index 000000000..5d9c64467 --- /dev/null +++ b/docs/23.4.1/idm-products/oig/manage-oig-domains/domain-lifecycle/index.html @@ -0,0 +1,4198 @@ (new page: "a. Domain life cycle :: Oracle Fusion Middleware on Kubernetes")
a. Domain life cycle

  1. View existing OIG servers
  2. Starting/Scaling up OIG Managed servers
  3. Stopping/Scaling down OIG Managed servers
  4. Stopping and starting the Administration Server and Managed Servers
  5. Domain lifecycle sample scripts

As OIG domains use the WebLogic Kubernetes Operator, domain lifecycle operations are managed using the WebLogic Kubernetes Operator itself.

+

This document shows the basic operations for starting, stopping and scaling servers in the OIG domain.

+

For more detailed information refer to Domain Life Cycle in the WebLogic Kubernetes Operator documentation.

+ +

Do not use the WebLogic Server Administration Console or Oracle Enterprise Manager Console to start or stop servers.

+
+ +

Note: The instructions below are for starting, stopping, or scaling servers manually. If you wish to use autoscaling, see Kubernetes Horizontal Pod Autoscaler. Please note, if you have enabled autoscaling, it is recommended to delete the autoscaler before running the commands below.

+

View existing OIG Servers

+

The default OIG deployment starts the Administration Server (AdminServer), one OIG Managed Server (oim_server1) and one SOA Managed Server (soa_server1).

+

The deployment also creates, but doesn’t start, four extra OIG Managed Servers (oim-server2 to oim-server5) and four more SOA Managed Servers (soa_server2 to soa_server5).

+

All these servers are visible in the WebLogic Server Administration Console https://${MASTERNODE-HOSTNAME}:${MASTERNODE-PORT}/console by navigating to Domain Structure > governancedomain > Environment > Servers.

+

To view the running servers using kubectl, run the following command:

+
$ kubectl get pods -n <domain_namespace>
+

For example:

+
$ kubectl get pods -n oigns
+

The output should look similar to the following:

+
NAME                                                        READY   STATUS      RESTARTS   AGE
+governancedomain-adminserver                                1/1     Running     0          23h
+governancedomain-create-fmw-infra-sample-domain-job-8cww8   0/1     Completed   0          24h
+governancedomain-oim-server1                                1/1     Running     0          23h
+governancedomain-soa-server1                                1/1     Running     0          23h
+

Starting/Scaling up OIG Managed Servers

+

The number of OIG Managed Servers running is dependent on the replicas parameter configured for the cluster. To start more OIG Managed Servers perform the following steps:

+
    +
  1. +

    Run the following kubectl command to edit the oim_cluster:

    +
    $ kubectl edit cluster <cluster_name> -n <domain_namespace>
    +

    For example:

    +
    $ kubectl edit cluster governancedomain-oim-cluster -n oigns
    +

    Note: This opens an edit session for the domain where parameters can be changed using standard vi commands.

    +
  2. +
  3. +

    In the edit session, search for spec:, and then look for the replicas parameter under clusterName: oim_cluster. By default the replicas parameter is set to “1” hence a single OIG Managed Server is started (oim_server1):

    +
    spec:
    +  clusterName: oim_cluster
    +  replicas: 1
    +  serverPod:
    +    env:
    +    - name: USER_MEM_ARGS
    +      value: -Djava.security.egd=file:/dev/./urandom -Xms8192m -Xmx8192m
    +...
    +
  4. +
  5. +

    To start more OIG Managed Servers, increase the replicas value as desired. In the example below, one more Managed Server will be started by setting replicas to “2”:

    +
    spec:
    +  clusterName: oim_cluster
    +  replicas: 2
    +  serverPod:
    +    env:
    +    - name: USER_MEM_ARGS
    +      value: -Djava.security.egd=file:/dev/./urandom -Xms8192m -Xmx8192m
    +...
    +
  6. +
  7. +

    Save the file and exit (:wq)

    +

    The output will look similar to the following:

    +
    cluster.weblogic.oracle/governancedomain-oim-cluster edited
    +
  8. +
  9. +

    Run the following kubectl command to view the pods:

    +
    $ kubectl get pods -n <domain_namespace>
    +

    For example:

    +
    $ kubectl get pods -n oigns
    +

    The output will look similar to the following:

    +
    NAME                                                        READY   STATUS      RESTARTS   AGE
    +governancedomain-adminserver                                1/1     Running     0          23h
    +governancedomain-create-fmw-infra-sample-domain-job-8cww8   0/1     Completed   0          24h
    +governancedomain-oim-server1                                1/1     Running     0          23h
    +governancedomain-oim-server2                                0/1     Running     0          7s
    +governancedomain-soa-server1                                1/1     Running     0          23h
    +

    One new pod (governancedomain-oim-server2) is started, but currently has a READY status of 0/1. This means oim_server2 is not currently running but is in the process of starting. The server will take several minutes to start so keep executing the command until READY shows 1/1:

    +
    NAME                                                        READY   STATUS      RESTARTS   AGE 
    +governancedomain-adminserver                                1/1     Running     0          23h
    +governancedomain-create-fmw-infra-sample-domain-job-8cww8   0/1     Completed   0          24h
    +governancedomain-oim-server1                                1/1     Running     0          23h
    +governancedomain-oim-server2                                1/1     Running     0          5m27s
    +governancedomain-soa-server1                                1/1     Running     0          23h
    +

    Note: To check what is happening during server startup when READY is 0/1, run the following command to view the log of the pod that is starting:

    +
    $ kubectl logs <pod> -n <domain_namespace>
    +

    For example:

    +
    $ kubectl logs governancedomain-oim-server2 -n oigns
    +
  10. +
+
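If you prefer not to open an interactive edit session, the replicas value can also be changed non-interactively. A hedged sketch using kubectl patch against the same cluster resource (resource and namespace names as used in the examples above):

$ kubectl patch cluster governancedomain-oim-cluster -n oigns --type=merge -p '{"spec":{"replicas":2}}'

The operator reacts to the change in the same way as when the value is edited with kubectl edit.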

Stopping/Scaling down OIG Managed Servers

+

As mentioned in the previous section, the number of OIG Managed Servers running is dependent on the replicas parameter configured for the cluster. To stop one or more OIG Managed Servers, perform the following:

+
    +
  1. +

    Run the following kubectl command to edit the oim_cluster:

    +
    $ kubectl edit cluster <cluster_name> -n <domain_namespace>
    +

    For example:

    +
    $ kubectl edit cluster governancedomain-oim-cluster -n oigns
    +
  2. +
  3. +

    In the edit session, search for spec:, and then look for the replicas parameter under clusterName: oim_cluster. In the example below replicas is set to “2” hence two OIG Managed Servers are started (oim_server1 and oim_server2):

    +
    spec:
    +  clusterName: oim_cluster
    +  replicas: 2
    +  serverPod:
    +    env:
    +    - name: USER_MEM_ARGS
    +      value: -Djava.security.egd=file:/dev/./urandom -Xms8192m -Xmx8192m
    +...
    +
  4. +
  5. +

    To stop OIG Managed Servers, decrease the replicas value as desired. In the example below, we will stop one Managed Server by setting replicas to “1”:

    +
    spec:
    +  clusterName: oim_cluster
    +  replicas: 1
    +  serverPod:
    +    env:
    +    - name: USER_MEM_ARGS
    +      value: -Djava.security.egd=file:/dev/./urandom -Xms8192m -Xmx8192m
    +... 
    +
  6. +
  7. +

    Save the file and exit (:wq)

    +
  8. +
  9. +

    Run the following kubectl command to view the pods:

    +
    $ kubectl get pods -n <domain_namespace>
    +

    For example:

    +
    $ kubectl get pods -n oigns
    +

    The output will look similar to the following:

    +
    NAME                                                        READY   STATUS      RESTARTS   AGE
    +governancedomain-adminserver                                1/1     Running       0          23h
    +governancedomain-create-fmw-infra-sample-domain-job-8cww8   0/1     Completed     0          24h
    +governancedomain-oim-server1                                1/1     Running       0          23h
    +governancedomain-oim-server2                                1/1     Terminating   0          7m30s
    +governancedomain-soa-server1                                1/1     Running       0          23h
    +

    The exiting pod shows a STATUS of Terminating (governancedomain-oim-server2). The server may take a minute or two to stop, so keep executing the command until the pod has disappeared:

    +
    NAME                                                        READY   STATUS      RESTARTS   AGE
    +governancedomain-adminserver                                1/1     Running     0          23h
    +governancedomain-create-fmw-infra-sample-domain-job-8cww8   0/1     Completed   0          24h
    +governancedomain-oim-server1                                1/1     Running     0          23h
    +governancedomain-soa-server1                                1/1     Running     0          23h
    +
  10. +
+

Stopping and Starting the Administration Server and Managed Servers

+

To stop all the OIG Managed Servers and the Administration Server in one operation:

+
    +
  1. +

    Run the following kubectl command to edit the domain:

    +
    $ kubectl edit domain <domain_uid> -n <domain_namespace>
    +

    For example:

    +
    $ kubectl edit domain governancedomain -n oigns
    +
  2. +
  3. +

    In the edit session search for serverStartPolicy: IfNeeded under the domain spec:

    +
    ...
    +    volumeMounts:
    +    - mountPath: /u01/oracle/user_projects/domains
    +      name: weblogic-domain-storage-volume
    +    volumes:
    +    - name: weblogic-domain-storage-volume
    +      persistentVolumeClaim:
    +        claimName: governancedomain-domain-pvc
    +  serverStartPolicy: IfNeeded
    +  webLogicCredentialsSecret:
    +    name: oig-domain-credentials
    + ...
    +
  4. +
  5. +

    Change serverStartPolicy: IfNeeded to Never as follows:

    +
     ...
    +    volumeMounts:
    +    - mountPath: /u01/oracle/user_projects/domains
    +      name: weblogic-domain-storage-volume
    +    volumes:
    +    - name: weblogic-domain-storage-volume
    +      persistentVolumeClaim:
    +        claimName: governancedomain-domain-pvc
    +  serverStartPolicy: Never
    +  webLogicCredentialsSecret:
    +    name: oig-domain-credentials
    + ...
    +
  6. +
  7. +

    Save the file and exit (:wq).

    +
  8. +
  9. +

    Run the following kubectl command to view the pods:

    +
    $ kubectl get pods -n <domain_namespace>
    +

    For example:

    +
    $ kubectl get pods -n oigns
    +

    The output will look similar to the following:

    +
    NAME                                                        READY   STATUS        RESTARTS   AGE
    +governancedomain-adminserver                                1/1     Terminating   0          23h
    +governancedomain-create-fmw-infra-sample-domain-job-8cww8   0/1     Completed     0          24h
    +governancedomain-oim-server1                                1/1     Terminating   0          23h
    +governancedomain-soa-server1                                1/1     Terminating   0          23h
    +

    The AdminServer pod and Managed Server pods will move to a STATUS of Terminating. After a few minutes, run the command again and the pods should have disappeared:

    +
    NAME                                                        READY   STATUS      RESTARTS   AGE
    +governancedomain-create-fmw-infra-sample-domain-job-8cww8   0/1     Completed   0          24h
    +
  10. +
  11. +

    To start the Administration Server and Managed Servers up again, repeat the previous steps but change serverStartPolicy: Never to IfNeeded as follows:

    +
      ...
    +    volumeMounts:
    +    - mountPath: /u01/oracle/user_projects/domains
    +      name: weblogic-domain-storage-volume
    +    volumes:
    +    - name: weblogic-domain-storage-volume
    +      persistentVolumeClaim:
    +        claimName: governancedomain-domain-pvc
    +  serverStartPolicy: IfNeeded
    +  webLogicCredentialsSecret:
    +    name: oig-domain-credentials
    +  ...
    +
  12. +
  13. +

    Run the following kubectl command to view the pods:

    +
    $ kubectl get pods -n <domain_namespace>
    +

    For example:

    +
    $ kubectl get pods -n oigns
    +

    The output will look similar to the following:

    +
    NAME                                                        READY   STATUS      RESTARTS   AGE
    +governancedomain-adminserver                                0/1     Running     0          4s
    +governancedomain-create-fmw-infra-sample-domain-job-8cww8   0/1     Completed   0          24h
    +

    The Administration Server pod will start followed by the OIG Managed Servers pods. This process will take several minutes, so keep executing the command until all the pods are running with READY status 1/1 :

    +
    NAME                                                        READY   STATUS      RESTARTS   AGE
    +governancedomain-adminserver                                1/1     Running     0          6m57s
    +governancedomain-create-fmw-infra-sample-domain-job-8cww8   0/1     Completed   0          24h
    +governancedomain-oim-server1                                1/1     Running     0          4m33s
    +governancedomain-soa-server1                                1/1     Running     0          4m33s
    +
  14. +
+
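The same start/stop behaviour can be driven without an interactive edit session. A hedged sketch using kubectl patch to set serverStartPolicy on the domain resource (domain and namespace names as used in the examples above):

$ kubectl patch domain governancedomain -n oigns --type=merge -p '{"spec":{"serverStartPolicy":"Never"}}'
$ kubectl patch domain governancedomain -n oigns --type=merge -p '{"spec":{"serverStartPolicy":"IfNeeded"}}'

Run the first command to shut everything down and the second to start the Administration Server and Managed Servers again.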

Domain lifecycle sample scripts

+

The WebLogic Kubernetes Operator provides sample scripts to start up or shut down a specific Managed Server or cluster in a deployed domain, or the entire deployed domain.

+

Note: Prior to running these scripts, you must have previously created and deployed the domain.

+

The scripts are located in the $WORKDIR/kubernetes/domain-lifecycle directory. For more information, see the README.
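For illustration, a hedged sketch of how one of these scripts is typically invoked. The script name and flags below are based on the operator's sample scripts and are an assumption; check the README in $WORKDIR/kubernetes/domain-lifecycle for the exact names and options shipped with your version:

$ cd $WORKDIR/kubernetes/domain-lifecycle
$ ./startServer.sh -d governancedomain -n oigns -s oim_server2

This would request a start of the oim_server2 Managed Server in the governancedomain domain in the oigns namespace.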

diff --git a/docs/23.4.1/idm-products/oig/manage-oig-domains/hpa/index.html b/docs/23.4.1/idm-products/oig/manage-oig-domains/hpa/index.html new file mode 100644 index 000000000..6fc327962 --- /dev/null +++ b/docs/23.4.1/idm-products/oig/manage-oig-domains/hpa/index.html @@ -0,0 +1,4254 @@ (new page: "f. Kubernetes Horizontal Pod Autoscaler :: Oracle Fusion Middleware on Kubernetes")
f. Kubernetes Horizontal Pod Autoscaler

  1. Prerequisite configuration
  2. Deploy the Kubernetes Metrics Server
     1. Troubleshooting
  3. Deploy HPA
  4. Testing HPA
  5. Delete the HPA
  6. Other considerations

Kubernetes Horizontal Pod Autoscaler (HPA) is supported with WebLogic Kubernetes Operator 4.0.X and later.

+

HPA allows automatic scaling (up and down) of the OIG Managed Servers. If load increases, extra OIG Managed Servers will be started as required, up to the value of configuredManagedServerCount defined when the domain was created (see Prepare the create domain script). Similarly, if load decreases, OIG Managed Servers will be automatically shut down.

+

For more information on HPA, see Horizontal Pod Autoscaling.

+

The instructions below show you how to configure and run an HPA to scale an OIG cluster (governancedomain-oim-cluster) resource, based on CPU utilization or memory resource metrics. If required, you can also perform the following for the governancedomain-soa-cluster.

+

Note: If you enable HPA and then decide you want to start/stop/scale OIG Managed servers manually as per Domain Life Cycle, it is recommended to delete HPA beforehand as per Delete the HPA.

+

Prerequisite configuration

+

In order to use HPA, the OIG domain must have been created with the required resources parameter as per Set the OIM server memory parameters. For example:

+
serverPod:
+  env:
+  - name: USER_MEM_ARGS
+    value: "-XX:+UseContainerSupport -Djava.security.egd=file:/dev/./urandom -Xms8192m -Xmx8192m"
+  resources:
+    limits:
+      cpu: "2"
+      memory: "8Gi"
+    requests:
+      cpu: "1000m"
+      memory: "4Gi"
+

If you created the OIG domain without setting these parameters, then you can update the domain using the following steps:

+
    +
  1. +

    Run the following command to edit the cluster:

    +
    $ kubectl edit cluster governancedomain-oim-cluster -n oigns
    +

    Note: This opens an edit session for the governancedomain-oim-cluster where parameters can be changed using standard vi commands.

    +
  2. +
  3. +

    In the edit session, search for spec:, and then look for the replicas parameter under clusterName: oim_cluster. Change the entry so it looks as follows:

    +
    spec:
    +  clusterName: oim_cluster
    +  replicas: 1
    +  serverPod:
    +    env:
    +    - name: USER_MEM_ARGS
    +      value: -XX:+UseContainerSupport -Djava.security.egd=file:/dev/./urandom -Xms8192m -Xmx8192m
    +    resources:
    +      limits:
    +        cpu: "2"
    +        memory: 8Gi
    +      requests:
    +        cpu: 1000m
    +        memory: 4Gi
    +  serverService:
    +    precreateService: true
    +    ...
    +
  4. +
  5. +

    Save the file and exit (:wq!)

    +

    The output will look similar to the following:

    +
    cluster.weblogic.oracle/governancedomain-oim-cluster edited
    +

    The OIG Managed Server pods will then automatically be restarted.

    +
  6. +
+

Deploy the Kubernetes Metrics Server

+

Before deploying HPA you must deploy the Kubernetes Metrics Server.

+
    +
  1. +

    Check to see if the Kubernetes Metrics Server is already deployed:

    +
    $ kubectl get pods -n kube-system | grep metric
    +

    If a row is returned as follows, then Kubernetes Metric Server is deployed and you can move to Deploy HPA.

    +
    metrics-server-d9694457-mf69d           1/1     Running   0             5m13s
    +
  2. +
  3. +

    If no rows are returned by the previous command, then the Kubernetes Metric Server needs to be deployed. Run the following commands to get the components.yaml:

    +
    $ mkdir $WORKDIR/kubernetes/hpa
    +$ cd $WORKDIR/kubernetes/hpa
    +$ wget https://github.com/kubernetes-sigs/metrics-server/releases/latest/download/components.yaml
    +
  4. +
  5. +

    Deploy the Kubernetes Metrics Server by running the following command:

    +
    $ kubectl apply -f components.yaml
    +

    The output will look similar to the following:

    +
    serviceaccount/metrics-server created
    +clusterrole.rbac.authorization.k8s.io/system:aggregated-metrics-reader created
    +clusterrole.rbac.authorization.k8s.io/system:metrics-server created
    +rolebinding.rbac.authorization.k8s.io/metrics-server-auth-reader created
    +clusterrolebinding.rbac.authorization.k8s.io/metrics-server:system:auth-delegator created
    +clusterrolebinding.rbac.authorization.k8s.io/system:metrics-server created
    +service/metrics-server created
    +deployment.apps/metrics-server created
    +apiservice.apiregistration.k8s.io/v1beta1.metrics.k8s.io created
    +
  6. +
  7. +

    Run the following command to check Kubernetes Metric Server is running:

    +
    $ kubectl get pods -n kube-system | grep metric
    +

    Make sure the pod has a READY status of 1/1:

    +
    metrics-server-d9694457-mf69d           1/1     Running   0             39s
    +
  8. +
+
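Once the Metrics Server pod is READY, you can confirm that resource metrics are being collected for the OIG pods. A quick optional check (assuming the oigns namespace used in this guide); it may take a minute or two after deployment before metrics are available:

$ kubectl top pods -n oigns

If CPU and memory figures are returned for the pods, the HPA will have the data it needs.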

Troubleshooting

+

If the Kubernetes Metric Server does not reach the READY 1/1 state, run the following commands:

+
$ kubectl describe pod <metrics-server-pod> -n kube-system
+$ kubectl logs <metrics-server-pod> -n kube-system
+

If you see errors such as:

+
Readiness probe failed: HTTP probe failed with statuscode: 500
+

and:

+
E0907 13:07:50.937308       1 scraper.go:140] "Failed to scrape node" err="Get \"https://100.105.18.113:10250/metrics/resource\": x509: cannot validate certificate for 100.105.18.113 because it doesn't contain any IP SANs" node="worker-node1"
+

then you may need to install a valid cluster certificate for your Kubernetes cluster.

+

For testing purposes, you can resolve this issue by:

+
    +
  1. +

    Delete the Kubernetes Metrics Server by running the following command:

    +
    $ kubectl delete -f $WORKDIR/kubernetes/hpa/components.yaml
    +
  2. +
  3. +

    Edit the $WORKDIR/kubernetes/hpa/components.yaml file and locate the args: section. Add --kubelet-insecure-tls to the arguments. For example:

    +
    spec:
    +  containers:
    +  - args:
    +    - --cert-dir=/tmp
    +    - --secure-port=4443
    +    - --kubelet-preferred-address-types=InternalIP,ExternalIP,Hostname
    +    - --kubelet-use-node-status-port
    +    - --kubelet-insecure-tls
    +    - --metric-resolution=15s
    +    image: registry.k8s.io/metrics-server/metrics-server:v0.6.4
    + ...
    +
  4. +
  5. +

    Deploy the Kubernetes Metrics Server using the command:

    +
    $ kubectl apply -f components.yaml
    +

    Run the following and make sure the READY status shows 1/1:

    +
    $ kubectl get pods -n kube-system | grep metric
    +

    The output should look similar to the following:

    +
    metrics-server-d9694457-mf69d           1/1     Running   0             40s
    +
  6. +
+

Deploy HPA

+

The steps below show how to configure and run an HPA to scale the governancedomain-oim-cluster, based on the CPU or memory utilization resource metrics.

+

The default OIG deployment creates the cluster governancedomain-oim-cluster which starts one OIG Managed Server (oim_server1). The deployment also creates, but doesn’t start, four extra OIG Managed Servers (oim-server2 to oim-server5).

+

In the following example an HPA resource is created, targeted at the cluster resource governancedomain-oim-cluster. This resource will autoscale OIG Managed Servers from a minimum of 1 cluster member up to 5 cluster members. Scaling up will occur when the average CPU is consistently over 70%. Scaling down will occur when the average CPU is consistently below 70%.

+
    +
  1. +

    Navigate to the $WORKDIR/kubernetes/hpa directory and create an autoscalehpa.yaml file that contains the following.

    +
    #
    +apiVersion: autoscaling/v2
    +kind: HorizontalPodAutoscaler
    +metadata:
    +  name: governancedomain-oim-cluster-hpa
    +  namespace: oigns
    +spec:
    +  scaleTargetRef:
    +    apiVersion: weblogic.oracle/v1
    +    kind: Cluster
    +    name: governancedomain-oim-cluster
    +  behavior:
    +    scaleDown:
    +      stabilizationWindowSeconds: 60
    +    scaleUp:
    +      stabilizationWindowSeconds: 60
    +  minReplicas: 1
    +  maxReplicas: 5
    +  metrics:
    +  - type: Resource
    +    resource:
    +      name: cpu
    +      target:
    +        type: Utilization
    +        averageUtilization: 70
    +

    Note : minReplicas and maxReplicas should match your current domain settings.

    +

    Note: For setting HPA based on Memory Metrics, update the metrics block with the following content. Please note we recommend using only CPU or Memory, not both.

    +
    metrics:
    +- type: Resource
    +  resource:
    +    name: memory
    +    target:
    +      type: Utilization
    +      averageUtilization: 70
    +
  2. +
  3. +

    Run the following command to create the autoscaler:

    +
    $ kubectl apply -f autoscalehpa.yaml
    +

    The output will look similar to the following:

    +
    horizontalpodautoscaler.autoscaling/governancedomain-oim-cluster-hpa created
    +
  4. +
  5. +

    Verify the status of the autoscaler by running the following:

    +
    $ kubectl get hpa -n oigns
    +

    The output will look similar to the following:

    +
    NAME                               REFERENCE                              TARGETS   MINPODS   MAXPODS   REPLICAS   AGE
    +governancedomain-oim-cluster-hpa   Cluster/governancedomain-oim-cluster   16%/70%   1         5         1          20s
    +

    In the example above, this shows that CPU is currently running at 16% for the governancedomain-oim-cluster-hpa.

    +
  6. +
+
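To see the scaling decisions and any warnings the autoscaler records, you can also describe the HPA resource. A small sketch using the resource name created above:

$ kubectl describe hpa governancedomain-oim-cluster-hpa -n oigns

The Events section at the bottom of the output shows when and why the autoscaler scaled the cluster up or down.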

Testing HPA

+
    +
  1. +

    Check the current status of the OIG Managed Servers:

    +
    $ kubectl get pods -n oigns
    +

    The output will look similar to the following:

    +
    NAME                                                        READY   STATUS      RESTARTS   AGE
    +governancedomain-adminserver                                1/1     Running     0          20m
    +governancedomain-create-fmw-infra-sample-domain-job-8wd2b   0/1     Completed   0          2d18h
    +governancedomain-oim-server1                                1/1     Running     0          17m
    +governancedomain-soa-server1                                1/1     Running     0          17m
    +helper                                                      1/1     Running     0          2d18h
    +

    In the above only governancedomain-oim-server1 is running.

    +
  2. +
  3. +

    To test HPA can scale up the WebLogic cluster governancedomain-oim-cluster, run the following commands:

    +
    $ kubectl exec --stdin --tty governancedomain-oim-server1 -n oigns -- /bin/bash
    +

    This will take you inside a bash shell inside the oim_server1 pod:

    +
    [oracle@governancedomain-oim-server1 oracle]$
    +

    Inside the bash shell, run the following command to increase the load on the CPU:

    +
    [oracle@governancedomain-oim-server1 oracle]$ dd if=/dev/zero of=/dev/null
    +

    This command will continue to run in the foreground.

    +
  4. +
  5. +

    In a command window outside the bash shell, run the following command to view the current CPU usage:

    +
    $ kubectl get hpa -n oigns
    +

    The output will look similar to the following:

    +
    NAME                               REFERENCE                              TARGETS    MINPODS   MAXPODS   REPLICAS   AGE
    +governancedomain-oim-cluster-hpa   Cluster/governancedomain-oim-cluster   386%/70%   1         5         1          2m47s
    +

    In the above example the CPU has increased to 386%. As this is above the 70% limit, the autoscaler increases the replicas on the Cluster resource and the operator responds by starting additional cluster members.

    +
  6. +
  7. +

    Run the following to see if any more OIG Managed Servers are started:

    +
    $ kubectl get pods -n oigns
    +

    The output will look similar to the following:

    +
    NAME                                                     READY   STATUS      RESTARTS      AGE
    +governancedomain-adminserver                                1/1     Running     0          30m
    +governancedomain-create-fmw-infra-sample-domain-job-8wd2b   0/1     Completed   0          2d18h
    +governancedomain-oim-server1                                1/1     Running     0          27m
    +governancedomain-oim-server2                                1/1     Running     0          10m
    +governancedomain-oim-server3                                1/1     Running     0          10m
    +governancedomain-oim-server4                                1/1     Running     0          10m
    +governancedomain-oim-server5                                1/1     Running     0          10m
    +governancedomain-soa-server1                                1/1     Running     0          27m
    +helper                                                      1/1     Running     0          2d18h
    +

    In the example above four more OIG Managed Servers have been started (oim-server2 - oim-server5).

    +

    Note: It may take some time for the servers to appear and start. Once the servers are at READY status of 1/1, the servers are started.

    +
  8. +
  9. +

    To stop the load on the CPU, in the bash shell, issue a Control C, and then exit the bash shell:

    +
    [oracle@governancedomain-oim-server1 oracle]$ dd if=/dev/zero of=/dev/null
    +^C
    +[oracle@governancedomain-oim-server1 oracle]$ exit
    +
  10. +
  11. +

    Run the following command to view the current CPU usage:

    +
    $ kubectl get hpa -n oigns
    +

    The output will look similar to the following:

    +
    NAME                               REFERENCE                              TARGETS   MINPODS   MAXPODS   REPLICAS   AGE
    +governancedomain-oim-cluster-hpa   Cluster/governancedomain-oim-cluster   33%/70%   1         5         5          37m
    +

    In the above example CPU has dropped to 33%. As this is below the 70% threshold, you should see the autoscaler scale down the servers:

    +
    $ kubectl get pods -n oigns
    +

    The output will look similar to the following:

    +
    NAME                                                        READY   STATUS        RESTARTS      AGE
    +governancedomain-adminserver                                1/1     Running       0             43m
    +governancedomain-create-fmw-infra-sample-domain-job-8wd2b   0/1     Completed     0             2d18h
    +governancedomain-oim-server1                                1/1     Running       0             40m
    +governancedomain-oim-server2                                1/1     Running       0             13m
    +governancedomain-oim-server3                                1/1     Running       0             13m
    +governancedomain-oim-server4                                1/1     Running       0             13m
    +governancedomain-oim-server5                                0/1     Terminating   0             13m
    +governancedomain-soa-server1                                1/1     Running       0             40m
    +helper                                                      1/1     Running       0             2d19h
    +

    Eventually, all the servers except oim-server1 will disappear:

    +
    NAME                                                     READY   STATUS      RESTARTS       AGE
    +governancedomain-adminserver                                1/1     Running       0             44m
    +governancedomain-create-fmw-infra-sample-domain-job-8wd2b   0/1     Completed     0             2d18h
    +governancedomain-oim-server1                                1/1     Running       0             41m
    +governancedomain-soa-server1                                1/1     Running       0             41m
    +helper                                                      1/1     Running       0             2d20h
    +
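
    If you want to follow the scale down as it happens, you can optionally watch the HPA and inspect its scaling events (these commands are additional checks, not part of the original steps):

    +
    $ kubectl get hpa -n oigns -w
    +$ kubectl describe hpa governancedomain-oim-cluster-hpa -n oigns
    +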
  12. +
+

Delete the HPA

+
    +
  1. +

    If you need to delete the HPA, you can do so by running the following commands:

    +
    $ cd $WORKDIR/kubernetes/hpa
    +$ kubectl delete -f autoscalehpa.yaml
    +
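
    To confirm the HPA has been removed, you can optionally run the following (an additional check, not part of the original steps); the command should report that no resources were found:

    +
    $ kubectl get hpa -n oigns
    +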
  2. +
+

Other considerations

+
    +
  • If HPA is deployed and you need to upgrade the OIG image, then you must delete the HPA before upgrading. Once the upgrade is successful you can deploy HPA again.
  • +
  • If you choose to start/stop an OIG Managed Server manually as per Domain Life Cycle, then it is recommended to delete the HPA before doing so.
  • +
+ + +
+ +
+ + + +
+ + +
+ + + +
+ +
+
+
+ + + + + + + + + + + + + + + + + + + diff --git a/docs/23.4.1/idm-products/oig/manage-oig-domains/index.html b/docs/23.4.1/idm-products/oig/manage-oig-domains/index.html new file mode 100644 index 000000000..2e9d468e5 --- /dev/null +++ b/docs/23.4.1/idm-products/oig/manage-oig-domains/index.html @@ -0,0 +1,4117 @@ + + + + + + + + + + + + Manage OIG domains :: Oracle Fusion Middleware on Kubernetes + + + + + + + + + + + + + + + + + + + + + + + + + +
+
+
+ +
+
+ + + + +
+
+ +
+
+ + +
+
+ +
+ +
+ +
+ +

+ + Manage OIG domains +

+ + + + + + + +

Important considerations for Oracle Identity Governance domains in Kubernetes.

+ + + + + + + +
    + + + + + + + + + + + + + + + + + +

    +a. Domain life cycle +

    + + + + + +

    Learn about the domain lifecycle of an OIG domain.

    + + + + + + + + + + + + +

    +b. WLST administration operations +

    + + + + + +

    Describes the steps for WLST administration using a helper pod running in the same Kubernetes cluster as the OIG domain.

    + + + + + + + + + + + + +

    +c. Running OIG utilities +

    + + + + + +

    Describes the steps for running OIG utilities in Kubernetes.

    + + + + + + + + + + + + +

    +d. Logging and visualization +

    + + + + + +

    Describes the steps for logging and visualization with Elasticsearch and Kibana.

    + + + + + + + + + + + + +

    +e. Monitoring an OIG domain +

    + + + + + +

    Describes the steps for monitoring the OIG domain and publishing the logs to Elasticsearch.

    + + + + + + + + + + + + +

    +f. Kubernetes Horizontal Pod Autoscaler +

    + + + + + +

    Describes the steps for implementing the Horizontal Pod Autoscaler.

    + + + + + + + + + + + + +

    +g. Delete the OIG domain home +

    + + + + + +

    Learn about the steps to clean up the OIG domain home.

    + + + + + + + + +
+ + + + + + + + + +
+ +
+ + +
+ + +
+ + + +
+ +
+
+
+ + + + + + + + + + + + + + + + + + diff --git a/docs/23.4.1/idm-products/oig/manage-oig-domains/index.xml b/docs/23.4.1/idm-products/oig/manage-oig-domains/index.xml new file mode 100644 index 000000000..1e693d8fa --- /dev/null +++ b/docs/23.4.1/idm-products/oig/manage-oig-domains/index.xml @@ -0,0 +1,96 @@ + + + + Manage OIG domains on Oracle Fusion Middleware on Kubernetes + /fmw-kubernetes/23.4.1/idm-products/oig/manage-oig-domains/ + Recent content in Manage OIG domains on Oracle Fusion Middleware on Kubernetes + Hugo -- gohugo.io + en-us + + + + + + a. Domain life cycle + /fmw-kubernetes/23.4.1/idm-products/oig/manage-oig-domains/domain-lifecycle/ + Mon, 01 Jan 0001 00:00:00 +0000 + + /fmw-kubernetes/23.4.1/idm-products/oig/manage-oig-domains/domain-lifecycle/ + View existing OIG servers Starting/Scaling up OIG Managed servers Stopping/Scaling down OIG Managed servers Stopping and starting the Administration Server and Managed Servers Domain lifecycle sample scripts As OIG domains use the WebLogic Kubernetes Operator, domain lifecyle operations are managed using the WebLogic Kubernetes Operator itself. +This document shows the basic operations for starting, stopping and scaling servers in the OIG domain. +For more detailed information refer to Domain Life Cycle in the WebLogic Kubernetes Operator documentation. + + + + b. WLST administration operations + /fmw-kubernetes/23.4.1/idm-products/oig/manage-oig-domains/wlst-admin-operations/ + Mon, 01 Jan 0001 00:00:00 +0000 + + /fmw-kubernetes/23.4.1/idm-products/oig/manage-oig-domains/wlst-admin-operations/ + Invoke WLST and access Administration Server To use WLST to administer the OIG domain, use a helper pod in the same Kubernetes cluster as the OIG Domain. + Check to see if the helper pod exists by running: +$ kubectl get pods -n &lt;domain_namespace&gt; | grep helper For example: +$ kubectl get pods -n oigns | grep helper The output should look similar to the following: +helper 1/1 Running 0 26h If the helper pod doesn&rsquo;t exist then see Step 1 in Prepare your environment to create it. + + + + c. Runnning OIG utilities + /fmw-kubernetes/23.4.1/idm-products/oig/manage-oig-domains/running-oig-utilities/ + Mon, 01 Jan 0001 00:00:00 +0000 + + /fmw-kubernetes/23.4.1/idm-products/oig/manage-oig-domains/running-oig-utilities/ + Run OIG utlities inside the OIG Kubernetes cluster. +Run utilities in an interactive bash shell Access a bash shell inside the &lt;domain_uid&gt;-oim-server1 pod: +$ kubectl -n oigns exec -it &lt;domain_uid&gt;-oim-server1 -- bash For example: +$ kubectl -n oigns exec -it governancedomain-oim-server1 -- bash This will take you into a bash shell in the running &lt;domain_uid&gt;-oim-server1 pod: +[oracle@governancedomain-oim-server1 oracle]$ Navigate to the /u01/oracle/idm/server/bin directory and execute the utility as required. + + + + d. Logging and visualization + /fmw-kubernetes/23.4.1/idm-products/oig/manage-oig-domains/logging-and-visualization/ + Mon, 01 Jan 0001 00:00:00 +0000 + + /fmw-kubernetes/23.4.1/idm-products/oig/manage-oig-domains/logging-and-visualization/ + After the OIG domain is set up you can publish operator and WebLogic Server logs into Elasticsearch and interact with them in Kibana. +Install Elasticsearch and Kibana If you do not already have a centralized Elasticsearch (ELK) stack then you must configure this first. 
For details on how to configure the ELK stack, follow Installing Elasticsearch (ELK) Stack and Kibana +Create the logstash pod Variables used in this chapter In order to create the logstash pod, you must create several files. + + + + e. Monitoring an OIG domain + /fmw-kubernetes/23.4.1/idm-products/oig/manage-oig-domains/monitoring-oim-domains/ + Mon, 01 Jan 0001 00:00:00 +0000 + + /fmw-kubernetes/23.4.1/idm-products/oig/manage-oig-domains/monitoring-oim-domains/ + After the OIG domain is set up you can monitor the OIG instance using Prometheus and Grafana. See Monitoring a domain. +The WebLogic Monitoring Exporter uses the WLS RESTful Management API to scrape runtime information and then exports Prometheus-compatible metrics. It is deployed as a web application in a WebLogic Server (WLS) instance, version 12.2.1 or later, typically, in the instance from which you want to get metrics. +There are two ways to setup monitoring and you should choose one method or the other: + + + + f. Kubernetes Horizontal Pod Autoscaler + /fmw-kubernetes/23.4.1/idm-products/oig/manage-oig-domains/hpa/ + Mon, 01 Jan 0001 00:00:00 +0000 + + /fmw-kubernetes/23.4.1/idm-products/oig/manage-oig-domains/hpa/ + Prerequisite configuration Deploy the Kubernetes Metrics Server Troubleshooting Deploy HPA Testing HPA Delete the HPA Other considerations Kubernetes Horizontal Pod Autoscaler (HPA) is supported from Weblogic Kubernetes Operator 4.0.X and later. +HPA allows automatic scaling (up and down) of the OIG Managed Servers. If load increases then extra OIG Managed Servers will be started as required, up to the value configuredManagedServerCount defined when the domain was created (see Prepare the create domain script). + + + + g. Delete the OIG domain home + /fmw-kubernetes/23.4.1/idm-products/oig/manage-oig-domains/delete-domain-home/ + Mon, 01 Jan 0001 00:00:00 +0000 + + /fmw-kubernetes/23.4.1/idm-products/oig/manage-oig-domains/delete-domain-home/ + Sometimes in production, but most likely in testing environments, you might want to remove the domain home that is generated using the create-domain.sh script. + Run the following command to delete the domain: +$ cd $WORKDIR/kubernetes/delete-domain $ ./delete-weblogic-domain-resources.sh -d &lt;domain_uid&gt; For example: +$ cd $WORKDIR/kubernetes/delete-domain $ ./delete-weblogic-domain-resources.sh -d governancedomain Drop the RCU schemas as follows: +$ kubectl exec -it helper -n &lt;domain_namespace&gt; -- /bin/bash [oracle@helper ~]$ [oracle@helper ~]$ export CONNECTION_STRING=&lt;db_host. + + + + \ No newline at end of file diff --git a/docs/23.4.1/idm-products/oig/manage-oig-domains/logging-and-visualization/index.html b/docs/23.4.1/idm-products/oig/manage-oig-domains/logging-and-visualization/index.html new file mode 100644 index 000000000..83cec62a9 --- /dev/null +++ b/docs/23.4.1/idm-products/oig/manage-oig-domains/logging-and-visualization/index.html @@ -0,0 +1,4475 @@ + + + + + + + + + + + + d. Logging and visualization :: Oracle Fusion Middleware on Kubernetes + + + + + + + + + + + + + + + + + + + + + + + + + +
+
+
+ +
+
+ + + + + + + +
+
+ +
+ +
+ +
+ +

+ + d. Logging and visualization +

+ + + + + + +

After the OIG domain is set up you can publish operator and WebLogic Server logs into Elasticsearch and interact with them in Kibana.

+

Install Elasticsearch and Kibana

+

If you do not already have a centralized Elasticsearch (ELK) stack then you must configure this first. For details on how to configure the ELK stack, follow +Installing Elasticsearch (ELK) Stack and Kibana

+

Create the logstash pod

+

Variables used in this chapter

+

In order to create the logstash pod, you must create several files. These files contain variables which you must substitute with values applicable to your environment.

+

Most of the values for the variables will be based on your ELK deployment as per Installing Elasticsearch (ELK) Stack and Kibana.

+

The table below outlines the variables and values you must set:

+ + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + +
Variable         Sample Value                             Description
<ELK_VER>        8.3.1                                    The version of logstash you want to install.
<ELK_SSL>        true                                     If SSL is enabled for ELK set the value to true, or if NON-SSL set to false. This value must be lowercase.
<ELK_HOSTS>      https://elasticsearch.example.com:9200   The URL for sending logs to Elasticsearch. HTTP if NON-SSL is used.
<ELKNS>          oigns                                    The domain namespace.
<ELK_USER>       logstash_internal                        The name of the user for logstash to access Elasticsearch.
<ELK_PASSWORD>   password                                 The password for ELK_USER.
<ELK_APIKEY>     apikey                                   The API key details.
+

You will also need the BASE64 version of the Certificate Authority (CA) certificate(s) that signed the certificate of the Elasticsearch server. If using a self-signed certificate, this is the self-signed certificate of the Elasticsearch server itself. See Copying the Elasticsearch Certificate for details on how to get the correct certificate. In the example below the certificate is called elk.crt.

+
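
For example, if the Elasticsearch server uses a self-signed certificate, one possible way to capture it (shown only as a sketch using an example hostname; follow Copying the Elasticsearch Certificate for the supported method) is:

+
$ echo | openssl s_client -connect elasticsearch.example.com:9200 2>/dev/null | openssl x509 -outform PEM > elk.crt
+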

Create Kubernetes secrets

+
    +
  1. +

    Create a Kubernetes secret for Elasticsearch using the API Key or Password.

    +

    a) If ELK uses an API Key for authentication:

    +
    $ kubectl create secret generic elasticsearch-pw-elastic -n <domain_namespace> --from-literal password=<ELK_APIKEY>
    +

    For example:

    +
    $ kubectl create secret generic elasticsearch-pw-elastic -n oigns --from-literal password=<ELK_APIKEY>
    +

    The output will look similar to the following:

    +
    secret/elasticsearch-pw-elastic created
    +

    b) If ELK uses a password for authentication:

    +
    $ kubectl create secret generic elasticsearch-pw-elastic -n <domain_namespace> --from-literal password=<ELK_PASSWORD>
    +

    For example:

    +
    $ kubectl create secret generic elasticsearch-pw-elastic -n oigns --from-literal password=<ELK_PASSWORD>
    +

    The output will look similar to the following:

    +
    secret/elasticsearch-pw-elastic created
    +

    Note: It is recommended that the ELK Stack is created with authentication enabled. If no authentication is enabled you may create a secret using the values above.

    +
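
    To confirm the secret was created with the expected key, you can optionally describe it (an additional check, not part of the original steps); this shows the password key and its size but not its value:

    +
    $ kubectl describe secret elasticsearch-pw-elastic -n oigns
    +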
  2. +
  3. +

    Create a Kubernetes secret to access the required images on hub.docker.com:

    +

    Note: Before executing the command below, you must first have a user account on hub.docker.com.

    +
    kubectl create secret docker-registry "dockercred" --docker-server="https://index.docker.io/v1/" \
    +--docker-username="<DOCKER_USER_NAME>" \
    +--docker-password=<DOCKER_PASSWORD> --docker-email=<DOCKER_EMAIL_ID> \
    +--namespace=<domain_namespace>
    +

    For example:

    +
    kubectl create secret docker-registry "dockercred" --docker-server="https://index.docker.io/v1/" \
    +--docker-username="user@example.com" \
    +--docker-password=password --docker-email=user@example.com \
    +--namespace=oigns
    +

    The output will look similar to the following:

    +
    secret/dockercred created
    +
  4. +
+

Find the mountPath details

+
    +
  1. +

    Run the following command to get the mountPath of your domain:

    +
    $ kubectl describe domains <domain_uid> -n <domain_namespace> | grep "Mount Path"
    +

    For example:

    +
    $ kubectl describe domains governancedomain -n oigns | grep "Mount Path"
    +

    The output will look similar to the following:

    +
    Mount Path:  /u01/oracle/user_projects/domains
    +
  2. +
+

Find the persistentVolumeClaim details

+
    +
  1. +

    Run the following command to get the OIG domain persistent volume details:

    +
    $ kubectl get pv -n <domain_namespace>
    +

    For example:

    +
    $ kubectl get pv -n oigns
    +

    The output will look similar to the following:

    +
    NAME                         CAPACITY   ACCESS MODES   RECLAIM POLICY   STATUS   CLAIM                               STORAGECLASS                         REASON   AGE
    +governancedomain-domain-pv   10Gi       RWX            Retain           Bound    oigns/governancedomain-domain-pvc   governancedomain-oim-storage-class            28h
    +

    Make note of the CLAIM value, for example in this case governancedomain-domain-pvc.

    +
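
    You can also optionally list the persistent volume claims in the namespace directly (an additional command, not part of the original steps), which shows the same claim name and the persistent volume it is bound to:

    +
    $ kubectl get pvc -n oigns
    +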
  2. +
+

Create the Configmap

+
    +
  1. +

    Copy the elk.crt file to the $WORKDIR/kubernetes/elasticsearch-and-kibana directory.

    +
  2. +
  3. +

    Navigate to the $WORKDIR/kubernetes/elasticsearch-and-kibana directory and run the following:

    +
    kubectl create configmap elk-cert --from-file=elk.crt -n <namespace>
    +

    For example:

    +
    kubectl create configmap elk-cert --from-file=elk.crt -n oigns
    +

    The output will look similar to the following:

    +
    configmap/elk-cert created
    +
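
    If you want to check that the certificate was loaded into the configmap, you can optionally describe it (an additional check, not part of the original steps):

    +
    $ kubectl describe configmap elk-cert -n oigns
    +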
  4. +
  5. +

    Create a logstash_cm.yaml file in the $WORKDIR/kubernetes/elasticsearch-and-kibana directory as follows:

    +
    apiVersion: v1
    +kind: ConfigMap
    +metadata:
    +  name: oig-logstash-configmap
    +  namespace: <ELKNS>
    +data:
    +  logstash.yml: |
    +    #http.host: "0.0.0.0"
    +  logstash-config.conf: |
    +    input {
    +      file {
    +        path => "/u01/oracle/user_projects/domains/logs/governancedomain/AdminServer*.log"
    +        tags => "Adminserver_log"
    +        start_position => beginning
    +      }
    +      file {
    +        path => "/u01/oracle/user_projects/domains/logs/governancedomain/soa_server*.log"
    +        tags => "soaserver_log"
    +        start_position => beginning
    +      }
    +      file {
    +        path => "/u01/oracle/user_projects/domains/logs/governancedomain/oim_server*.log"
    +        tags => "Oimserver_log"
    +        start_position => beginning
    +      }
    +      file {
    +        path => "/u01/oracle/user_projects/domains/governancedomain/servers/AdminServer/logs/AdminServer-diagnostic.log"
    +        tags => "Adminserver_diagnostic"
    +        start_position => beginning
    +      }
    +      file {
    +        path => "/u01/oracle/user_projects/domains/governancedomain/servers/**/logs/soa_server*-diagnostic.log"
    +        tags => "Soa_diagnostic"
    +        start_position => beginning
    +      }
    +      file {
    +        path => "/u01/oracle/user_projects/domains/governancedomain/servers/**/logs/oim_server*-diagnostic.log"
    +        tags => "Oimserver_diagnostic"
    +        start_position => beginning
    +      }
    +      file {
    +        path => "/u01/oracle/user_projects/domains/governancedomain/servers/**/logs/access*.log"
    +        tags => "Access_logs"
    +        start_position => beginning
    +      }
    +    }
    +    filter {
    +      grok {
    +        match => [ "message", "<%{DATA:log_timestamp}> <%{WORD:log_level}> <%{WORD:thread}> <%{HOSTNAME:hostname}> <%{HOSTNAME:servername}> <%{DATA:timer}> <<%{DATA:kernel}>> <> <%{DATA:uuid}> <%{NUMBER:timestamp}> <%{DATA:misc}    > <%{DATA:log_number}> <%{DATA:log_message}>" ]
    +      }
    +    if "_grokparsefailure" in [tags] {
    +        mutate {
    +            remove_tag => [ "_grokparsefailure" ]
    +        }
    +    }
    +    }
    +    output {
    +      elasticsearch {
    +    hosts => ["<ELK_HOSTS>"]
    +    cacert => '/usr/share/logstash/config/certs/elk.crt'
    +    index => "oiglogs-000001"
    +    ssl => <ELK_SSL>
    +    ssl_certificate_verification => false
    +    user => "<ELK_USER>"
    +    password => "${ELASTICSEARCH_PASSWORD}"
    +    api_key => "${ELASTICSEARCH_PASSWORD}"
    +      }
    +    }
    +

    Change the values in the above file as follows:

    +
      +
    • Change the <ELKNS>, <ELK_HOSTS>, <ELK_SSL>, and <ELK_USER> to match the values for your environment.
    • +
    • Change /u01/oracle/user_projects/domains to match the mountPath returned earlier.
    • +
    • If your domainUID is anything other than governancedomain, change each instance of governancedomain to your domainUID.
    • +
    • If using an API key for your ELK authentication, delete the user and password lines.
    • +
    • If using a password for ELK authentication, delete the api_key line.
    • +
    • If no authentication is used for ELK, delete the user, password, and api_key lines.
    • +
    +

    For example:

    +
    apiVersion: v1
    +kind: ConfigMap
    +metadata:
    +  name: oig-logstash-configmap
    +  namespace: oigns
    +data:
    +  logstash.yml: |
    +    #http.host: "0.0.0.0"
    +  logstash-config.conf: |
    +    input {
    +      file {
    +        path => "/u01/oracle/user_projects/domains/logs/governancedomain/AdminServer*.log"
    +        tags => "Adminserver_log"
    +        start_position => beginning
    +      }
    +      file {
    +        path => "/u01/oracle/user_projects/domains/logs/governancedomain/soa_server*.log"
    +        tags => "soaserver_log"
    +        start_position => beginning
    +      }
    +      file {
    +        path => "/u01/oracle/user_projects/domains/logs/governancedomain/oim_server*.log"
    +        tags => "Oimserver_log"
    +        start_position => beginning
    +      }
    +      file {
    +        path => "/u01/oracle/user_projects/domains/governancedomain/servers/AdminServer/logs/AdminServer-diagnostic.log"
    +        tags => "Adminserver_diagnostic"
    +        start_position => beginning
    +      }
    +      file {
    +        path => "/u01/oracle/user_projects/domains/governancedomain/servers/**/logs/soa_server*-diagnostic.log"
    +        tags => "Soa_diagnostic"
    +        start_position => beginning
    +      }
    +      file {
    +        path => "/u01/oracle/user_projects/domains/governancedomain/servers/**/logs/oim_server*-diagnostic.log"
    +        tags => "Oimserver_diagnostic"
    +        start_position => beginning
    +      }
    +      file {
    +        path => "/u01/oracle/user_projects/domains/governancedomain/servers/**/logs/access*.log"
    +        tags => "Access_logs"
    +        start_position => beginning
    +      }
    +    }
    +    filter {
    +      grok {
    +        match => [ "message", "<%{DATA:log_timestamp}> <%{WORD:log_level}> <%{WORD:thread}> <%{HOSTNAME:hostname}> <%{HOSTNAME:servername}> <%{DATA:timer}> <<%{DATA:kernel}>> <> <%{DATA:uuid}> <%{NUMBER:timestamp}> <%{DATA:misc}    > <%{DATA:log_number}> <%{DATA:log_message}>" ]
    +      }
    +    if "_grokparsefailure" in [tags] {
    +        mutate {
    +            remove_tag => [ "_grokparsefailure" ]
    +        }
    +    }
    +    }
    +    output {
    +      elasticsearch {
    +    hosts => ["https://elasticsearch.example.com:9200"]
    +    cacert => '/usr/share/logstash/config/certs/elk.crt'
    +    index => "oiglogs-000001"
    +    ssl => true
    +    ssl_certificate_verification => false
    +    user => "logstash_internal"
    +    password => "${ELASTICSEARCH_PASSWORD}"
    +      }
    +    }
    +
  6. +
  7. +

    Run the following command to create the configmap:

    +
    $  kubectl apply -f logstash_cm.yaml
    +

    The output will look similar to the following:

    +
    configmap/oig-logstash-configmap created
    +
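
    Optionally, you can review the stored pipeline configuration to make sure your substitutions were applied (an additional check, not part of the original steps):

    +
    $ kubectl get configmap oig-logstash-configmap -n oigns -o yaml
    +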
  8. +
+

Deploy the logstash pod

+
    +
  1. +

    Navigate to the $WORKDIR/kubernetes/elasticsearch-and-kibana directory and create a logstash.yaml file as follows:

    +
    apiVersion: apps/v1
    +kind: Deployment
    +metadata:
    +  name: oig-logstash
    +  namespace: <ELKNS>
    +spec:
    +  selector:
    +    matchLabels:
    +      k8s-app: logstash
    +  template: # create pods using pod definition in this template
    +    metadata:
    +      labels:
    +        k8s-app: logstash
    +    spec:
    +      imagePullSecrets:
    +      - name: dockercred
    +      containers:
    +      - command:
    +        - logstash
    +        image: logstash:<ELK_VER>
    +        imagePullPolicy: IfNotPresent
    +        name: oig-logstash
    +        env:
    +        - name: ELASTICSEARCH_PASSWORD
    +          valueFrom:
    +            secretKeyRef:
    +              name: elasticsearch-pw-elastic
    +              key: password
    +        resources:
    +        ports:
    +        - containerPort: 5044
    +          name: logstash
    +        volumeMounts:
    +        - mountPath: /u01/oracle/user_projects/domains
    +          name: weblogic-domain-storage-volume
    +        - name: shared-logs
    +          mountPath: /shared-logs
    +        - mountPath: /usr/share/logstash/pipeline/
    +          name: oig-logstash-pipeline
    +        - mountPath: /usr/share/logstash/config/logstash.yml
    +          subPath: logstash.yml
    +          name: config-volume
    +        - mountPath: /usr/share/logstash/config/certs
    +          name: elk-cert
    +      volumes:
    +      - configMap:
    +          defaultMode: 420
    +          items:
    +          - key: elk.crt
    +            path: elk.crt
    +          name: elk-cert
    +        name: elk-cert
    +      - configMap:
    +          defaultMode: 420
    +          items:
    +          - key: logstash-config.conf
    +            path: logstash-config.conf
    +          name: oig-logstash-configmap
    +        name: oig-logstash-pipeline
    +      - configMap:
    +          defaultMode: 420
    +          items:
    +          - key: logstash.yml
    +            path: logstash.yml
    +          name: oig-logstash-configmap
    +        name: config-volume
    +      - name: weblogic-domain-storage-volume
    +        persistentVolumeClaim:
    +          claimName: governancedomain-domain-pvc
    +      - name: shared-logs
    +        emptyDir: {}
    +
      +
    • Change the <ELKNS> and <ELK_VER> to match the values for your environment.
    • +
    • Change /u01/oracle/user_projects/domains to match the mountPath returned earlier.
    • +
    • Change the claimName value to match the claimName returned earlier.
    • +
    • If your Kubernetes environment does not allow access to the internet to pull the logstash image, you must load the logstash image in your own container registry and change image: logstash:<ELK_VER> to the location of the image in your container registry, for example: container-registry.example.com/logstash:8.3.1.
    • +
    +

    For example:

    +
    apiVersion: apps/v1
    +kind: Deployment
    +metadata:
    +  name: oig-logstash
    +  namespace: oigns
    +spec:
    +  selector:
    +    matchLabels:
    +      k8s-app: logstash
    +  template: # create pods using pod definition in this template
    +    metadata:
    +      labels:
    +        k8s-app: logstash
    +    spec:
    +      imagePullSecrets:
    +      - name: dockercred
    +      containers:
    +      - command:
    +        - logstash
    +        image: logstash:8.3.1
    +        imagePullPolicy: IfNotPresent
    +        name: oig-logstash
    +        env:
    +        - name: ELASTICSEARCH_PASSWORD
    +          valueFrom:
    +            secretKeyRef:
    +              name: elasticsearch-pw-elastic
    +              key: password
    +        resources:
    +        ports:
    +        - containerPort: 5044
    +          name: logstash
    +        volumeMounts:
    +        - mountPath: /u01/oracle/user_projects/domains
    +          name: weblogic-domain-storage-volume
    +        - name: shared-logs
    +          mountPath: /shared-logs
    +        - mountPath: /usr/share/logstash/pipeline/
    +          name: oig-logstash-pipeline
    +        - mountPath: /usr/share/logstash/config/logstash.yml
    +          subPath: logstash.yml
    +          name: config-volume
    +        - mountPath: /usr/share/logstash/config/certs
    +          name: elk-cert
    +      volumes:
    +      - configMap:
    +          defaultMode: 420
    +          items:
    +          - key: elk.crt
    +            path: elk.crt
    +          name: elk-cert
    +        name: elk-cert
    +      - configMap:
    +          defaultMode: 420
    +          items:
    +          - key: logstash-config.conf
    +            path: logstash-config.conf
    +          name: oig-logstash-configmap
    +        name: oig-logstash-pipeline
    +      - configMap:
    +          defaultMode: 420
    +          items:
    +          - key: logstash.yml
    +            path: logstash.yml
    +          name: oig-logstash-configmap
    +        name: config-volume
    +      - name: weblogic-domain-storage-volume
    +        persistentVolumeClaim:
    +          claimName: governancedomain-domain-pvc
    +      - name: shared-logs
    +        emptyDir: {}
    +
  2. +
  3. +

    Deploy the logstash pod by executing the following command:

    +
    $ kubectl create -f $WORKDIR/kubernetes/elasticsearch-and-kibana/logstash.yaml 
    +

    The output will look similar to the following:

    +
    deployment.apps/oig-logstash created
    +
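
    Optionally, you can wait for the deployment to become available before checking the pods in the next step (an additional command, not part of the original steps):

    +
    $ kubectl rollout status deployment/oig-logstash -n oigns
    +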
  4. +
  5. +

    Run the following command to check that the logstash pod is created correctly:

    +
    $ kubectl get pods -n <namespace>
    +

    For example:

    +
    $ kubectl get pods -n oigns
    +

    The output should look similar to the following:

    +
    NAME                                                        READY   STATUS      RESTARTS   AGE
    +governancedomain-adminserver                                1/1     Running     0          90m
    +governancedomain-create-fmw-infra-sample-domain-job-fqgnr   0/1     Completed   0          2d19h
    +governancedomain-oim-server1                                1/1     Running     0          88m
    +governancedomain-soa-server1                                1/1     Running     0          88m
    +helper                                                      1/1     Running     0          2d20h
    +oig-logstash-77fbbc66f8-lsvcw                               1/1     Running     0          3m25s
    +

    Note: Wait a couple of minutes to make sure the pod has not had any failures or restarts. If the pod fails you can view the pod log using:

    +
    $ kubectl logs -f oig-logstash-<pod> -n oigns
    +

    Most errors occur due to misconfiguration of logstash_cm.yaml or logstash.yaml, usually because an incorrect value was set or the certificate was not pasted with the correct indentation.

    +
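
    In addition to the pod log, describing the pod can help identify image pull or volume mount problems (an optional check, not part of the original steps):

    +
    $ kubectl describe pod oig-logstash-<pod> -n oigns
    +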

    If the pod has errors, delete the pod and configmap as follows:

    +
    $ kubectl delete -f $WORKDIR/kubernetes/elasticsearch-and-kibana/logstash.yaml
    +$ kubectl delete -f $WORKDIR/kubernetes/elasticsearch-and-kibana/logstash_cm.yaml
    +

    Once you have resolved the issue in the yaml files, run the commands outlined earlier to recreate the configmap and logstash pod.

    +
  6. +
+

Verify and access the Kibana console

+

To access the Kibana console you will need the Kibana URL as per Installing Elasticsearch (ELK) Stack and Kibana.

+

For Kibana 7.7.x and below:

+
    +
  1. +

    Access the Kibana console with http://<hostname>:<port>/app/kibana and login with your username and password.

    +
  2. +
  3. +

    From the Navigation menu, navigate to Management > Kibana > Index Patterns.

    +
  4. +
  5. +

    In the Create Index Pattern page enter oiglogs* for the Index pattern and click Next Step.

    +
  6. +
  7. +

    In the Configure settings page, from the Time Filter field name drop down menu select @timestamp and click Create index pattern.

    +
  8. +
  9. +

    Once the index pattern is created click on Discover in the navigation menu to view the OIG logs.

    +
  10. +
+

For Kibana version 7.8.x and above:

+
    +
  1. +

    Access the Kibana console with http://<hostname>:<port>/app/kibana and login with your username and password.

    +
  2. +
  3. +

    From the Navigation menu, navigate to Management > Stack Management.

    +
  4. +
  5. +

    Click Data Views in the Kibana section.

    +
  6. +
  7. +

    Click Create Data View and enter the following information:

    +
      +
    • Name: oiglogs*
    • +
    • Timestamp: @timestamp
    • +
    +
  8. +
  9. +

    Click Create Data View.

    +
  10. +
  11. +

    From the Navigation menu, click Discover to view the log file entries.

    +
  12. +
  13. +

    From the drop down menu, select oiglogs* to view the log file entries.

    +
  14. +
+ + +
+ +
+ + + +
+ + +
+ + + +
+ +
+
+
+ + + + + + + + + + + + + + + + + + + diff --git a/docs/23.4.1/idm-products/oig/manage-oig-domains/monitoring-oim-domains/index.html b/docs/23.4.1/idm-products/oig/manage-oig-domains/monitoring-oim-domains/index.html new file mode 100644 index 000000000..a4f4a476e --- /dev/null +++ b/docs/23.4.1/idm-products/oig/manage-oig-domains/monitoring-oim-domains/index.html @@ -0,0 +1,4639 @@ + + + + + + + + + + + + e. Monitoring an OIG domain :: Oracle Fusion Middleware on Kubernetes + + + + + + + + + + + + + + + + + + + + + + + + + +
+
+
+ +
+
+ + + + + + + +
+
+ +
+ +
+ +
+ +

+ + e. Monitoring an OIG domain +

+ + + + + + +

After the OIG domain is set up you can monitor the OIG instance using Prometheus and Grafana. See Monitoring a domain.

+

The WebLogic Monitoring Exporter uses the WLS RESTful Management API to scrape runtime information and then exports Prometheus-compatible metrics. It is deployed as a web application in a WebLogic Server (WLS) instance, version 12.2.1 or later, typically in the instance from which you want to get metrics.

+

There are two ways to set up monitoring and you should choose one method or the other:

+
    +
  1. Setup automatically using setup-monitoring.sh
  2. +
  3. Setup using manual configuration
  4. +
+

Setup automatically using setup-monitoring.sh

+

The $WORKDIR/kubernetes/monitoring-service/setup-monitoring.sh script sets up monitoring for the OIG domain. It installs Prometheus, Grafana, WebLogic Monitoring Exporter and deploys the web applications to the OIG domain. It also deploys the WebLogic Server Grafana dashboard.

+

For usage details execute ./setup-monitoring.sh -h.

+
    +
  1. +

    Edit the $WORKDIR/kubernetes/monitoring-service/monitoring-inputs.yaml and change the domainUID, domainNamespace, and weblogicCredentialsSecretName to correspond to your deployment. Also change wlsMonitoringExporterTosoaCluster, wlsMonitoringExporterTooimCluster, and exposeMonitoringNodePort to true. For example:

    +
    version: create-governancedomain-monitoring-inputs-v1
    +
    +# Unique ID identifying your domain.
    +# This ID must not contain an underscore ("_"), and must be lowercase and unique across all domains in a Kubernetes cluster.
    +domainUID: governancedomain
    +
    +# Name of the domain namespace
    +domainNamespace: oigns
    +
    +# Boolean value indicating whether to install kube-prometheus-stack
    +setupKubePrometheusStack: true
    +
    +# Additional parameters for helm install kube-prometheus-stack
    +# Refer https://github.com/prometheus-community/helm-charts/blob/main/charts/kube-prometheus-stack/values.yaml for additional parameters
    +# Sample :
    +# additionalParamForKubePrometheusStack: --set nodeExporter.enabled=false --set prometheusOperator.tls.enabled=false --set prometheusOperator.admissionWebhooks.enabled=false
    +additionalParamForKubePrometheusStack:
    +
    +# Name of the monitoring namespace
    +monitoringNamespace: monitoring
    +  
    +# Name of the Admin Server
    +adminServerName: AdminServer
    +#
    +# Port number for admin server
    +adminServerPort: 7001
    +
    +# Cluster name
    +soaClusterName: soa_cluster
    +
    +# Port number for managed server
    +soaManagedServerPort: 8001
    +
    +# WebLogic Monitoring Exporter to Cluster
    +wlsMonitoringExporterTosoaCluster: true
    +
    +# Cluster name
    +oimClusterName: oim_cluster
    +
    +# Port number for managed server
    +oimManagedServerPort: 14000
    +
    +# WebLogic Monitoring Exporter to Cluster
    +wlsMonitoringExporterTooimCluster: true
    +
    +
    +# Boolean to indicate if the adminNodePort will be exposed
    +exposeMonitoringNodePort: true
    +
    +# NodePort to expose Prometheus
    +prometheusNodePort: 32101
    +
    +# NodePort to expose Grafana
    +grafanaNodePort: 32100
    +   
    +# NodePort to expose Alertmanager
    +alertmanagerNodePort: 32102
    +
    +# Name of the Kubernetes secret for the Admin Server's username and password
    +weblogicCredentialsSecretName: oig-domain-credentials
    +

    Note: If your cluster does not have access to the internet to pull external images, such as grafana or prometheus, you must load the images in a local container registry. You must then use additionalParamForKubePrometheusStack to set the location of the images in your local container registry, for example:

    +
    # Additional parameters for helm install kube-prometheus-stack
    +# Refer https://github.com/prometheus-community/helm-charts/blob/main/charts/kube-prometheus-stack/values.yaml for additional parameters
    +# Sample :
    +# additionalParamForKubePrometheusStack: --set nodeExporter.enabled=false --set prometheusOperator.tls.enabled=false --set prometheusOperator.admissionWebhooks.enabled=false
    +additionalParamForKubePrometheusStack: --set grafana.image.repository=container-registry.example.com/grafana --set grafana.image.tag=8.3.4
    +
  2. +
  3. +

    Run the following command to setup monitoring:

    +
    $ cd $WORKDIR/kubernetes/monitoring-service
    +$ ./setup-monitoring.sh -i monitoring-inputs.yaml
    +

    The output should be similar to the following:

    +
    Monitoring setup in  monitoring in progress
    +
    +node/worker-node1 not labeled
    +node/worker-node2 not labeled
    +node/master-node not labeled
    +Setup prometheus-community/kube-prometheus-stack started
    +"prometheus-community" already exists with the same configuration, skipping
    +Hang tight while we grab the latest from your chart repositories...
    +...Successfully got an update from the "stable" chart repository
    +...Successfully got an update from the "prometheus" chart repository
    +...Successfully got an update from the "prometheus-community" chart repository
    +Update Complete. ⎈Happy Helming!⎈
    +Setup prometheus-community/kube-prometheus-stack in progress
    +NAME: monitoring
    +LAST DEPLOYED: <DATE>
    +NAMESPACE: monitoring
    +STATUS: deployed
    +REVISION: 1
    +NOTES:
    +kube-prometheus-stack has been installed. Check its status by running:
    +  kubectl --namespace monitoring get pods -l "release=monitoring"
    +
    +Visit https://github.com/prometheus-operator/kube-prometheus for instructions on how to create & configure Alertmanager and Prometheus instances using the Operator.
    +Setup prometheus-community/kube-prometheus-stack completed
    +Deploy WebLogic Monitoring Exporter started
    +Deploying WebLogic Monitoring Exporter with domainNamespace[oigns], domainUID[governancedomain], adminServerPodName[governancedomain-adminserver]
    +  % Total    % Received % Xferd  Average Speed   Time    Time     Time  Current
    +                                 Dload  Upload   Total   Spent    Left  Speed
    +100   655  100   655    0     0   1159      0 --:--:-- --:--:-- --:--:--  1159
    +100 2196k  100 2196k    0     0  1763k      0  0:00:01  0:00:01 --:--:-- 20.7M
    +created $WORKDIR/kubernetes/monitoring-service/scripts/wls-exporter-deploy dir
    +created /tmp/ci-GJSQsiXrFE
    +/tmp/ci-GJSQsiXrFE $WORKDIR/kubernetes/monitoring-service
    +in temp dir
    +  adding: WEB-INF/weblogic.xml (deflated 61%)
    +  adding: config.yml (deflated 60%)
    +$WORKDIR/kubernetes/monitoring-service
    +created /tmp/ci-KeyZrdouMD
    +/tmp/ci-KeyZrdouMD $WORKDIR/kubernetes/monitoring-service
    +in temp dir
    +  adding: WEB-INF/weblogic.xml (deflated 61%)
    +  adding: config.yml (deflated 60%)
    +$WORKDIR/kubernetes/monitoring-service
    +created /tmp/ci-QE9HawIIgT
    +/tmp/ci-QE9HawIIgT $WORKDIR/kubernetes/monitoring-service
    +in temp dir
    +  adding: WEB-INF/weblogic.xml (deflated 61%)
    +  adding: config.yml (deflated 60%)
    +$WORKDIR/kubernetes/monitoring-service
    +  
    +Initializing WebLogic Scripting Tool (WLST) ...
    +
    +Welcome to WebLogic Server Administration Scripting Shell
    +
    +Type help() for help on available commands
    +
    +Connecting to t3://governancedomain-adminserver:7001 with userid weblogic ...
    +Successfully connected to Admin Server "AdminServer" that belongs to domain "governancedomain".
    +
    +Warning: An insecure protocol was used to connect to the server.
    +To ensure on-the-wire security, the SSL port or Admin port should be used instead.
    +
    +Deploying .........
    +Deploying application from /u01/oracle/wls-exporter-deploy/wls-exporter-adminserver.war to targets AdminServer (upload=true) ...
    +<DATE> <Info> <J2EE Deployment SPI> <BEA-260121> <Initiating deploy operation for application, wls-exporter-adminserver [archive: /u01/oracle/wls-exporter-deploy/wls-exporter-adminserver.war], to AdminServer .>
    +.Completed the deployment of Application with status completed
    +Current Status of your Deployment:
    +Deployment command type: deploy
    +Deployment State : completed
    +Deployment Message : no message
    +Starting application wls-exporter-adminserver.
    +<DATE> <Info> <J2EE Deployment SPI> <BEA-260121> <Initiating start operation for application, wls-exporter-adminserver [archive: null], to AdminServer .>
    +.Completed the start of Application with status completed
    +Current Status of your Deployment:
    +Deployment command type: start
    +Deployment State : completed
    +Deployment Message : no message
    +Deploying .........
    +Deploying application from /u01/oracle/wls-exporter-deploy/wls-exporter-soa.war to targets soa_cluster (upload=true) ...
    +<DATE> <Info> <J2EE Deployment SPI> <BEA-260121> <Initiating deploy operation for application, wls-exporter-soa [archive: /u01/oracle/wls-exporter-deploy/wls-exporter-soa.war], to soa_cluster .>
    +.Completed the deployment of Application with status completed
    +Current Status of your Deployment:
    +Deployment command type: deploy
    +Deployment State : completed
    +Deployment Message : no message
    +Starting application wls-exporter-soa.
    +<DATE> <Info> <J2EE Deployment SPI> <BEA-260121> <Initiating start operation for application, wls-exporter-soa [archive: null], to soa_cluster .>
    +.Completed the start of Application with status completed
    +Current Status of your Deployment:
    +Deployment command type: start
    +Deployment State : completed
    +Deployment Message : no message
    +Deploying .........
    +Deploying application from /u01/oracle/wls-exporter-deploy/wls-exporter-oim.war to targets oim_cluster (upload=true) ...
    +<DATE> <Info> <J2EE Deployment SPI> <BEA-260121> <Initiating deploy operation for application, wls-exporter-oim [archive: /u01/oracle/wls-exporter-deploy/wls-exporter-oim.war], to oim_cluster .>
    +.Completed the deployment of Application with status completed
    +Current Status of your Deployment:
    +Deployment command type: deploy
    +Deployment State : completed
    +Deployment Message : no message
    +Starting application wls-exporter-oim.
    +<DATE> <Info> <J2EE Deployment SPI> <BEA-260121> <Initiating start operation for application, wls-exporter-oim [archive: null], to oim_cluster .>
    +.Completed the start of Application with status completed
    +Current Status of your Deployment:
    +Deployment command type: start
    +Deployment State : completed
    +Deployment Message : no message
    +Disconnected from weblogic server: AdminServer
    +
    +
    +Exiting WebLogic Scripting Tool.
    +
    +<DATE> <Warning> <JNDI> <BEA-050001> <WLContext.close() was called in a different thread than the one in which it was created.>
    +Deploy WebLogic Monitoring Exporter completed
    +secret/basic-auth created
    +servicemonitor.monitoring.coreos.com/wls-exporter created
    +Deploying WebLogic Server Grafana Dashboard....
    +{"id":25,"slug":"weblogic-server-dashboard","status":"success","uid":"5yUwzbZWz","url":"/d/5yUwzbZWz/weblogic-server-dashboard","version":1}
    +Deployed WebLogic Server Grafana Dashboard successfully
    +
    +Grafana is available at NodePort: 32100
    +Prometheus is available at NodePort: 32101
    +Altermanager is available at NodePort: 32102
    +==============================================================
    +
  4. +
+

Prometheus service discovery

+

After the ServiceMonitor is deployed, the wls-exporter should be discovered by Prometheus, which can then collect its metrics.

+
    +
  1. +

    Access the following URL to view Prometheus service discovery: http://${MASTERNODE-HOSTNAME}:32101/service-discovery

    +
  2. +
  3. +

    Click on serviceMonitor/oigns/wls-exporter/0 and then show more. Verify all the targets are mentioned.

    +
  4. +
+

Note: It may take several minutes for serviceMonitor/oigns/wls-exporter/0 to appear, so refresh the page until it does.

+
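
If you prefer the command line, you can also optionally query the Prometheus HTTP API to confirm that the wls-exporter targets are being scraped (an additional check, not part of the original steps; the hostname and NodePort follow the values used above):

+
$ curl -s http://${MASTERNODE-HOSTNAME}:32101/api/v1/targets | grep wls-exporter
+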

Grafana dashboard

+
    +
  1. +

    Access the Grafana dashboard with the following URL: http://${MASTERNODE-HOSTNAME}:32100 and login with admin/admin. Change your password when prompted.

    +
  2. +
  3. +

    In the Dashboards panel, click on WebLogic Server Dashboard. The dashboard for your OIG domain should be displayed. If it is not displayed, click the Search icon in the left hand menu and search for WebLogic Server Dashboard.

    +
  4. +
+

Cleanup

+

To uninstall the Prometheus, Grafana, WebLogic Monitoring Exporter and the deployments, you can run the $WORKDIR/kubernetes/monitoring-service/delete-monitoring.sh script. For usage details execute ./delete-monitoring.sh -h.

+
    +
  1. +

    To uninstall run the following command:

    +
    $ cd $WORKDIR/kubernetes/monitoring-service
    +$ ./delete-monitoring.sh -i monitoring-inputs.yaml
    +
  2. +
+

Setup using manual configuration

+

Install Prometheus, Grafana and WebLogic Monitoring Exporter manually. Create the web applications and deploy them to the OIG domain.

+

Deploy the Prometheus operator

+
    +
  1. +

    Kube-Prometheus requires all nodes to be labelled with kubernetes.io/os=linux. To check if your nodes are labelled, run the following:

    +
    $ kubectl get nodes --show-labels
    +

    If the nodes are labelled the output will look similar to the following:

    +
    NAME             STATUS   ROLES    AGE   VERSION   LABELS
    +worker-node1     Ready    <none>   42d   v1.20.10  beta.kubernetes.io/arch=amd64,beta.kubernetes.io/os=linux,kubernetes.io/arch=amd64,kubernetes.io/hostname=worker-node1,kubernetes.io/os=linux
    +worker-node2     Ready    <none>   42d   v1.20.10  beta.kubernetes.io/arch=amd64,beta.kubernetes.io/os=linux,kubernetes.io/arch=amd64,kubernetes.io/hostname=worker-node2,kubernetes.io/os=linux
    +master-node      Ready    master   42d   v1.20.10  beta.kubernetes.io/arch=amd64,beta.kubernetes.io/os=linux,kubernetes.io/arch=amd64,kubernetes.io/hostname=master-node,kubernetes.io/os=linux,node-role.kubernetes.io/master=
    +

    If the nodes are not labelled, run the following command:

    +
    $ kubectl label nodes --all kubernetes.io/os=linux
    +
  2. +
  3. +

    Clone Prometheus by running the following commands:

    +
    $ cd $WORKDIR/kubernetes/monitoring-service
    +$ git clone https://github.com/coreos/kube-prometheus.git -b v0.7.0
    +

    Note: Refer to the compatibility matrix of Kube Prometheus and download the release of the repository that corresponds to the Kubernetes version of your cluster.

    +
  4. +
  5. +

    If your cluster does not have access to the internet to pull external images, such as grafana, you must load the images in a local container registry.

    +

    For grafana, edit the $WORKDIR/kubernetes/monitoring-service/kube-prometheus/manifests/grafana-deployment.yaml and change image: grafana/grafana:7.3.4 to your local container registry image location, for example image: container-registry.example.com/grafana/grafana:8.3.4.

    +

    For any other images check the $WORKDIR/kubernetes/monitoring-service/kube-prometheus/manifests/*deployment.yaml files.

    +
  6. +
  7. +

    Run the following command to create the namespace and custom resource definitions:

    +
    $ cd $WORKDIR/kubernetes/monitoring-service/kube-prometheus
    +$ kubectl create -f manifests/setup
    +

    The output will look similar to the following:

    +
    namespace/monitoring created
    +customresourcedefinition.apiextensions.k8s.io/alertmanagerconfigs.monitoring.coreos.com created
    +customresourcedefinition.apiextensions.k8s.io/alertmanagers.monitoring.coreos.com created
    +customresourcedefinition.apiextensions.k8s.io/podmonitors.monitoring.coreos.com created
    +customresourcedefinition.apiextensions.k8s.io/probes.monitoring.coreos.com created
    +customresourcedefinition.apiextensions.k8s.io/prometheuses.monitoring.coreos.com created
    +customresourcedefinition.apiextensions.k8s.io/prometheusrules.monitoring.coreos.com created
    +customresourcedefinition.apiextensions.k8s.io/servicemonitors.monitoring.coreos.com created
    +customresourcedefinition.apiextensions.k8s.io/thanosrulers.monitoring.coreos.com created
    +Warning: spec.template.spec.nodeSelector[beta.kubernetes.io/os]: deprecated since v1.14; use "kubernetes.io/os" instead
    +clusterrole.rbac.authorization.k8s.io/prometheus-operator created
    +clusterrolebinding.rbac.authorization.k8s.io/prometheus-operator created
    +deployment.apps/prometheus-operator created
    +service/prometheus-operator created
    +serviceaccount/prometheus-operator created
    +
  8. +
  9. +

    Run the following command to create the rest of the resources:

    +
    $ kubectl create -f manifests/
    +

    The output will look similar to the following:

    +
    alertmanager.monitoring.coreos.com/main created 
    +secret/alertmanager-main created
    +service/alertmanager-main created
    +serviceaccount/alertmanager-main created
    +servicemonitor.monitoring.coreos.com/alertmanager created
    +secret/grafana-datasources created
    +configmap/grafana-dashboard-apiserver created
    +configmap/grafana-dashboard-cluster-total created
    +configmap/grafana-dashboard-controller-manager created
    +configmap/grafana-dashboard-k8s-resources-cluster created
    +configmap/grafana-dashboard-k8s-resources-namespace created
    +configmap/grafana-dashboard-k8s-resources-node created
    +configmap/grafana-dashboard-k8s-resources-pod created
    +configmap/grafana-dashboard-k8s-resources-workload created
    +configmap/grafana-dashboard-k8s-resources-workloads-namespace created
    +configmap/grafana-dashboard-kubelet created
    +configmap/grafana-dashboard-namespace-by-pod created
    +configmap/grafana-dashboard-namespace-by-workload created
    +configmap/grafana-dashboard-node-cluster-rsrc-use created
    +configmap/grafana-dashboard-node-rsrc-use created
    +configmap/grafana-dashboard-nodes created
    +configmap/grafana-dashboard-persistentvolumesusage created
    +configmap/grafana-dashboard-pod-total created
    +configmap/grafana-dashboard-prometheus-remote-write created
    +configmap/grafana-dashboard-prometheus created
    +configmap/grafana-dashboard-proxy created
    +configmap/grafana-dashboard-scheduler created
    +configmap/grafana-dashboard-statefulset created
    +configmap/grafana-dashboard-workload-total created
    +configmap/grafana-dashboards created
    +Warning: spec.template.spec.nodeSelector[beta.kubernetes.io/os]: deprecated since v1.14; use "kubernetes.io/os" instead
    +deployment.apps/grafana created
    +service/grafana created
    +serviceaccount/grafana created
    +servicemonitor.monitoring.coreos.com/grafana created
    +clusterrole.rbac.authorization.k8s.io/kube-state-metrics created
    +clusterrolebinding.rbac.authorization.k8s.io/kube-state-metrics created
    +deployment.apps/kube-state-metrics created
    +service/kube-state-metrics created
    +serviceaccount/kube-state-metrics created
    +servicemonitor.monitoring.coreos.com/kube-state-metrics created
    +clusterrole.rbac.authorization.k8s.io/node-exporter created
    +clusterrolebinding.rbac.authorization.k8s.io/node-exporter created
    +daemonset.apps/node-exporter created
    +service/node-exporter created
    +serviceaccount/node-exporter created
    +servicemonitor.monitoring.coreos.com/node-exporter created
    +apiservice.apiregistration.k8s.io/v1beta1.metrics.k8s.io created
    +clusterrole.rbac.authorization.k8s.io/prometheus-adapter created
    +clusterrole.rbac.authorization.k8s.io/system:aggregated-metrics-reader created
    +clusterrolebinding.rbac.authorization.k8s.io/prometheus-adapter created
    +clusterrolebinding.rbac.authorization.k8s.io/resource-metrics:system:auth-delegator created
    +clusterrole.rbac.authorization.k8s.io/resource-metrics-server-resources created
    +configmap/adapter-config created
    +deployment.apps/prometheus-adapter created
    +rolebinding.rbac.authorization.k8s.io/resource-metrics-auth-reader created
    +service/prometheus-adapter created
    +serviceaccount/prometheus-adapter created
    +servicemonitor.monitoring.coreos.com/prometheus-adapter created
    +clusterrole.rbac.authorization.k8s.io/prometheus-k8s created
    +clusterrolebinding.rbac.authorization.k8s.io/prometheus-k8s created
    +servicemonitor.monitoring.coreos.com/prometheus-operator created
    +prometheus.monitoring.coreos.com/k8s created
    +rolebinding.rbac.authorization.k8s.io/prometheus-k8s-config created
    +rolebinding.rbac.authorization.k8s.io/prometheus-k8s created
    +rolebinding.rbac.authorization.k8s.io/prometheus-k8s created
    +rolebinding.rbac.authorization.k8s.io/prometheus-k8s created
    +role.rbac.authorization.k8s.io/prometheus-k8s-config created
    +role.rbac.authorization.k8s.io/prometheus-k8s created
    +role.rbac.authorization.k8s.io/prometheus-k8s created
    +role.rbac.authorization.k8s.io/prometheus-k8s created
    +prometheusrule.monitoring.coreos.com/prometheus-k8s-rules created
    +service/prometheus-k8s created
    +serviceaccount/prometheus-k8s created
    +servicemonitor.monitoring.coreos.com/prometheus created
    +servicemonitor.monitoring.coreos.com/kube-apiserver created
    +servicemonitor.monitoring.coreos.com/coredns created
    +servicemonitor.monitoring.coreos.com/kube-controller-manager created
    +servicemonitor.monitoring.coreos.com/kube-scheduler created
    +servicemonitor.monitoring.coreos.com/kubelet created
    +
  10. +
  11. +

    Provide external access for Grafana, Prometheus, and Alertmanager, by running the following commands:

    +
    $ kubectl patch svc grafana -n monitoring --type=json -p '[{"op": "replace", "path": "/spec/type", "value": "NodePort" },{"op": "replace", "path": "/spec/ports/0/nodePort", "value": 32100 }]'
    +   
    +$ kubectl patch svc prometheus-k8s -n monitoring --type=json -p '[{"op": "replace", "path": "/spec/type", "value": "NodePort" },{"op": "replace", "path": "/spec/ports/0/nodePort", "value": 32101 }]'
    + 
    +$ kubectl patch svc alertmanager-main -n monitoring --type=json -p '[{"op": "replace", "path": "/spec/type", "value": "NodePort" },{"op": "replace", "path": "/spec/ports/0/nodePort", "value": 32102 }]'
    +

    Note: This assigns port 32100 to Grafana, 32101 to Prometheus, and 32102 to Alertmanager.

    +

    The output will look similar to the following:

    +
    service/grafana patched
    +service/prometheus-k8s patched
    +service/alertmanager-main patched
    +
  12. +
  13. +

    Verify that the Prometheus, Grafana, and Alertmanager pods are running in the monitoring namespace and that the respective services have the NodePorts configured correctly:

    +
    $ kubectl get pods,services -o wide -n monitoring
    +

    The output should look similar to the following:

    +
    pod/alertmanager-main-0                    2/2     Running   0          40s   10.244.1.29    worker-node1   <none>           <none>
    +pod/alertmanager-main-1                    2/2     Running   0          40s   10.244.2.68    worker-node2   <none>           <none>
    +pod/alertmanager-main-2                    2/2     Running   0          40s   10.244.1.28    worker-node1   <none>           <none>
    +pod/grafana-f8cd57fcf-zpjh2                1/1     Running   0          40s   10.244.2.69    worker-node2   <none>           <none>
    +pod/kube-state-metrics-587bfd4f97-zw9zj    3/3     Running   0          38s   10.244.1.30    worker-node1   <none>           <none>
    +pod/node-exporter-2cgrm                    2/2     Running   0          38s   10.196.54.36   master-node    <none>           <none>
    +pod/node-exporter-fpl7f                    2/2     Running   0          38s   10.247.95.26   worker-node1   <none>           <none>
    +pod/node-exporter-kvvnr                    2/2     Running   0          38s   10.250.40.59   worker-node2   <none>           <none>
    +pod/prometheus-adapter-69b8496df6-9vfdp    1/1     Running   0          38s   10.244.2.70    worker-node2   <none>           <none>
    +pod/prometheus-k8s-0                       2/2     Running   0          37s   10.244.2.71    worker-node2   <none>           <none>
    +pod/prometheus-k8s-1                       2/2     Running   0          37s   10.244.1.31    worker-node1   <none>           <none>
    +pod/prometheus-operator-7649c7454f-g5b4l   2/2     Running   0          47s   10.244.2.67    worker-node2   <none>           <none>
    +
    +NAME                            TYPE        CLUSTER-IP       EXTERNAL-IP   PORT(S)                      AGE   SELECTOR
    +service/alertmanager-main       NodePort    10.105.76.223    <none>        9093:32102/TCP               41s   alertmanager=main,app=alertmanager
    +service/alertmanager-operated   ClusterIP   None             <none>        9093/TCP,9094/TCP,9094/UDP   40s   app=alertmanager
    +service/grafana                 NodePort    10.107.86.157    <none>        3000:32100/TCP               40s   app=grafana
    +service/kube-state-metrics      ClusterIP   None             <none>        8443/TCP,9443/TCP            40s   app.kubernetes.io/name=kube-state-metrics
    +service/node-exporter           ClusterIP   None             <none>        9100/TCP                     39s   app.kubernetes.io/name=node-exporter
    +service/prometheus-adapter      ClusterIP   10.102.244.224   <none>        443/TCP                      39s   name=prometheus-adapter
    +service/prometheus-k8s          NodePort    10.100.241.34    <none>        9090:32101/TCP               39s   app=prometheus,prometheus=k8s
    +service/prometheus-operated     ClusterIP   None             <none>        9090/TCP                     39s   app=prometheus
    +service/prometheus-operator     ClusterIP   None             <none>        8443/TCP                     47s   app.kubernetes.io/component=controller,app.kubernetes.io/name=prometheus-operator
    +
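    As an optional sanity check (not part of the original steps), you can confirm that the NodePorts assigned above respond, for example with curl from the master node. An HTTP 200 response code indicates the service is reachable:

    +
    $ curl -s -o /dev/null -w "%{http_code}\n" http://${MASTERNODE-HOSTNAME}:32101/graph
    +$ curl -s -o /dev/null -w "%{http_code}\n" http://${MASTERNODE-HOSTNAME}:32100/login
    +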
  14. +
+

Deploy WebLogic Monitoring Exporter

+

Generate the WebLogic Monitoring Exporter deployment package. The wls-exporter.war package needs to be created and updated for each listening port (Administration Server and Managed Servers) in the domain.

+
    +
  1. +

    Set the environment variables below, then run the get-wls-exporter.sh script to generate the required WAR files at ${WORKDIR}/kubernetes/monitoring-service/scripts/wls-exporter-deploy:

    +
    $ cd $WORKDIR/kubernetes/monitoring-service/scripts
    +$ export adminServerPort=7001
    +$ export wlsMonitoringExporterTosoaCluster=true
    +$ export soaManagedServerPort=8001
    +$ export wlsMonitoringExporterTooimCluster=true
    +$ export oimManagedServerPort=14000
    +$ sh get-wls-exporter.sh
    +

    The output will look similar to the following:

    +
      % Total    % Received % Xferd  Average Speed   Time    Time     Time  Current
    +                              Dload  Upload   Total   Spent    Left  Speed
    +100   655  100   655    0     0   1159      0 --:--:-- --:--:-- --:--:--  1159
    +100 2196k  100 2196k    0     0  1430k      0  0:00:01  0:00:01 --:--:-- 8479k
    +created $WORKDIR/kubernetes/monitoring-service/scripts/wls-exporter-deploy dir
    +domainNamespace is empty, setting to default oimcluster
    +domainUID is empty, setting to default oimcluster
    +weblogicCredentialsSecretName is empty, setting to default "oimcluster-domain-credentials"
    +adminServerPort is empty, setting to default "7001"
    +soaClusterName is empty, setting to default "soa_cluster"
    +oimClusterName is empty, setting to default "oim_cluster"
    +created /tmp/ci-NEZy7NOfoz
    +/tmp/ci-NEZy7NOfoz $WORKDIR/kubernetes/monitoring-service/scripts
    +in temp dir
    +  adding: WEB-INF/weblogic.xml (deflated 61%)
    +  adding: config.yml (deflated 60%)
    +$WORKDIR/kubernetes/monitoring-service/scripts
    +created /tmp/ci-J7QJ4Nc1lo
    +/tmp/ci-J7QJ4Nc1lo $WORKDIR/kubernetes/monitoring-service/scripts
    +in temp dir
    +  adding: WEB-INF/weblogic.xml (deflated 61%)
    +  adding: config.yml (deflated 60%)
    +$WORKDIR/kubernetes/monitoring-service/scripts
    +created /tmp/ci-f4GbaxM2aJ
    +/tmp/ci-f4GbaxM2aJ $WORKDIR/kubernetes/monitoring-service/scripts
    +in temp dir
    +  adding: WEB-INF/weblogic.xml (deflated 61%)
    +  adding: config.yml (deflated 60%)
    +$WORKDIR/kubernetes/monitoring-service/scripts
    +
  2. +
  3. +

    Deploy the WebLogic Monitoring Exporter WAR files into the Oracle Identity Governance domain:

    +
    $ cd $WORKDIR/kubernetes/monitoring-service/scripts
    +$ kubectl cp wls-exporter-deploy <domain_namespace>/<domain_uid>-adminserver:/u01/oracle
    +$ kubectl cp deploy-weblogic-monitoring-exporter.py <domain_namespace>/<domain_uid>-adminserver:/u01/oracle/wls-exporter-deploy
    +$ kubectl exec -it -n <domain_namespace> <domain_uid>-adminserver -- /u01/oracle/oracle_common/common/bin/wlst.sh /u01/oracle/wls-exporter-deploy/deploy-weblogic-monitoring-exporter.py -domainName <domain_uid> -adminServerName AdminServer -adminURL <domain_uid>-adminserver:7001 -username weblogic -password <password> -oimClusterName oim_cluster -wlsMonitoringExporterTooimCluster true -soaClusterName soa_cluster -wlsMonitoringExporterTosoaCluster true
    +

    For example:

    +
    $ cd $WORKDIR/kubernetes/monitoring-service/scripts
    +$ kubectl cp wls-exporter-deploy oigns/governancedomain-adminserver:/u01/oracle
    +$ kubectl cp deploy-weblogic-monitoring-exporter.py oigns/governancedomain-adminserver:/u01/oracle/wls-exporter-deploy
    +$ kubectl exec -it -n oigns governancedomain-adminserver -- /u01/oracle/oracle_common/common/bin/wlst.sh /u01/oracle/wls-exporter-deploy/deploy-weblogic-monitoring-exporter.py -domainName governancedomain -adminServerName AdminServer -adminURL governancedomain-adminserver:7001 -username weblogic -password <password> -oimClusterName oim_cluster -wlsMonitoringExporterTooimCluster true -soaClusterName soa_cluster -wlsMonitoringExporterTosoaCluster true
    +

    The output will look similar to the following:

    +
    Initializing WebLogic Scripting Tool (WLST) ...
    +
    +Welcome to WebLogic Server Administration Scripting Shell
    +
    +Type help() for help on available commands
    +
    +Connecting to t3://governancedomain-adminserver:7001 with userid weblogic ...
    +Successfully connected to Admin Server "AdminServer" that belongs to domain "governancedomain".
    +
    +Warning: An insecure protocol was used to connect to the server.
    +To ensure on-the-wire security, the SSL port or Admin port should be used instead.
    +
    +Deploying .........
    +Deploying application from /u01/oracle/wls-exporter-deploy/wls-exporter-adminserver.war to targets AdminServer (upload=true) ...
    +<DATE> <Info> <J2EE Deployment SPI> <BEA-260121> <Initiating deploy operation for application, wls-exporter-adminserver [archive: /u01/oracle/wls-exporter-deploy/wls-exporter-adminserver.war], to AdminServer .>
    +.Completed the deployment of Application with status completed
    +Current Status of your Deployment:
    +Deployment command type: deploy
    +Deployment State : completed
    +Deployment Message : no message
    +Starting application wls-exporter-adminserver.
    +<DATE> <Info> <J2EE Deployment SPI> <BEA-260121> <Initiating start operation for application, wls-exporter-adminserver [archive: null], to AdminServer .>
    +.Completed the start of Application with status completed
    +Current Status of your Deployment:
    +Deployment command type: start
    +Deployment State : completed
    +Deployment Message : no message
    +Deploying .........
    +Deploying application from /u01/oracle/wls-exporter-deploy/wls-exporter-soa.war to targets soa_cluster (upload=true) ...
    +<DATE> <Info> <J2EE Deployment SPI> <BEA-260121> <Initiating deploy operation for application, wls-exporter-soa [archive: /u01/oracle/wls-exporter-deploy/wls-exporter-soa.war], to soa_cluster .>
    +..Completed the deployment of Application with status completed
    +Current Status of your Deployment:
    +Deployment command type: deploy
    +Deployment State : completed
    +Deployment Message : no message
    +Starting application wls-exporter-soa.
    +<DATE> <Info> <J2EE Deployment SPI> <BEA-260121> <Initiating start operation for application, wls-exporter-soa [archive: null], to soa_cluster .>
    +.Completed the start of Application with status completed
    +Current Status of your Deployment:
    +Deployment command type: start
    +Deployment State : completed
    +Deployment Message : no message
    +Deploying .........
    +Deploying application from /u01/oracle/wls-exporter-deploy/wls-exporter-oim.war to targets oim_cluster (upload=true) ...
    +<DATE> <Info> <J2EE Deployment SPI> <BEA-260121> <Initiating deploy operation for application, wls-exporter-oim [archive: /u01/oracle/wls-exporter-deploy/wls-exporter-oim.war], to oim_cluster .>
    +.Completed the deployment of Application with status completed
    +Current Status of your Deployment:
    +Deployment command type: deploy
    +Deployment State : completed
    +Deployment Message : no message
    +Starting application wls-exporter-oim.
    +<DATE> <Info> <J2EE Deployment SPI> <BEA-260121> <Initiating start operation for application, wls-exporter-oim [archive: null], to oim_cluster .>
    +.Completed the start of Application with status completed
    +Current Status of your Deployment:
    +Deployment command type: start
    +Deployment State : completed
    +Deployment Message : no message
    +Disconnected from weblogic server: AdminServer
    +
    +Exiting WebLogic Scripting Tool.
    +
    +<DATE> <Warning> <JNDI> <BEA-050001> <WLContext.close() was called in a different thread than the one in which it was created.>
    +
  4. +
+

Configure Prometheus Operator

+

Prometheus enables you to collect metrics from the WebLogic Monitoring Exporter. The Prometheus Operator identifies the targets using service discovery. To get the WebLogic Monitoring Exporter endpoint discovered as a target, you must create a ServiceMonitor pointing to the service.

+

The exporting of metrics from wls-exporter requires basicAuth, so a Kubernetes Secret is created with the user name and password base64 encoded. This Secret is used in the ServiceMonitor deployment. The wls-exporter-ServiceMonitor.yaml has basicAuth credentials of username: weblogic and password: <password>, both base64 encoded.

+
    +
  1. +

    Run the following command to get the base64 encoded version of the weblogic password:

    +
    $ echo -n "<password>" | base64
    +

    The output will look similar to the following:

    +
    V2VsY29tZTE=
    +
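    If you also need the base64 value for the user field (weblogic in this example), or want to double-check an encoded value by decoding it, the same approach can be used (an optional check, not a required step):

    +
    $ echo -n "weblogic" | base64
    +$ echo -n "V2VsY29tZTE=" | base64 --decode
    +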
  2. +
  3. +

    Update the $WORKDIR/kubernetes/monitoring-service/manifests/wls-exporter-ServiceMonitor.yaml and change the password: value to the value returned above. Also change any reference to the namespace and weblogic.domainName: values to match your OIG namespace and domain name. For example:

    +
    apiVersion: v1
    +kind: Secret
    +metadata:
    +  name: basic-auth
    +  namespace: oigns
    +data:
    +  password: V2VsY29tZTE=
    +  user: d2VibG9naWM=
    +type: Opaque
    +---
    +apiVersion: monitoring.coreos.com/v1
    +kind: ServiceMonitor
    +metadata:
    +  name: wls-exporter
    +  namespace: oigns
    +  labels:
    +    k8s-app: wls-exporter
    +    release: monitoring
    +spec:
    +  namespaceSelector:
    +    matchNames:
    +    - oigns
    +  selector:
    +    matchLabels:
    +    weblogic.domainName: governancedomain
    +  endpoints:
    +  - basicAuth:
    +      password:
    +        name: basic-auth
    +        key: password
    +      username:
    +        name: basic-auth
    +        key: user
    +    port: default
    +    relabelings:
    +      - action: labelmap
    +        regex: __meta_kubernetes_service_label_(.+)
    +    interval: 10s
    +    honorLabels: true
    +    path: /wls-exporter/metrics
    +
  4. +
  5. +

    Update the $WORKDIR/kubernetes/monitoring-service/manifests/prometheus-roleSpecific-domain-namespace.yaml and change the namespace to match your OIG namespace. For example:

    +
    apiVersion: rbac.authorization.k8s.io/v1
    +items:
    +- apiVersion: rbac.authorization.k8s.io/v1
    +  kind: Role
    +  metadata:
    +    name: prometheus-k8s
    +    namespace: oigns
    +  rules:
    +  - apiGroups:
    +    - ""
    +    resources:
    +    - services
    +    - endpoints
    +    - pods
    +    verbs:
    +    - get
    +    - list
    +    - watch
    +kind: RoleList
    +
  6. +
  7. +

    Update the $WORKDIR/kubernetes/monitoring-service/manifests/prometheus-roleBinding-domain-namespace.yaml and change the namespace to match your OIG namespace. For example:

    +
    apiVersion: rbac.authorization.k8s.io/v1
    +items:
    +- apiVersion: rbac.authorization.k8s.io/v1
    +  kind: RoleBinding
    +  metadata:
    +    name: prometheus-k8s
    +    namespace: oigns
    +  roleRef:
    +    apiGroup: rbac.authorization.k8s.io
    +    kind: Role
    +    name: prometheus-k8s
    +  subjects:
    +  - kind: ServiceAccount
    +    name: prometheus-k8s
    +    namespace: monitoring
    +kind: RoleBindingList
    +
  8. +
  9. +

    Run the following command to enable Prometheus:

    +
    $ cd $WORKDIR/kubernetes/monitoring-service/manifests
    +$ kubectl apply -f .
    +

    The output will look similar to the following:

    +
    rolebinding.rbac.authorization.k8s.io/prometheus-k8s created
    +role.rbac.authorization.k8s.io/prometheus-k8s created
    +secret/basic-auth created
    +servicemonitor.monitoring.coreos.com/wls-exporter created
    +
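    If you want to confirm the ServiceMonitor was created (an optional check, not part of the original steps), you can list it in the OIG namespace:

    +
    $ kubectl get servicemonitor -n oigns
    +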
  10. +
+

Prometheus service discovery

+

After the ServiceMonitor is deployed, the wls-exporter should be discovered by Prometheus and be able to collect metrics.

+
    +
  1. +

    Access the following URL to view Prometheus service discovery: http://${MASTERNODE-HOSTNAME}:32101/service-discovery

    +
  2. +
  3. +

    Click on oigns/wls-exporter/0 and then show more. Verify that all the targets are listed.

    +
  4. +
+

Note: It may take several minutes for oigns/wls-exporter/0 to appear, so refresh the page until it does.

+
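If you prefer the command line, the same target information can be queried from the Prometheus API (an optional alternative to the UI, using the NodePort assigned earlier):

+
$ curl -s http://${MASTERNODE-HOSTNAME}:32101/api/v1/targets | grep wls-exporter
+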

Grafana dashboard

+
    +
  1. +

    Access the Grafana dashboard with the following URL: http://${MASTERNODE-HOSTNAME}:32100 and log in with admin/admin. Change your password when prompted.

    +
  2. +
  3. +

    Import the Grafana dashboard by navigating via the left-hand menu to Create > Import. Copy the content of $WORKDIR/kubernetes/monitoring-service/config/weblogic-server-dashboard-import.json and paste it in, then click Load and Import. The dashboard should be displayed.

    +
  4. +
+

Cleanup

+

To clean up a manual installation:

+
    +
  1. +

    Run the following commands:

    +
    $ cd $WORKDIR/kubernetes/monitoring-service/manifests/
    +$ kubectl delete -f .
    +
  2. +
  3. +

    Delete the deployments:

    +
    $ cd $WORKDIR/kubernetes/monitoring-service/scripts/
    +$ kubectl cp undeploy-weblogic-monitoring-exporter.py <domain_namespace>/<domain_uid>-adminserver:/u01/oracle/wls-exporter-deploy
    +$ kubectl exec -it -n <domain_namespace> <domain_uid>-adminserver -- /u01/oracle/oracle_common/common/bin/wlst.sh /u01/oracle/wls-exporter-deploy/undeploy-weblogic-monitoring-exporter.py -domainName <domain_uid>  -adminServerName AdminServer -adminURL <domain_uid>-adminserver:7001 -username weblogic -password <password> -oimClusterName oim_cluster -wlsMonitoringExporterTooimCluster true -soaClusterName soa_cluster -wlsMonitoringExporterTosoaCluster true
    +
  4. +
  5. +

    Delete Prometheus:

    +
    $ cd $WORKDIR/kubernetes/monitoring-service/kube-prometheus
    +$ kubectl delete -f manifests
    +$ kubectl delete -f manifests/setup
    +
  6. +
+ + +
+ +
+ + + +
+ + +
+ + + +
+ +
+
+
+ + + + + + + + + + + + + + + + + + + diff --git a/docs/23.4.1/idm-products/oig/manage-oig-domains/running-oig-utilities/index.html b/docs/23.4.1/idm-products/oig/manage-oig-domains/running-oig-utilities/index.html new file mode 100644 index 000000000..c2538cf16 --- /dev/null +++ b/docs/23.4.1/idm-products/oig/manage-oig-domains/running-oig-utilities/index.html @@ -0,0 +1,4036 @@ + + + + + + + + + + + + c. Runnning OIG utilities :: Oracle Fusion Middleware on Kubernetes + + + + + + + + + + + + + + + + + + + + + + + + + +
+
+
+ +
+
+ + + + + + + +
+
+ +
+ +
+ +
+ +

+ + c. Running OIG utilities +

+ + + + + + +

Run OIG utilities inside the OIG Kubernetes cluster.

+

Run utilities in an interactive bash shell

+
    +
  1. +

    Access a bash shell inside the <domain_uid>-oim-server1 pod:

    +
    $ kubectl -n oigns exec -it <domain_uid>-oim-server1 -- bash
    +

    For example:

    +
    $ kubectl -n oigns exec -it governancedomain-oim-server1 -- bash
    +

    This will take you into a bash shell in the running <domain_uid>-oim-server1 pod:

    +
    [oracle@governancedomain-oim-server1 oracle]$
    +
  2. +
  3. +

    Navigate to the /u01/oracle/idm/server/bin directory and execute the utility as required. For example:

    +
    [oracle@governancedomain-oim-server1 oracle]$ cd /u01/oracle/idm/server/bin
    +[oracle@governancedomain-oim-server1 bin]$ ./<filename>.sh
    +

    Note: Some utilities, such as PurgeCache.sh, GenerateSnapshot.sh, etc., may prompt you to enter the t3 URL, for example:

    +
    [oracle@governancedomain-oim-server1 bin]$ sh GenerateSnapshot.sh
    +For running the Utilities the following environment variables need to be set
    +APP_SERVER is weblogic
    +OIM_ORACLE_HOME is /u01/oracle/idm/
    +JAVA_HOME is /u01/jdk
    +MW_HOME is /u01/oracle
    +WL_HOME is /u01/oracle/wlserver
    +DOMAIN_HOME is /u01/oracle/user_projects/domains/governancedomain
    +Executing -Dweblogic.security.SSL.trustedCAKeyStore= in IPv4 mode
    +[Enter Xellerate admin username :]xelsysadm
    +[Enter password for xelsysadm :]
    +[Threads to use [ 8 ]]
    +[Enter serverURL :[t3://oimhostname:oimportno ]]
    +

    To find the t3 URL run:

    +
    $ kubectl get services -n oigns | grep oim-cluster
    +

    The output will look similar to the following:

    +
    governancedomain-cluster-oim-cluster   ClusterIP   10.110.161.82    <none>        14002/TCP,14000/TCP   4d
    +

    In this case the t3 URL is: t3://governancedomain-cluster-oim-cluster:14000.

    +
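    If you prefer to extract the port programmatically (an optional sketch, not part of the original steps), a jsonpath query against the service can be used:

    +
    $ kubectl get service governancedomain-cluster-oim-cluster -n oigns -o jsonpath='{.spec.ports[*].port}'
    +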
  4. +
+

Passing inputs as a jar/xml file

+
    +
  1. +

    Copy the input file that you want to pass to the utility to a directory of your choice.

    +
  2. +
  3. +

    Run the following command to copy the input file to the running governancedomain-oim-server1 pod.

    +
    $ kubectl -n oigns cp /<path>/<inputFile> governancedomain-oim-server1:/u01/oracle/idm/server/bin/
    +
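    To confirm the file arrived in the pod (an optional check), you can list it, for example:

    +
    $ kubectl -n oigns exec governancedomain-oim-server1 -- ls -l /u01/oracle/idm/server/bin/<inputFile>
    +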
  4. +
  5. +

    Access a bash shell inside the governancedomain-oim-server1 pod:

    +
    $ kubectl -n oigns exec -it governancedomain-oim-server1 -- bash
    +

    This will take you into a bash shell in the running governancedomain-oim-server1 pod:

    +
    [oracle@governancedomain-oim-server1 oracle]$
    +
  6. +
  7. +

    Navigate to the /u01/oracle/idm/server/bin directory and execute the utility as required, passing the input file. For example:

    +
    [oracle@governancedomain-oim-server1 oracle]$ cd /u01/oracle/idm/server/bin
    +[oracle@governancedomain-oim-server1 bin]$ ./<filename>.sh -inputFile <inputFile>
    +

    Note: As pods are stateless, the copied input file will only remain in the pod until the pod restarts.

    +
  8. +
+

Editing property/profile files

+

To edit a property/profile file in the Kubernetes cluster:

+
    +
  1. +

    Copy the file from the pod to a directory on the local system, for example:

    +
    $ kubectl -n oigns cp governancedomain-oim-server1:/u01/oracle/idm/server/bin/<file.properties_profile> /<path>/<file.properties_profile>
    +

    Note: If you see the message tar: Removing leading '/' from member names, it can be ignored.

    +
  2. +
  3. +

    Edit the /<path>/<file.properties_profile> in an editor of your choice.

    +
  4. +
  5. +

    Copy the file back to the pod:

    +
    $ kubectl -n oigns cp /<path>/<file.properties_profile> governancedomain-oim-server1:/u01/oracle/idm/server/bin/
    +

    Note: As pods are stateless, the copied file will only remain in the pod until the pod restarts. Preserve a local copy in case you need to copy the file back after a pod restart.

    +
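    If you want to verify that the copy back succeeded before proceeding (an optional check, not part of the original steps), you can compare the file in the pod with your local copy:

    +
    $ kubectl -n oigns exec governancedomain-oim-server1 -- cat /u01/oracle/idm/server/bin/<file.properties_profile> | diff - /<path>/<file.properties_profile>
    +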
  6. +
+ + +
+ +
+ + + +
+ + +
+ + + +
+ +
+
+
+ + + + + + + + + + + + + + + + + + + diff --git a/docs/23.4.1/idm-products/oig/manage-oig-domains/wlst-admin-operations/index.html b/docs/23.4.1/idm-products/oig/manage-oig-domains/wlst-admin-operations/index.html new file mode 100644 index 000000000..0e83b4e72 --- /dev/null +++ b/docs/23.4.1/idm-products/oig/manage-oig-domains/wlst-admin-operations/index.html @@ -0,0 +1,4151 @@ + + + + + + + + + + + + b. WLST administration operations :: Oracle Fusion Middleware on Kubernetes + + + + + + + + + + + + + + + + + + + + + + + + + +
+
+
+ +
+
+ + + + + + + +
+
+ +
+ +
+ +
+ +

+ + b. WLST administration operations +

+ + + + + + +

Invoke WLST and access Administration Server

+

To use WLST to administer the OIG domain, use a helper pod in the same Kubernetes cluster as the OIG Domain.

+
    +
  1. +

    Check to see if the helper pod exists by running:

    +
    $ kubectl get pods -n <domain_namespace> | grep helper
    +

    For example:

    +
    $ kubectl get pods -n oigns | grep helper
    +

    The output should look similar to the following:

    +
    helper                                  1/1     Running     0          26h
    +

    If the helper pod doesn’t exist then see Step 1 in Prepare your environment to create it.

    +
  2. +
  3. +

    Run the following command to start a bash shell in the helper pod:

    +
    $ kubectl exec -it helper -n <domain_namespace> -- /bin/bash
    +

    For example:

    +
    $ kubectl exec -it helper -n oigns -- /bin/bash
    +

    This will take you into a bash shell in the running helper pod:

    +
    [oracle@helper ~]$
    +
  4. +
  5. +

    Connect to WLST using the following commands:

    +
    [oracle@helper ~]$ cd $ORACLE_HOME/oracle_common/common/bin
    +[oracle@helper ~]$ ./wlst.sh
    +

    The output will look similar to the following:

    +
    Initializing WebLogic Scripting Tool (WLST) ...
    +
    +Jython scans all the jar files it can find at first startup. Depending on the system, this process may take a few minutes to complete, and WLST may not return a prompt right away.
    +
    +Welcome to WebLogic Server Administration Scripting Shell
    +
    +Type help() for help on available commands
    +
    +wls:/offline>
    +
  6. +
  7. +

    To access t3 for the Administration Server connect as follows:

    +
    wls:/offline> connect('weblogic','<password>','t3://governancedomain-adminserver:7001')
    +

    The output will look similar to the following:

    +
    Connecting to t3://governancedomain-adminserver:7001 with userid weblogic ...
    +Successfully connected to Admin Server "AdminServer" that belongs to domain "governancedomain".
    +
    +Warning: An insecure protocol was used to connect to the server.
    +To ensure on-the-wire security, the SSL port or Admin port should be used instead.
    +
    +wls:/governancedomain/serverConfig/>
    +

    Or to access t3 for the OIG Cluster service, connect as follows:

    +
    wls:/offline> connect('weblogic','<password>','t3://governancedomain-cluster-oim-cluster:14000')
    +

    The output will look similar to the following:

    +
    Connecting to t3://governancedomain-cluster-oim-cluster:14000 with userid weblogic ...
    +Successfully connected to managed Server "oim_server1" that belongs to domain "governancedomain".
    +
    +Warning: An insecure protocol was used to connect to the server.
    +To ensure on-the-wire security, the SSL port or Admin port should be used instead.
    +
    +wls:/governancedomain/serverConfig/>
    +
  8. +
+

Sample operations

+

For a full list of WLST operations refer to WebLogic Server WLST Online and Offline Command Reference.

+

Display servers

+
wls:/governancedomain/serverConfig/> cd('/Servers')
+wls:/governancedomain/serverConfig/Servers> ls ()
+dr--   AdminServer
+dr--   oim_server1
+dr--   oim_server2
+dr--   oim_server3
+dr--   oim_server4
+dr--   oim_server5
+dr--   soa_server1
+dr--   soa_server2
+dr--   soa_server3
+dr--   soa_server4
+dr--   soa_server5
+
+wls:/governancedomain/serverConfig/Servers>
+

Performing WLST administration via SSL

+
    +
  1. +

    By default the SSL port is not enabled for the Administration Server or the OIG Managed Servers. To configure the SSL port for the Administration Server and Managed Servers, log in to the WebLogic Administration Console https://${MASTERNODE-HOSTNAME}:${MASTERNODE-PORT}/console and navigate to Lock & Edit -> Environment -> Servers -> server_name -> Configuration -> General -> SSL Listen Port Enabled -> Provide SSL Port (for the Administration Server: 7002, and for the OIG Managed Server (oim_server1): 14101) -> Save -> Activate Changes.

    +

    Note: If configuring the OIG Managed Servers for SSL, you must enable SSL on the same port for all servers (oim_server1 through oim_server4).

    +
  2. +
  3. +

    Create a myscripts directory as follows:

    +
    $ cd $WORKDIR/kubernetes
    +$ mkdir myscripts
    +$ cd myscripts
    +
  4. +
  5. +

    Create a sample yaml template file in the myscripts directory called <domain_uid>-adminserver-ssl.yaml to create a Kubernetes service for the Administration Server:

    +

    Note: Update the domainName, domainUID and namespace based on your environment.

    +
    apiVersion: v1
    +kind: Service
    +metadata:
    +  labels:
    +    serviceType: SERVER
    +    weblogic.domainName: governancedomain
    +    weblogic.domainUID: governancedomain
    +    weblogic.resourceVersion: domain-v2
    +    weblogic.serverName: AdminServer
    +  name: governancedomain-adminserver-ssl
    +  namespace: oigns
    +spec:
    +  clusterIP: None
    +  ports:
    +  - name: default
    +    port: 7002
    +    protocol: TCP
    +    targetPort: 7002
    +  selector:
    +    weblogic.createdByOperator: "true"
    +    weblogic.domainUID: governancedomain
    +    weblogic.serverName: AdminServer
    +  type: ClusterIP
    +

    and create the following sample yaml template file <domain_uid>-oim-cluster-ssl.yaml for the OIG Managed Server:

    +
    apiVersion: v1
    +kind: Service
    +metadata:
    +  labels:
    +    serviceType: SERVER
    +    weblogic.domainName: governancedomain
    +    weblogic.domainUID: governancedomain
    +    weblogic.resourceVersion: domain-v2
    +  name: governancedomain-cluster-oim-cluster-ssl
    +  namespace: oigns
    +spec:
    +  clusterIP: None
    +  ports:
    +  - name: default
    +    port: 14101
    +    protocol: TCP
    +    targetPort: 14101
    +  selector:
    +    weblogic.clusterName: oim_cluster
    +    weblogic.createdByOperator: "true"
    +    weblogic.domainUID: governancedomain
    +  type: ClusterIP
    +
  6. +
  7. +

    Apply the template using the following command for the Administration Server:

    +
    $ kubectl apply -f governancedomain-adminserver-ssl.yaml
    +service/governancedomain-adminserver-ssl created
    +

    or using the following command for the OIG Managed Server:

    +
    $ kubectl apply -f governancedomain-oim-cluster-ssl.yaml
    +service/governancedomain-cluster-oim-cluster-ssl created
    +
  8. +
  9. +

    Validate that the Kubernetes Services to access SSL ports are created successfully:

    +
    $ kubectl get svc -n <domain_namespace> |grep ssl
    +

    For example:

    +
    $ kubectl get svc -n oigns |grep ssl
    +

    The output will look similar to the following:

    +
    governancedomain-adminserver-ssl           ClusterIP   None             <none>        7002/TCP                     74s
    +governancedomain-cluster-oim-cluster-ssl   ClusterIP   None             <none>        14101/TCP                    21s
    +
  10. +
  11. +

    Connect to a bash shell of the helper pod:

    +
    $ kubectl exec -it helper -n oigns -- /bin/bash
    +
  12. +
  13. +

    In the bash shell run the following:

    +
    [oracle@helper bin]$ export WLST_PROPERTIES="-Dweblogic.security.SSL.ignoreHostnameVerification=true -Dweblogic.security.TrustKeyStore=DemoTrust"
    +[oracle@helper bin]$ cd /u01/oracle/oracle_common/common/bin
    +[oracle@helper bin]$ ./wlst.sh
    +Initializing WebLogic Scripting Tool (WLST) ...
    +   
    +Welcome to WebLogic Server Administration Scripting Shell
    +   
    +Type help() for help on available commands
    +wls:/offline>
    +

    Connect to the Administration Server t3s service:

    +
    wls:/offline> connect('weblogic','<password>','t3s://governancedomain-adminserver-ssl:7002')
    +Connecting to t3s://governancedomain-adminserver-ssl:7002 with userid weblogic ...
    +<DATE> <Info> <Security> <BEA-090905> <Disabling the CryptoJ JCE Provider self-integrity check for better startup performance. To enable this check, specify -Dweblogic.security.allowCryptoJDefaultJCEVerification=true.>
    +<DATE> <Info> <Security> <BEA-090906> <Changing the default Random Number Generator in RSA CryptoJ from ECDRBG128 to HMACDRBG. To disable this change, specify -Dweblogic.security.allowCryptoJDefaultPRNG=true.>
    +<DATE> <Info> <Security> <BEA-090909> <Using the configured custom SSL Hostname Verifier implementation: weblogic.security.utils.SSLWLSHostnameVerifier$NullHostnameVerifier.>
    +Successfully connected to Admin Server "AdminServer" that belongs to domain "governancedomain".
    +
    +wls:/governancedomain/serverConfig/>
    +

    To connect to the OIG Managed Server t3s service:

    +
    wls:/offline> connect('weblogic','<password>','t3s://governancedomain-cluster-oim-cluster-ssl:14101')
    +Connecting to t3s://governancedomain-cluster-oim-cluster-ssl:14101 with userid weblogic ...
    +<DATE> <Info> <Security> <BEA-090905> <Disabling the CryptoJ JCE Provider self-integrity check for better startup performance. To enable this check, specify -Dweblogic.security.allowCryptoJDefaultJCEVerification=true.>
    +<DATE> <Info> <Security> <BEA-090906> <Changing the default Random Number Generator in RSA CryptoJ from ECDRBG128 to HMACDRBG. To disable this change, specify -Dweblogic.security.allowCryptoJDefaultPRNG=true.>
    +<DATE> <Info> <Security> <BEA-090909> <Using the configured custom SSL Hostname Verifier implementation: weblogic.security.utils.SSLWLSHostnameVerifier$NullHostnameVerifier.>
    +Successfully connected to managed Server "oim_server1" that belongs to domain "governancedomain".
    +
    +wls:/governancedomain/serverConfig/>
    +
  14. +
+ + +
+ +
+ + + +
+ + +
+ + + +
+ +
+
+
+ + + + + + + + + + + + + + + + + + + diff --git a/docs/23.4.1/idm-products/oig/patch-and-upgrade/index.html b/docs/23.4.1/idm-products/oig/patch-and-upgrade/index.html new file mode 100644 index 000000000..08d6e568b --- /dev/null +++ b/docs/23.4.1/idm-products/oig/patch-and-upgrade/index.html @@ -0,0 +1,4056 @@ + + + + + + + + + + + + Patch and upgrade :: Oracle Fusion Middleware on Kubernetes + + + + + + + + + + + + + + + + + + + + + + + + + +
+
+
+ +
+
+ + + + +
+
+ +
+
+ + +
+
+ +
+ +
+ +
+ +

+ + Patch and upgrade +

+ + + + + + + +

This section shows you how to upgrade the WebLogic Kubernetes Operator, upgrade the OIG image, and patch the OIG domain. It also shows you how to upgrade the Elasticsearch and Kibana stack, and the Ingress.

+

The upgrade path taken depends on the version you are upgrading from, and the version you are upgrading to.

+

Please refer to the Release Notes for information on which upgrade steps are necessary for the version you are upgrading to.

+ + + + + + + +
    + + + + + + + + + + + + + + + + + +

    +a. Upgrade an operator release +

    + + + + + +

    Instructions on how to update the WebLogic Kubernetes Operator version.

    + + + + + + + + + + + + +

    +b. Patch an image +

    + + + + + +

    Instructions on how to update your OIG Kubernetes cluster with a new OIG container image.

    + + + + + + + + + + + + +

    +c. Upgrade Ingress +

    + + + + + +

    Instructions on how to upgrade the ingress.

    + + + + + + + + + + + + +

    +d. Upgrade Elasticsearch and Kibana +

    + + + + + +

    Instructions on how to upgrade Elastic Search and Kibana.

    + + + + + + + + +
+ + + + + + + + + +
+ +
+ + +
+ + +
+ + + +
+ +
+
+
+ + + + + + + + + + + + + + + + + + diff --git a/docs/23.4.1/idm-products/oig/patch-and-upgrade/index.xml b/docs/23.4.1/idm-products/oig/patch-and-upgrade/index.xml new file mode 100644 index 000000000..23a192df9 --- /dev/null +++ b/docs/23.4.1/idm-products/oig/patch-and-upgrade/index.xml @@ -0,0 +1,64 @@ + + + + Patch and upgrade on Oracle Fusion Middleware on Kubernetes + /fmw-kubernetes/23.4.1/idm-products/oig/patch-and-upgrade/ + Recent content in Patch and upgrade on Oracle Fusion Middleware on Kubernetes + Hugo -- gohugo.io + en-us + + + + + + a. Upgrade an operator release + /fmw-kubernetes/23.4.1/idm-products/oig/patch-and-upgrade/upgrade-an-operator-release/ + Mon, 01 Jan 0001 00:00:00 +0000 + + /fmw-kubernetes/23.4.1/idm-products/oig/patch-and-upgrade/upgrade-an-operator-release/ + These instructions apply to upgrading operators from 3.X.X to 4.X, or from within the 4.x release family as additional versions are released. + On the master node, download the new WebLogic Kubernetes Operator source code from the operator github project: +$ mkdir &lt;workdir&gt;/weblogic-kubernetes-operator-4.X.X $ cd &lt;workdir&gt;/weblogic-kubernetes-operator-4.X.X $ git clone https://github.com/oracle/weblogic-kubernetes-operator.git --branch v4.X.X For example: +$ mkdir /scratch/OIGK8S/weblogic-kubernetes-operator-4.X.X $ cd /scratch/OIGK8S/weblogic-kubernetes-operator-4.X.X $ git clone https://github.com/oracle/weblogic-kubernetes-operator.git --branch v4.X.X This will create the directory &lt;workdir&gt;/weblogic-kubernetes-operator-4. + + + + b. Patch an image + /fmw-kubernetes/23.4.1/idm-products/oig/patch-and-upgrade/patch-an-image/ + Mon, 01 Jan 0001 00:00:00 +0000 + + /fmw-kubernetes/23.4.1/idm-products/oig/patch-and-upgrade/patch-an-image/ + Introduction The OIG domain patching script automatically performs the update of your OIG Kubernetes cluster with a new OIG container image. +Note: Before following the steps below, you must have upgraded to WebLogic Kubernetes Operator 4.1.2. +The script executes the following steps sequentially: + Checks if the helper pod exists in the given namespace. If yes, then it deletes the helper pod. Brings up a new helper pod with the new image. + + + + c. Upgrade Ingress + /fmw-kubernetes/23.4.1/idm-products/oig/patch-and-upgrade/upgrade-an-ingress/ + Mon, 01 Jan 0001 00:00:00 +0000 + + /fmw-kubernetes/23.4.1/idm-products/oig/patch-and-upgrade/upgrade-an-ingress/ + This section shows how to upgrade the ingress. +To determine if this step is required for the version you are upgrading to, refer to the Release Notes. +Upgrading the ingress To upgrade the existing ingress rules, follow the steps below: + List the existing ingress: +$ helm list -n &lt;domain_namespace&gt; For example: +$ helm list -n oigns The output will look similar to the following: +NAME NAMESPACE REVISION UPDATED STATUS CHART APP VERSION governancedomain-nginx oigns 1 &lt;DATE&gt; deployed ingress-per-domain-0. + + + + d. Upgrade Elasticsearch and Kibana + /fmw-kubernetes/23.4.1/idm-products/oig/patch-and-upgrade/upgrade-elk/ + Mon, 01 Jan 0001 00:00:00 +0000 + + /fmw-kubernetes/23.4.1/idm-products/oig/patch-and-upgrade/upgrade-elk/ + This section shows how to upgrade Elasticsearch and Kibana. +To determine if this step is required for the version you are upgrading to, refer to the Release Notes. +Download the latest code repository Make sure you have downloaded the latest code as per Download the latest code repository. 
Undeploy Elasticsearch and Kibana From October 22 (22.4.1) onwards, OIG logs should be stored on a centralized Elasticsearch and Kibana stack. + + + + \ No newline at end of file diff --git a/docs/23.4.1/idm-products/oig/patch-and-upgrade/patch-an-image/index.html b/docs/23.4.1/idm-products/oig/patch-and-upgrade/patch-an-image/index.html new file mode 100644 index 000000000..68db5ee2d --- /dev/null +++ b/docs/23.4.1/idm-products/oig/patch-and-upgrade/patch-an-image/index.html @@ -0,0 +1,4049 @@ + + + + + + + + + + + + b. Patch an image :: Oracle Fusion Middleware on Kubernetes + + + + + + + + + + + + + + + + + + + + + + + + + +
+
+
+ +
+
+ + + + + + + +
+
+ +
+ +
+ +
+ +

+ + b. Patch an image +

+ + + + + + +

Introduction

+

The OIG domain patching script automatically performs the update of your OIG Kubernetes cluster with a new OIG container image.

+

Note: Before following the steps below, you must have upgraded to WebLogic Kubernetes Operator 4.1.2.

+

The script executes the following steps sequentially:

+
    +
  • Checks if the helper pod exists in the given namespace. If yes, then it deletes the helper pod.
  • +
  • Brings up a new helper pod with the new image.
  • +
  • Stops the Administration Server, SOA and OIM Managed Servers by setting serverStartPolicy to Never in the domain definition yaml (see the sketch after this list).
  • +
  • Waits for all servers to be stopped (default timeout 2000s).
  • +
  • Introspects database properties, including credentials, from the job configmap.
  • +
  • Performs database schema changes from the helper pod.
  • +
  • Starts the Administration Server, SOA and OIM Managed Servers by setting serverStartPolicy to IfNeeded and the image to the new image tag.
  • +
  • Waits for all the servers to be ready (default timeout 2000s).
  • +
+
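For context, the stop and start steps above correspond conceptually to patching the domain resource's serverStartPolicy. A minimal sketch of the equivalent manual commands is shown below (assuming domain governancedomain in namespace oigns; the patch script performs these changes for you, so you do not need to run them yourself):

+
$ kubectl patch domain governancedomain -n oigns --type='merge' -p '{"spec":{"serverStartPolicy":"Never"}}'
+$ kubectl patch domain governancedomain -n oigns --type='merge' -p '{"spec":{"serverStartPolicy":"IfNeeded"}}'
+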

The script exits with a failure if a configurable timeout is reached before the target pod count (which depends on the domain configuration) is reached. It also exits if there is any failure while patching the database schema or the domain.

+

Note: The script execution will cause downtime while patching the OIG deployment and database schemas.

+

Prerequisites

+

Before you begin, perform the following steps:

+
    +
  1. +

    Review the Domain resource documentation.

    +
  2. +
  3. +

    Ensure that you have a running OIG deployment in your cluster.

    +
  4. +
  5. +

    Ensure that the database is up and running.

    +
  6. +
+

Download the latest code repository

+

Download the latest code repository as follows:

+
    +
  1. +

    Create a working directory to set up the source code.

    +
    $ mkdir <workdir>
    +

    For example:

    +
    $ mkdir /scratch/OIGK8Slatest
    +
  2. +
  3. +

    Download the latest OIG deployment scripts from the OIG repository.

    +
    $ cd <workdir>
    +$ git clone https://github.com/oracle/fmw-kubernetes.git --branch release/23.4.1
    +

    For example:

    +
    $ cd /scratch/OIGK8Slatest
    +$ git clone https://github.com/oracle/fmw-kubernetes.git --branch release/23.4.1
    +
  4. +
  5. +

    Set the $WORKDIR environment variable as follows:

    +
    $ export WORKDIR=<workdir>/fmw-kubernetes/OracleIdentityGovernance
    +

    For example:

    +
    $ export WORKDIR=/scratch/OIGK8Slatest/fmw-kubernetes/OracleIdentityGovernance
    +
  6. +
+

Run the patch domain script

+
    +
  1. +

    Run the patch domain script as follows, specifying the inputs required by the script. If you need help understanding the inputs, run the script with the -h option.

    +
    $ cd $WORKDIR/kubernetes/domain-lifecycle
    +$ ./patch_oig_domain.sh -h
    +$ ./patch_oig_domain.sh -i <target_image_tag> -n <oig_namespace>
    +

    For example:

    +
    $ cd $WORKDIR/kubernetes/domain-lifecycle
    +$ ./patch_oig_domain.sh -h
    +$ ./patch_oig_domain.sh -i 12.2.1.4-jdk8-ol7-<October`23> -n oigns
    +

    The output will look similar to the following

    +
    [INFO] Found domain name: governancedomain
    +[INFO] Image Registry: container-registry.oracle.com/middleware/oig_cpu
    +[INFO] Domain governancedomain is currently running with image: container-registry.oracle.com/middleware/oig_cpu:12.2.1.4-jdk8-ol7-<April`23>
    +current no of pods under governancedomain are 3
    +[INFO] The pod helper already exists in namespace oigns.
    +[INFO] Deleting pod helper
    +pod "helper" deleted
    +[INFO] Fetched Image Pull Secret: orclcred
    +[INFO] Creating new helper pod with image: container-registry.oracle.com/middleware/oig_cpu:12.2.1.4-jdk8-ol7-<October`23>
    +pod/helper created
    +Checking helper  Running
    +[INFO] Stopping Admin, SOA and OIM servers in domain governancedomain. This may take some time, monitor log /scratch/OIGK8Slatest/fmw-kubernetes/OracleIdentityGovernance/kubernetes/domain-lifecycle/log/oim_patch_log-<DATE>/stop_servers.log for details
    +[INFO] All servers are now stopped successfully. Proceeding with DB Schema changes
    +[INFO] Patching OIM schemas...
    +[INFO] DB schema update successful. Check log /scratch/OIGK8Slatest/fmw-kubernetes/OracleIdentityGovernance/kubernetes/domain-lifecycle/log/oim_patch_log-<DATE>/patch_oim_wls.log for details
    +[INFO] Starting Admin, SOA and OIM servers with new image container-registry.oracle.com/middleware/oig_cpu:12.2.1.4-jdk8-ol7-<October`23>
    +[INFO] Waiting for 3 weblogic pods to be ready..This may take several minutes, do not close the window. Check log /scratch/OIGK8Slatest/fmw-kubernetes/OracleIdentityGovernance/kubernetes/domain-lifecycle/log/oim_patch_log-<DATE>/monitor_weblogic_pods.log for progress
    +[SUCCESS] All servers under governancedomain are now in ready state with new image: container-registry.oracle.com/middleware/oig_cpu:12.2.1.4-jdk8-ol7-<October`23>
    +

    The logs are available at $WORKDIR/kubernetes/domain-lifecycle by default. A custom log location can also be provided to the script.

    +

    Note: If the patch domain script fails, refer to the Troubleshooting section.

    +
  2. +
+ + +
+ +
+ + + +
+ + +
+ + + +
+ +
+
+
+ + + + + + + + + + + + + + + + + + + diff --git a/docs/23.4.1/idm-products/oig/patch-and-upgrade/upgrade-an-ingress/index.html b/docs/23.4.1/idm-products/oig/patch-and-upgrade/upgrade-an-ingress/index.html new file mode 100644 index 000000000..96ddfe007 --- /dev/null +++ b/docs/23.4.1/idm-products/oig/patch-and-upgrade/upgrade-an-ingress/index.html @@ -0,0 +1,4091 @@ + + + + + + + + + + + + c. Upgrade Ingress :: Oracle Fusion Middleware on Kubernetes + + + + + + + + + + + + + + + + + + + + + + + + + +
+
+
+ +
+
+ + + + +
+
+ +
+
+ + +
+
+ +
+ +
+ +
+ +

+ + c. Upgrade Ingress +

+ + + + + + +

This section shows how to upgrade the ingress.

+

To determine if this step is required for the version you are upgrading to, refer to the Release Notes.

+

Upgrading the ingress

+

To upgrade the existing ingress rules, follow the steps below:

+
    +
  1. +

    List the existing ingress:

    +
    $ helm list -n <domain_namespace>
    +

    For example:

    +
    $ helm list -n oigns
    +

    The output will look similar to the following:

    +
    NAME                        NAMESPACE       REVISION        UPDATED    STATUS      CHART                           APP VERSION
    +governancedomain-nginx      oigns           1               <DATE>     deployed    ingress-per-domain-0.1.0        1.0
    +
  2. +
  3. +

    Make sure you have downloaded the latest code as per Download the latest code repository.

    +
  4. +
  5. +

    Edit the $WORKDIR/kubernetes/charts/ingress-per-domain/values.yaml and change the domainUID parameter to match your domainUID, for example domainUID: governancedomain. Change sslType to NONSSL or SSL depending on your existing configuration. For example:

    +
    # Load balancer type. Supported values are: NGINX
    +type: NGINX
    +
    +# SSL configuration Type. Supported Values are : NONSSL,SSL
    +sslType: SSL
    +
    +# domainType. Supported values are: oim
    +domainType: oim
    +
    +#WLS domain as backend to the load balancer
    +wlsDomain:
    +  domainUID: governancedomain
    +  adminServerName: AdminServer
    +  adminServerPort: 7001
    +  adminServerSSLPort:
    +  soaClusterName: soa_cluster
    +  soaManagedServerPort: 8001
    +  soaManagedServerSSLPort:
    +  oimClusterName: oim_cluster
    +  oimManagedServerPort: 14000
    +  oimManagedServerSSLPort:
    +
    +
    +# Host  specific values
    +hostName:
    +  enabled: false
    +  admin:
    +  runtime:
    +  internal:
    +
    +# Ngnix specific values
    +nginx:
    +  nginxTimeOut: 180
    +
  6. +
  7. +

    Upgrade the governancedomain-nginx with the following command:

    +
    $ cd $WORKDIR
    +$ helm upgrade <ingress> kubernetes/charts/ingress-per-domain/ --namespace <domain_namespace> --values kubernetes/charts/ingress-per-domain/values.yaml --reuse-values
    +

    For example:

    +
    $ cd $WORKDIR
    +$ helm upgrade governancedomain-nginx kubernetes/charts/ingress-per-domain/ --namespace oigns --values kubernetes/charts/ingress-per-domain/values.yaml --reuse-values
    +

    The output will look similar to the following:

    +
    Release "governancedomain-nginx" has been upgraded. Happy Helming!
    +NAME: governancedomain-nginx
    +LAST DEPLOYED: <DATE>
    +NAMESPACE: oigns
    +STATUS: deployed
    +REVISION: 2
    +TEST SUITE: None
    +
  8. +
  9. +

    List the ingress:

    +
    $ kubectl get ing -n oigns
    +

    The output will look similar to the following:

    +
    NAME                       CLASS    HOSTS   ADDRESS        PORTS   AGE
    +governancedomain-nginx     <none>   *       10.107.182.40  80      18s
    +
  10. +
  11. +

    Describe the ingress and make sure all the listed paths are accessible:

    +
    $ kubectl describe ing governancedomain-nginx -n oigns
    +

    The output will look similar to the following:

    +
    Name:             governancedomain-nginx
    +Namespace:        oigns
    +Address:          10.107.182.40
    +Default backend:  default-http-backend:80 (<error: endpoints "default-http-backend" not found>)
    +Rules:
    +  Host        Path  Backends
    +  ----        ----  --------
    +  *
    +              /console                        governancedomain-adminserver:7001 (10.244.4.240:7001)
    +              /consolehelp                    governancedomain-adminserver:7001 (10.244.4.240:7001)
    +              /em                             governancedomain-adminserver:7001 (10.244.4.240:7001)
    +              /ws_utc                         governancedomain-cluster-soa-cluster:8001 (10.244.4.242:8001)
    +              /soa                            governancedomain-cluster-soa-cluster:8001 (10.244.4.242:8001)
    +              /integration                    governancedomain-cluster-soa-cluster:8001 (10.244.4.242:8001)
    +              /soa-infra                      governancedomain-cluster-soa-cluster:8001 (10.244.4.242:8001)
    +              /identity                       governancedomain-cluster-oim-cluster:14000 (10.244.4.241:14000)
    +              /admin                          governancedomain-cluster-oim-cluster:14000 (10.244.4.241:14000)
    +              /oim                            governancedomain-cluster-oim-cluster:14000 (10.244.4.241:14000)
    +              /sysadmin                       governancedomain-cluster-oim-cluster:14000 (10.244.4.241:14000)
    +              /workflowservice                governancedomain-cluster-oim-cluster:14000 (10.244.4.241:14000)
    +              /callbackResponseService        governancedomain-cluster-oim-cluster:14000 (10.244.4.241:14000)
    +              /spml-xsd                       governancedomain-cluster-oim-cluster:14000 (10.244.4.241:14000)
    +              /HTTPClnt                       governancedomain-cluster-oim-cluster:14000 (10.244.4.241:14000)
    +              /reqsvc                         governancedomain-cluster-oim-cluster:14000 (10.244.4.241:14000)
    +              /iam                            governancedomain-cluster-oim-cluster:14000 (10.244.4.241:14000)
    +              /provisioning-callback          governancedomain-cluster-oim-cluster:14000 (10.244.4.241:14000)
    +              /CertificationCallbackService   governancedomain-cluster-oim-cluster:14000 (10.244.4.241:14000)
    +              /ucs                            governancedomain-cluster-oim-cluster:14000 (10.244.4.241:14000)
    +              /FacadeWebApp                   governancedomain-cluster-oim-cluster:14000 (10.244.4.241:14000)
    +              /OIGUI                          governancedomain-cluster-oim-cluster:14000 (10.244.4.241:14000)
    +              /weblogic                       governancedomain-cluster-oim-cluster:14000 (10.244.4.241:14000)
    +Annotations:  kubernetes.io/ingress.class: nginx
    +              meta.helm.sh/release-name: governancedomain-nginx
    +              meta.helm.sh/release-namespace: oigns
    +              nginx.ingress.kubernetes.io/affinity: cookie
    +              nginx.ingress.kubernetes.io/affinity-mode: persistent
    +              nginx.ingress.kubernetes.io/configuration-snippet:
    +                more_clear_input_headers "WL-Proxy-Client-IP" "WL-Proxy-SSL";
    +                more_set_input_headers "X-Forwarded-Proto: https";
    +                more_set_input_headers "WL-Proxy-SSL: true";
    +              nginx.ingress.kubernetes.io/enable-access-log: false
    +              nginx.ingress.kubernetes.io/ingress.allow-http: false
    +              nginx.ingress.kubernetes.io/proxy-buffer-size: 2000k
    +              nginx.ingress.kubernetes.io/proxy-read-timeout: 180
    +              nginx.ingress.kubernetes.io/proxy-send-timeout: 180
    +              nginx.ingress.kubernetes.io/session-cookie-name: sticky
    +Events:
    +  Type    Reason  Age                From                      Message
    +  ----    ------  ----               ----                      -------
    +  Normal  Sync    51m (x3 over 54m)  nginx-ingress-controller  Scheduled for sync
    +
  12. +
+ + +
+ +
+ + + +
+ + +
+ + + +
+ +
+
+
+ + + + + + + + + + + + + + + + + + + diff --git a/docs/23.4.1/idm-products/oig/patch-and-upgrade/upgrade-an-operator-release/index.html b/docs/23.4.1/idm-products/oig/patch-and-upgrade/upgrade-an-operator-release/index.html new file mode 100644 index 000000000..17cfd3559 --- /dev/null +++ b/docs/23.4.1/idm-products/oig/patch-and-upgrade/upgrade-an-operator-release/index.html @@ -0,0 +1,4002 @@ + + + + + + + + + + + + a. Upgrade an operator release :: Oracle Fusion Middleware on Kubernetes + + + + + + + + + + + + + + + + + + + + + + + + + +
+
+
+ +
+
+ + + + +
+
+ +
+
+ + +
+
+ +
+ +
+ +
+ +

+ + a. Upgrade an operator release +

+ + + + + + +

These instructions apply to upgrading operators from 3.X.X to 4.X, or from within the 4.x release family as additional versions are released.

+
    +
  1. +

    On the master node, download the new WebLogic Kubernetes Operator source code from the operator github project:

    +
    $ mkdir <workdir>/weblogic-kubernetes-operator-4.X.X
    +$ cd <workdir>/weblogic-kubernetes-operator-4.X.X
    +$ git clone https://github.com/oracle/weblogic-kubernetes-operator.git --branch v4.X.X 
    +

    For example:

    +
    $ mkdir /scratch/OIGK8S/weblogic-kubernetes-operator-4.X.X
    +$ cd /scratch/OIGK8S/weblogic-kubernetes-operator-4.X.X
    +$ git clone https://github.com/oracle/weblogic-kubernetes-operator.git --branch v4.X.X  
    +

    This will create the directory <workdir>/weblogic-kubernetes-operator-4.X.X/weblogic-kubernetes-operator

    +
  2. +
  3. +

    Run the following helm command to upgrade the operator:

    +
    $ cd <workdir>/weblogic-kubernetes-operator-4.X.X/weblogic-kubernetes-operator
    +$ helm upgrade --reuse-values --set image=ghcr.io/oracle/weblogic-kubernetes-operator:4.X.X --namespace <sample-kubernetes-operator-ns> --wait weblogic-kubernetes-operator kubernetes/charts/weblogic-operator
    +

    For example:

    +
    $ cd /scratch/OIGK8S/weblogic-kubernetes-operator-4.X.X/weblogic-kubernetes-operator
    +$ helm upgrade --reuse-values --set image=ghcr.io/oracle/weblogic-kubernetes-operator:4.X.X --namespace operator --wait weblogic-kubernetes-operator kubernetes/charts/weblogic-operator
    +

    The output will look similar to the following:

    +
    Release "weblogic-kubernetes-operator" has been upgraded. Happy Helming!
    +NAME: weblogic-kubernetes-operator
    +LAST DEPLOYED: <DATE>
    +NAMESPACE: operator
    +STATUS: deployed
    +REVISION: 2
    +TEST SUITE: None
    +
  4. +
  5. +

    Verify that the operator’s pod and services are running by executing the following command:

    +
    $ kubectl get all -n <sample-kubernetes-operator-ns>
    +

    For example:

    +
    $ kubectl get all -n opns
    +

    The output will look similar to the following:

    +
    NAME                                             READY   STATUS    RESTARTS   AGE
    +pod/weblogic-operator-b7d6df78c-mfrc4            1/1     Running   0          40s
    +pod/weblogic-operator-webhook-7996b8b58b-frtwp   1/1     Running   0          42s
    +
    +NAME                                     TYPE        CLUSTER-IP     EXTERNAL-IP   PORT(S)             AGE
    +service/weblogic-operator-webhook-svc    ClusterIP   10.106.51.57   <none>        8083/TCP,8084/TCP   42s
    +
    +NAME                                        READY   UP-TO-DATE   AVAILABLE   AGE
    +deployment.apps/weblogic-operator           1/1     1            1           6d
    +deployment.apps/weblogic-operator-webhook   1/1     1            1           42s
    +
    +NAME                                                   DESIRED   CURRENT   READY   AGE
    +replicaset.apps/weblogic-operator-5884685f4f           0         0         0       6d
    +replicaset.apps/weblogic-operator-b7d6df78c            1         1         1       40s
    +replicaset.apps/weblogic-operator-webhook-7996b8b58b   1         1         1       42s
    +

    Note: When you upgrade a 3.x WebLogic Kubernetes Operator to 4.x, the upgrade process creates a WebLogic Domain resource conversion webhook deployment, and associated resources in the same namespace. The webhook automatically and transparently upgrades the existing WebLogic Domains from the 3.x schema to the 4.x schema. For more information, see Domain Upgrade in the WebLogic Kubernetes Operator documentation.

    +

    Note: In WebLogic Kubernetes Operator 4.X, changes are made to serverStartPolicy that affect starting/stopping of the domain. Refer to the serverStartPolicy entry in the create-domain-inputs.yaml for more information. Also see Domain Life Cycle.

    +
  6. +
+ + +
+ +
+ + + +
+ + +
+ + + +
+ +
+
+
+ + + + + + + + + + + + + + + + + + + diff --git a/docs/23.4.1/idm-products/oig/patch-and-upgrade/upgrade-elk/index.html b/docs/23.4.1/idm-products/oig/patch-and-upgrade/upgrade-elk/index.html new file mode 100644 index 000000000..c905e4558 --- /dev/null +++ b/docs/23.4.1/idm-products/oig/patch-and-upgrade/upgrade-elk/index.html @@ -0,0 +1,3984 @@ + + + + + + + + + + + + d. Upgrade Elasticsearch and Kibana :: Oracle Fusion Middleware on Kubernetes + + + + + + + + + + + + + + + + + + + + + + + + + +
+
+
+ +
+
+ + + + + + + +
+
+ +
+ +
+ +
+ +

+ + d. Upgrade Elasticsearch and Kibana +

+ + + + + + +

This section shows how to upgrade Elasticsearch and Kibana.

+

To determine if this step is required for the version you are upgrading to, refer to the Release Notes.

+

Download the latest code repository

+
    +
  1. Make sure you have downloaded the latest code as per Download the latest code repository.
  2. +
+

Undeploy Elasticsearch and Kibana

+

From October 22 (22.4.1) onwards, OIG logs should be stored on a centralized Elasticsearch and Kibana stack.

+

Deployments prior to October 22 (22.4.1) used local deployments of Elasticsearch and Kibana.

+

If you are upgrading from July 22 (22.3.1) or earlier, to October 22 (22.4.1) or later, you must first undeploy Elasticsearch and Kibana using the steps below:

+
    +
  1. +

    Edit the $WORKDIR/kubernetes/elasticsearch-and-kibana/elasticsearch_and_kibana.yaml and change all instances of namespace to correspond to your deployment.

    +
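    One way to locate every namespace reference before editing (an optional check, not part of the original steps) is a simple grep:

    +
    $ grep -n "namespace:" $WORKDIR/kubernetes/elasticsearch-and-kibana/elasticsearch_and_kibana.yaml
    +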
  2. +
  3. +

    Delete the Elasticsearch and Kibana resources using the following command:

    +
    $ kubectl delete -f $WORKDIR/kubernetes/elasticsearch-and-kibana/elasticsearch_and_kibana.yaml
    +
  4. +
+

Deploy Elasticsearch and Kibana in centralized stack

+
    +
  1. Follow Install Elasticsearch stack and Kibana to deploy Elasticsearch and Kibana in a centralized stack.
  2. +
+ + +
+ +
+ + + +
+ + +
+ + + +
+ +
+
+
+ + + + + + + + + + + + + + + + + + + diff --git a/docs/23.4.1/idm-products/oig/post-install-config/index.html b/docs/23.4.1/idm-products/oig/post-install-config/index.html new file mode 100644 index 000000000..44ad2b9c3 --- /dev/null +++ b/docs/23.4.1/idm-products/oig/post-install-config/index.html @@ -0,0 +1,4012 @@ + + + + + + + + + + + + Post install configuration :: Oracle Fusion Middleware on Kubernetes + + + + + + + + + + + + + + + + + + + + + + + + + +
+
+
+ +
+
+ + + + +
+
+ +
+
+ + +
+
+ +
+ +
+ +
+ +

+ + Post install configuration +

+ + + + + + + +

Follow these post install configuration steps.

+ + + + + + + + + + + + + + + + + +
+ +
+ + +
+ + +
+ + + +
+ +
+
+
+ + + + + + + + + + + + + + + + + + diff --git a/docs/23.4.1/idm-products/oig/post-install-config/index.xml b/docs/23.4.1/idm-products/oig/post-install-config/index.xml new file mode 100644 index 000000000..f1523cb09 --- /dev/null +++ b/docs/23.4.1/idm-products/oig/post-install-config/index.xml @@ -0,0 +1,39 @@ + + + + Post install configuration on Oracle Fusion Middleware on Kubernetes + /fmw-kubernetes/23.4.1/idm-products/oig/post-install-config/ + Recent content in Post install configuration on Oracle Fusion Middleware on Kubernetes + Hugo -- gohugo.io + en-us + + + + + + a. Post Install Tasks + /fmw-kubernetes/23.4.1/idm-products/oig/post-install-config/set_oimfronendurl_using_mbeans/ + Mon, 01 Jan 0001 00:00:00 +0000 + + /fmw-kubernetes/23.4.1/idm-products/oig/post-install-config/set_oimfronendurl_using_mbeans/ + Follow these post install configuration steps. + Create a Server Overrides File Set OIMFrontendURL using MBeans Create a Server Overrides File Navigate to the following directory: +cd $WORKDIR/kubernetes/create-oim-domain/domain-home-on-pv/output/weblogic-domains/governancedomain Create a setUserOverrides.sh with the following contents: +DERBY_FLAG=false JAVA_OPTIONS=&quot;${JAVA_OPTIONS} -Djava.net.preferIPv4Stack=true&quot; MEM_ARGS=&quot;-Xms8192m -Xmx8192m&quot; Copy the setUserOverrides.sh file to the Administration Server pod: +$ chmod 755 setUserOverrides.sh $ kubectl cp setUserOverrides.sh oigns/governancedomain-adminserver:/u01/oracle/user_projects/domains/governancedomain/bin/setUserOverrides.sh Where oigns is the OIG namespace and governancedomain is the domain_UID. + + + + b. Install and configure connectors + /fmw-kubernetes/23.4.1/idm-products/oig/post-install-config/install_and_configure_connectors/ + Mon, 01 Jan 0001 00:00:00 +0000 + + /fmw-kubernetes/23.4.1/idm-products/oig/post-install-config/install_and_configure_connectors/ + Download the connector Download the Connector you are interested in from Oracle Identity Manager Connector Downloads. + Copy the connector zip file to a staging directory on the master node e.g. &lt;workdir&gt;/stage and unzip it: +$ cp $HOME/Downloads/&lt;connector&gt;.zip &lt;workdir&gt;/&lt;stage&gt;/ $ cd &lt;workdir&gt;/&lt;stage&gt; $ unzip &lt;connector&gt;.zip $ chmod -R 755 * For example: +$ cp $HOME/Downloads/Exchange-12.2.1.3.0.zip /scratch/OIGK8S/stage/ $ cd /scratch/OIGK8S/stage/ $ unzip exchange-12.2.1.3.0.zip $ chmod -R 755 * Copy OIG connectors There are two options to copy OIG Connectors to your Kubernetes cluster: + + + + \ No newline at end of file diff --git a/docs/23.4.1/idm-products/oig/post-install-config/install_and_configure_connectors/index.html b/docs/23.4.1/idm-products/oig/post-install-config/install_and_configure_connectors/index.html new file mode 100644 index 000000000..cc2f44328 --- /dev/null +++ b/docs/23.4.1/idm-products/oig/post-install-config/install_and_configure_connectors/index.html @@ -0,0 +1,4007 @@ + + + + + + + + + + + + b. Install and configure connectors :: Oracle Fusion Middleware on Kubernetes + + + + + + + + + + + + + + + + + + + + + + + + + +
+
+
+ +
+
+ + + + + + + +
+
+ +
+ +
+ +
+ +

+ + b. Install and configure connectors +

+ + + + + + +

Download the connector

+
    +
  1. +

    Download the Connector you are interested in from Oracle Identity Manager Connector Downloads.

    +
  2. +
  3. +

    Copy the connector zip file to a staging directory on the master node, for example <workdir>/stage, and unzip it:

    +
    $ cp $HOME/Downloads/<connector>.zip <workdir>/<stage>/
    +$ cd <workdir>/<stage>
    +$ unzip <connector>.zip
    +$ chmod -R 755 *
    +

    For example:

    +
    $ cp $HOME/Downloads/Exchange-12.2.1.3.0.zip /scratch/OIGK8S/stage/
    +$ cd /scratch/OIGK8S/stage/
    +$ unzip exchange-12.2.1.3.0.zip
    +$ chmod -R 755 *
    +
  4. +
+

Copy OIG connectors

+

There are two options to copy OIG Connectors to your Kubernetes cluster:

+
    +
  • a) Copy the connector directly to the Persistent Volume
  • +
  • b) Use the kubectl cp command to copy the connector to the Persistent Volume
  • +
+

It is recommended to use option a). However, there may be cases (for example, when using a managed service such as Oracle Kubernetes Engine on Oracle Cloud Infrastructure) where it is not feasible to mount the domain directory directly. In such cases, use option b).

+

a) Copy the connector directly to the persistent volume

+
    +
  1. +

    Copy the connector zip file to the persistent volume. For example:

    +
    $ cp -R <path_to>/<connector> <persistent_volume>/governancedomainpv/ConnectorDefaultDirectory/
    +

    For example:

    +
    $ cp -R /scratch/OIGK8S/stage/Exchange-12.2.1.3.0 /scratch/shared/governancedomainpv/ConnectorDefaultDirectory/
    +
  2. +
+

b) Use the kubectl cp command to copy the connector to the persistent volume

+
    +
  1. +

    Run the following command to copy over the connector:

    +
    $ kubectl -n <domain_namespace> cp <path_to>/<connector> <pod_name>:/u01/oracle/idm/server/ConnectorDefaultDirectory/
    +

    For example:

    +
    $ kubectl -n oigns cp /scratch/OIGK8S/stage/Exchange-12.2.1.3.0 governancedomain-oim-server1:/u01/oracle/idm/server/ConnectorDefaultDirectory/
    +
  2. +
+
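
Whichever option you use, it is worth confirming that the connector files are visible inside the OIM server pod before installing the connector. A minimal check, assuming the example namespace and OIM server pod name used above:

    +
    $ kubectl exec -n oigns governancedomain-oim-server1 -- ls /u01/oracle/idm/server/ConnectorDefaultDirectory/
    +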

Install the connector

+

The connectors are installed in the same way as on a standard on-premises setup, either via Application Onboarding or via the Connector Installer.

+

Refer to your connector-specific documentation for instructions.

+ + +
+ +
+ + + +
+ + +
+ + + +
+ +
+
+
+ + + + + + + + + + + + + + + + + + + diff --git a/docs/23.4.1/idm-products/oig/post-install-config/set_oimfronendurl_using_mbeans/index.html b/docs/23.4.1/idm-products/oig/post-install-config/set_oimfronendurl_using_mbeans/index.html new file mode 100644 index 000000000..dd6f3410d --- /dev/null +++ b/docs/23.4.1/idm-products/oig/post-install-config/set_oimfronendurl_using_mbeans/index.html @@ -0,0 +1,4052 @@ + + + + + + + + + + + + a. Post Install Tasks :: Oracle Fusion Middleware on Kubernetes + + + + + + + + + + + + + + + + + + + + + + + + + +
+
+
+ +
+
+ + + + + + + +
+
+ +
+ +
+ +
+ +

+ + a. Post Install Tasks +

+ + + + + + +

Follow these post install configuration steps.

+
    +
  1. Create a Server Overrides File
  2. +
  3. Set OIMFrontendURL using MBeans
  4. +
+

Create a Server Overrides File

+
    +
  1. +

    Navigate to the following directory:

    +
    cd $WORKDIR/kubernetes/create-oim-domain/domain-home-on-pv/output/weblogic-domains/governancedomain
    +
  2. +
  3. +

    Create a file called setUserOverrides.sh with the following contents:

    +
    DERBY_FLAG=false
    +JAVA_OPTIONS="${JAVA_OPTIONS} -Djava.net.preferIPv4Stack=true"
    +MEM_ARGS="-Xms8192m -Xmx8192m"
    +
  4. +
  5. +

    Copy the setUserOverrides.sh file to the Administration Server pod:

    +
    $ chmod 755 setUserOverrides.sh
    +$ kubectl cp setUserOverrides.sh oigns/governancedomain-adminserver:/u01/oracle/user_projects/domains/governancedomain/bin/setUserOverrides.sh
    +

    Where oigns is the OIG namespace and governancedomain is the domain_UID.

    +
  6. +
  7. +

    Stop the OIG domain using the following command:

    +
    $ kubectl -n <domain_namespace> patch domains <domain_uid> --type='json' -p='[{"op": "replace", "path": "/spec/serverStartPolicy", "value": "Never" }]'
    +

    For example:

    +
    $ kubectl -n oigns patch domains governancedomain --type='json' -p='[{"op": "replace", "path": "/spec/serverStartPolicy", "value": "Never" }]'
    +

    The output will look similar to the following:

    +
    domain.weblogic.oracle/governancedomain patched
    +
  8. +
  9. +

    Check that all the pods are stopped:

    +
    $ kubectl get pods -n <domain_namespace>
    +

    For example:

    +
    $ kubectl get pods -n oigns
    +

    The output will look similar to the following:

    +
    NAME                                                 READY    STATUS        RESTARTS   AGE
    +governancedomain-adminserver                         1/1     Terminating    0          18h
    +governancedomain-create-fmw-infra-domain-job-8cww8   0/1     Completed      0          24h
    +governancedomain-oim-server1                         1/1     Terminating    0          18h
    +governancedomain-soa-server1                         1/1     Terminating    0          18h
    +helper                                               1/1     Running        0          41h
    +

    The Administration Server pod and Managed Server pods will move to a STATUS of Terminating. After a few minutes, run the command again and the pods should have disappeared:

    +
    NAME                                                 READY   STATUS      RESTARTS   AGE
    +governancedomain-create-fmw-infra-domain-job-8cww8   0/1     Completed   0          24h
    +helper                                               1/1     Running     0          41h
    +
  10. +
  11. +

    Start the domain using the following command:

    +
    $ kubectl -n <domain_namespace> patch domains <domain_uid> --type='json' -p='[{"op": "replace", "path": "/spec/serverStartPolicy", "value": "IfNeeded" }]'
    +

    For example:

    +
    $ kubectl -n oigns patch domains governancedomain --type='json' -p='[{"op": "replace", "path": "/spec/serverStartPolicy", "value": "IfNeeded" }]'
    +

    Run the following kubectl command to view the pods:

    +
    $ kubectl get pods -n <domain_namespace>
    +

    For example:

    +
    $ kubectl get pods -n oigns
    +

    The output will look similar to the following:

    +
    NAME                                                 READY   STATUS      RESTARTS   AGE
    +governancedomain-create-fmw-infra-domain-job-vj69h   0/1     Completed   0          24h
    +governancedomain-introspect-domain-job-7qx29         1/1     Running     0          8s
    +helper                                               1/1     Running     0          41h
    +

    The Administration Server pod will start followed by the OIG Managed Servers pods. This process will take several minutes, so keep executing the command until all the pods are running with READY status 1/1:

    +
    NAME                                                READY   STATUS      RESTARTS   AGE  
    +governancedomain-adminserver                        1/1     Running     0          6m4s
    +governancedomain-create-fmw-infra-domain-job-vj69h  0/1     Completed   0          24h
    +governancedomain-oim-server1                        1/1     Running     0          3m5s
    +governancedomain-soa-server1                        1/1     Running     0          3m5s
    +helper                                              1/1     Running     0          41h
    +
  12. +
+
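
As a quick sanity check after either patch command, you can read the policy back from the domain resource before waiting on the pods. For example, using the governancedomain example above:

    +
    $ kubectl -n oigns get domain governancedomain -o jsonpath='{.spec.serverStartPolicy}'
    +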

Set OIMFrontendURL using MBeans

+
    +
  1. +

    Login to Oracle Enterprise Manager using the following URL:

    +

    https://${MASTERNODE-HOSTNAME}:${MASTERNODE-PORT}/em

    +
  2. +
  3. +

    Click the Target Navigation icon in the top left of the screen and navigate to the following:

    +
      +
    • Expand Identity and Access > Access > OIM > oim
    • +
    • Right click the instance oim and select System MBean Browser
    • +
    • Under Application Defined MBeans, navigate to oracle.iam > Server:oim_server1 > Application:oim > XMLConfig > Config > XMLConfig.DiscoveryConfig > Discovery.
    • +
    +
  4. +
  5. +

    Enter a new value for the OimFrontEndURL attribute, in the format:

    +
      +
    • If using an External LoadBalancer for your ingress: https://${LOADBALANCER-HOSTNAME}:${LOADBALANCER-PORT}
    • +
    • If using NodePort for your ingress: https://${MASTERNODE-HOSTNAME}:${MASTERNODE-PORT}
    • +
    +

    If using HTTP instead of HTTPS for your ingress, change the URL appropriately.

    +

    Then click Apply.

    +
  6. +
+ + +
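
Note: If you are using a NodePort ingress and are unsure which port to use in the OimFrontEndURL value, one option is to list the ingress controller service and read its NodePort from the PORT(S) column. This is only a sketch; <ingress_namespace> is a placeholder for wherever your ingress controller is installed:

    +
    $ kubectl get svc -n <ingress_namespace>
    +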
+ +
+ + + +
+ + +
+ + + +
+ +
+
+
+ + + + + + + + + + + + + + + + + + + diff --git a/docs/23.4.1/idm-products/oig/prepare-your-environment/index.html b/docs/23.4.1/idm-products/oig/prepare-your-environment/index.html new file mode 100644 index 000000000..5c65555c0 --- /dev/null +++ b/docs/23.4.1/idm-products/oig/prepare-your-environment/index.html @@ -0,0 +1,4692 @@ + + + + + + + + + + + + Prepare your environment :: Oracle Fusion Middleware on Kubernetes + + + + + + + + + + + + + + + + + + + + + + + + + +
+
+
+ +
+ +
+ +
+ +
+ +
+ +

+ + Prepare your environment +

+ + + + + + + +

To prepare for Oracle Identity Governance deployment in a Kubernetes environment, complete the following steps:

+
    +
  1. +

    Check the Kubernetes cluster is ready

    +
  2. +
  3. +

    Obtain the OIG container image

    +
  4. +
  5. +

    Setup the code repository to deploy OIG domains

    +
  6. +
  7. +

    Install the WebLogic Kubernetes Operator

    +
  8. +
  9. +

    Create a namespace for Oracle Identity Governance

    +
  10. +
  11. +

    Create a Kubernetes secret for the container registry

    +
  12. +
  13. +

    RCU schema creation

    +
  14. +
  15. +

    Preparing the environment for domain creation

    +

    a. Creating Kubernetes secrets for the domain and RCU

    +

    b. Create a Kubernetes persistent volume and persistent volume claim

    +
  16. +
+

Check the Kubernetes cluster is ready

+

As per the Prerequisites, a Kubernetes cluster should have already been configured.

+
    +
  1. +

    Run the following command on the master node to check the cluster and worker nodes are running:

    +
    $ kubectl get nodes,pods -n kube-system
    +

    The output will look similar to the following:

    +
    NAME                  STATUS   ROLES    AGE   VERSION
    +node/worker-node1     Ready    <none>   17h   v1.26.6+1.el8
    +node/worker-node2     Ready    <none>   17h   v1.26.6+1.el8
    +node/master-node      Ready    master   23h   v1.26.6+1.el8
    +
    +NAME                                     READY   STATUS    RESTARTS   AGE
    +pod/coredns-66bff467f8-fnhbq             1/1     Running   0          23h
    +pod/coredns-66bff467f8-xtc8k             1/1     Running   0          23h
    +pod/etcd-master                          1/1     Running   0          21h
    +pod/kube-apiserver-master-node           1/1     Running   0          21h
    +pod/kube-controller-manager-master-node  1/1     Running   0          21h
    +pod/kube-flannel-ds-amd64-lxsfw          1/1     Running   0          17h
    +pod/kube-flannel-ds-amd64-pqrqr          1/1     Running   0          17h
    +pod/kube-flannel-ds-amd64-wj5nh          1/1     Running   0          17h
    +pod/kube-proxy-2kxv2                     1/1     Running   0          17h
    +pod/kube-proxy-82vvj                     1/1     Running   0          17h
    +pod/kube-proxy-nrgw9                     1/1     Running   0          23h
    +pod/kube-scheduler-master                1/1     Running   0          21h
    +
  2. +
+
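
Optionally, also confirm the kubectl client and server versions, and that Helm is available, since both are needed for the steps that follow. For example:

    +
    $ kubectl version
    +$ helm version
    +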

Obtain the OIG container image

+

The OIG Kubernetes deployment requires access to an OIG container image. The image can be obtained in the following ways:

+
    +
  • Prebuilt OIG container image
  • +
  • Build your own OIG container image using WebLogic Image Tool
  • +
+

Prebuilt OIG container image

+

The latest prebuilt OIG October 2023 container image can be downloaded from Oracle Container Registry. This image is prebuilt by Oracle and includes Oracle Identity Governance 12.2.1.4.0, the October Patch Set Update (PSU), and other fixes released with the Critical Patch Update (CPU) program.

+

Note: Before using this image you must log in to Oracle Container Registry, navigate to Middleware > oig_cpu, and accept the license agreement.

+

You can use this image in the following ways:

+
    +
  • Pull the container image from the Oracle Container Registry automatically during the OIG Kubernetes deployment.
  • +
  • Manually pull the container image from the Oracle Container Registry and then upload it to your own container registry.
  • +
  • Manually pull the container image from the Oracle Container Registry and manually stage it on the master node and each worker node.
  • +
+

Build your own OIG container image using WebLogic Image Tool

+

You can build your own OIG container image using the WebLogic Image Tool. This is recommended if you need to apply one-off patches to a prebuilt OIG container image. For more information about building your own container image with WebLogic Image Tool, see Create or update image.

+

You can use an image built with WebLogic Image Tool in the following ways:

+
    +
  • Manually upload them to your own container registry.
  • +
  • Manually stage them on the master node and each worker node.
  • +
+

Note: This documentation does not tell you how to pull or push the above images into a private container registry, or stage them on the master and worker nodes. Details of this can be found in the Enterprise Deployment Guide.

+
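
If you do stage the prebuilt image in your own registry, the flow is a standard pull, tag and push. The following is only a sketch: it assumes a Docker compatible CLI, that you have already accepted the license on Oracle Container Registry, and uses <your_registry> as a placeholder for your private registry:

    +
    $ docker login container-registry.oracle.com
    +$ docker pull container-registry.oracle.com/middleware/oig_cpu:12.2.1.4-jdk8-ol7-<October`23>
    +$ docker tag container-registry.oracle.com/middleware/oig_cpu:12.2.1.4-jdk8-ol7-<October`23> <your_registry>/oig_cpu:12.2.1.4-jdk8-ol7-<October`23>
    +$ docker login <your_registry>
    +$ docker push <your_registry>/oig_cpu:12.2.1.4-jdk8-ol7-<October`23>
    +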

Setup the code repository to deploy OIG domains

+

Oracle Identity Governance domain deployment on Kubernetes leverages the WebLogic Kubernetes Operator infrastructure. For deploying the OIG domains, you need to set up the deployment scripts on the master node as below:

+
    +
  1. +

    Create a working directory to set up the source code.

    +
    $ mkdir <workdir>
    +

    For example:

    +
    $ mkdir /scratch/OIGK8S
    +
  2. +
  3. +

    Download the latest OIG deployment scripts from the OIG repository.

    +
    $ cd <workdir>
    +$ git clone https://github.com/oracle/fmw-kubernetes.git --branch release/23.4.1
    +

    For example:

    +
    $ cd /scratch/OIGK8S
    +$ git clone https://github.com/oracle/fmw-kubernetes.git --branch release/23.4.1
    +
  4. +
  5. +

    Set the $WORKDIR environment variable as follows:

    +
    $ export WORKDIR=<workdir>/fmw-kubernetes/OracleIdentityGovernance
    +

    For example:

    +
    $ export WORKDIR=/scratch/OIGK8S/fmw-kubernetes/OracleIdentityGovernance
    +
  6. +
  7. +

    Run the following command to see if any WebLogic custom resource definitions already exist:

    +
    $ kubectl get crd
    +

    In the output you should see:

    +
    No resources found in default namespace.
    +

    If you see any of the following:

    +
    NAME                      AGE
    +clusters.weblogic.oracle  5d
    +domains.weblogic.oracle   5d
    +

    then run the following command to delete the existing CRDs:

    +
    $ kubectl delete crd clusters.weblogic.oracle
    +$ kubectl delete crd domains.weblogic.oracle
    +
  8. +
+

Install the WebLogic Kubernetes Operator

+
    +
  1. +

    On the master node run the following command to create a namespace for the operator:

    +
    $ kubectl create namespace <sample-kubernetes-operator-ns>
    +

    For example:

    +
    $ kubectl create namespace opns
    +

    The output will look similar to the following:

    +
    namespace/opns created
    +
  2. +
  3. +

    Create a service account for the operator in the operator’s namespace by running the following command:

    +
    $ kubectl create serviceaccount -n <sample-kubernetes-operator-ns> <sample-kubernetes-operator-sa>
    +

    For example:

    +
    $ kubectl create serviceaccount -n opns op-sa
    +

    The output will look similar to the following:

    +
    serviceaccount/op-sa created
    +
  4. +
  5. +

    Run the following helm command to install and start the operator:

    +
    $ cd $WORKDIR
    +$ helm install weblogic-kubernetes-operator kubernetes/charts/weblogic-operator \
    +--namespace <sample-kubernetes-operator-ns> \
    +--set image=ghcr.io/oracle/weblogic-kubernetes-operator:4.1.2 \
    +--set serviceAccount=<sample-kubernetes-operator-sa> \
    +--set "enableClusterRoleBinding=true" \
    +--set "domainNamespaceSelectionStrategy=LabelSelector" \
    +--set "domainNamespaceLabelSelector=weblogic-operator\=enabled" \
    +--set "javaLoggingLevel=FINE" --wait
    +

    For example:

    +
    $ cd $WORKDIR
    +$ helm install weblogic-kubernetes-operator kubernetes/charts/weblogic-operator \
    +--namespace opns \
    +--set image=ghcr.io/oracle/weblogic-kubernetes-operator:4.1.2 \
    +--set serviceAccount=op-sa \
    +--set "enableClusterRoleBinding=true" \
    +--set "domainNamespaceSelectionStrategy=LabelSelector" \
    +--set "domainNamespaceLabelSelector=weblogic-operator\=enabled" \
    +--set "javaLoggingLevel=FINE" --wait
    +

    The output will look similar to the following:

    +
    NAME: weblogic-kubernetes-operator
    +LAST DEPLOYED: <DATE>
    +NAMESPACE: opns
    +STATUS: deployed
    +REVISION: 1
    +TEST SUITE: None
    +
  6. +
  7. +

    Verify that the operator’s pod and services are running by executing the following command:

    +
    $ kubectl get all -n <sample-kubernetes-operator-ns>
    +

    For example:

    +
    $ kubectl get all -n opns
    +

    The output will look similar to the following:

    +
    NAME                                             READY   STATUS    RESTARTS   AGE
    +pod/weblogic-operator-b7d6df78c-vxnpt            1/1     Running   0          33s
    +pod/weblogic-operator-webhook-7996b8b58b-68l8s   1/1     Running   0          33s
    +
    +NAME                                     TYPE        CLUSTER-IP       EXTERNAL-IP   PORT(S)             AGE
    +service/weblogic-operator-webhook-svc    ClusterIP   10.109.163.130   <none>        8083/TCP,8084/TCP   34s
    +
    +NAME                                        READY   UP-TO-DATE   AVAILABLE   AGE
    +deployment.apps/weblogic-operator           1/1     1            1           33s
    +deployment.apps/weblogic-operator-webhook   1/1     1            1           33s
    +
    +NAME                                                   DESIRED   CURRENT   READY   AGE
    +replicaset.apps/weblogic-operator-b7d6df78c            1         1         1       33s
    +replicaset.apps/weblogic-operator-webhook-7996b8b58b   1         1         1       33s
    +
  8. +
  9. +

    Verify the operator pod’s log:

    +
    $ kubectl logs -n <sample-kubernetes-operator-ns> -c weblogic-operator deployments/weblogic-operator
    +

    For example:

    +
    $ kubectl logs -n opns -c weblogic-operator deployments/weblogic-operator
    +

    The output will look similar to the following:

    +
    {"timestamp":"<DATE>","thread":37,"fiber":"","namespace":"","domainUID":"","level":"FINE","class":"oracle.kubernetes.operator.DeploymentLiveness","method":"run","timeInMillis":1678902295852,"message":"Liveness file last modified time set","exception":"","code":"","headers":{},"body":""}
    +{"timestamp":"<DATE>","thread":42,"fiber":"","namespace":"","domainUID":"","level":"FINE","class":"oracle.kubernetes.operator.DeploymentLiveness","method":"run","timeInMillis":1678902300853,"message":"Liveness file last modified time set","exception":"","code":"","headers":{},"body":""}
    +{"timestamp":"<DATE>","thread":21,"fiber":"","namespace":"","domainUID":"","level":"FINE","class":"oracle.kubernetes.operator.DeploymentLiveness","method":"run","timeInMillis":1678902305854,"message":"Liveness file last modified time set","exception":"","code":"","headers":{},"body":""}
    +
  10. +
+
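
You can re-check the operator release and chart version at any time with Helm. For example:

    +
    $ helm list -n opns
    +$ helm status weblogic-kubernetes-operator -n opns
    +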

Create a namespace for Oracle Identity Governance

+
    +
  1. +

    Run the following command to create a namespace for the domain:

    +
    $ kubectl create namespace <domain_namespace>
    +

    For example:

    +
    $ kubectl create namespace oigns
    +

    The output will look similar to the following:

    +
    namespace/oigns created
    +
  2. +
  3. +

    Run the following command to label the namespace so that the WebLogic Kubernetes Operator can manage it:

    +
    $ kubectl label namespaces <domain_namespace> weblogic-operator=enabled
    +

    For example:

    +
    $ kubectl label namespaces oigns weblogic-operator=enabled
    +

    The output will look similar to the following:

    +
    namespace/oigns labeled
    +
  4. +
  5. +

    Run the following command to check the label was created:

    +
    $ kubectl describe namespace <domain_namespace>
    +

    For example:

    +
    $ kubectl describe namespace oigns
    +

    The output will look similar to the following:

    +
    Name:         oigns
    +Labels:       kubernetes.io/metadata.name=oigns
    +              weblogic-operator=enabled
    +Annotations:  <none>
    +Status:       Active
    +   
    +No resource quota.
    +
    +No LimitRange resource.
    +
  6. +
+
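
Because the operator was installed with domainNamespaceSelectionStrategy=LabelSelector, it manages every namespace that carries the weblogic-operator=enabled label. To list the namespaces it will manage:

    +
    $ kubectl get namespaces -l weblogic-operator=enabled
    +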

Create a Kubernetes secret for the container registry

+

In this section you create a secret that stores the credentials for the container registry where the OIG image is stored.

+

If you are not using a container registry and have loaded the images on each of the master and worker nodes, then there is no need to create the registry secret.

+
    +
  1. +

    Run the following command to create the secret:

    +
    kubectl create secret docker-registry "orclcred" --docker-server=<CONTAINER_REGISTRY> \
    +--docker-username="<USER_NAME>" \
    +--docker-password=<PASSWORD> --docker-email=<EMAIL_ID> \
    +--namespace=<domain_namespace>
    +

    For example, if using Oracle Container Registry:

    +
    kubectl create secret docker-registry "orclcred" --docker-server=container-registry.oracle.com \
    +--docker-username="user@example.com" \
    +--docker-password=password --docker-email=user@example.com \
    +--namespace=oigns
    +

    Replace <USER_NAME> and <PASSWORD> with the credentials for the registry with the following caveats:

    +
      +
    • +

      If using Oracle Container Registry to pull the OIG container image, this is the username and password used to log in to Oracle Container Registry. Before you can use this image you must log in to Oracle Container Registry, navigate to Middleware > oig_cpu, and accept the license agreement.

      +
    • +
    • +

      If using your own container registry to store the OIG container image, this is the username and password (or token) for your container registry.

      +
    • +
    +

    The output will look similar to the following:

    +
    secret/orclcred created
    +
  2. +
+
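
If you want to double check the secret before it is used, you can inspect its type and decode the stored docker configuration to confirm the registry server and username (note that the decoded output also contains the password, so treat it as sensitive). For example:

    +
    $ kubectl get secret orclcred -n oigns -o jsonpath='{.type}'
    +$ kubectl get secret orclcred -n oigns -o jsonpath='{.data.\.dockerconfigjson}' | base64 --decode
    +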

RCU schema creation

+

In this section you create the RCU schemas in the Oracle Database.

+

Before following the steps in this section, make sure that the database and listener are up and running and you can connect to the database via SQL*Plus or other client tool.

+
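
For example, a quick connectivity check from any host with an Oracle client installed (the placeholders are the same values you will export inside the helper pod below):

    +
    $ sqlplus sys/<db_pwd>@//<db_host.domain>:<db_port>/<service_name> as sysdba
    +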
    +
  1. +

    If using Oracle Container Registry or your own container registry for your OIG container image, run the following command to create a helper pod to run RCU:

    +
    $ kubectl run --image=<image_name-from-registry> --image-pull-policy="IfNotPresent" --overrides='{"apiVersion": "v1", "spec":{"imagePullSecrets": [{"name": "orclcred"}]}}' helper -n <domain_namespace> -- sleep infinity
    +

    For example:

    +
    $ kubectl run --image=container-registry.oracle.com/middleware/oig_cpu:12.2.1.4-jdk8-ol7-<October`23> --image-pull-policy="IfNotPresent" --overrides='{"apiVersion": "v1","spec":{"imagePullSecrets": [{"name": "orclcred"}]}}' helper -n oigns -- sleep infinity
    +

    If you are not using a container registry and have loaded the image on each of the master and worker nodes, run the following command:

    +
    $ kubectl run helper --image <image> -n oigns -- sleep infinity
    +

    For example:

    +
    $ kubectl run helper --image oracle/oig:12.2.1.4-jdk8-ol7-<October`23> -n oigns -- sleep infinity
    +

    The output will look similar to the following:

    +
    pod/helper created
    +
  2. +
  3. +

    Run the following command to check the pod is running:

    +
    $ kubectl get pods -n <domain_namespace>
    +

    For example:

    +
    $ kubectl get pods -n oigns
    +

    The output will look similar to the following:

    +
    NAME     READY   STATUS    RESTARTS   AGE
    +helper   1/1     Running   0          3m
    +

    Note: If you are pulling the image from a container registry it may take several minutes before the pod reaches a READY state of 1/1. While the pod is starting you can check its status by running the following command:

    +
    $ kubectl describe pod helper -n oigns
    +
  4. +
  5. +

    Run the following command to start a bash shell in the helper pod:

    +
    $ kubectl exec -it helper -n <domain_namespace> -- /bin/bash
    +

    For example:

    +
    $ kubectl exec -it helper -n oigns -- /bin/bash
    +

    This will take you into a bash shell in the running helper pod:

    +
    [oracle@helper oracle]$
    +
  6. +
  7. +

    In the helper bash shell run the following commands to set the environment:

    +
    [oracle@helper oracle]$ export DB_HOST=<db_host.domain>
    +[oracle@helper oracle]$ export DB_PORT=<db_port>
    +[oracle@helper oracle]$ export DB_SERVICE=<service_name>
    +[oracle@helper oracle]$ export RCUPREFIX=<rcu_schema_prefix>
    +[oracle@helper oracle]$ export RCU_SCHEMA_PWD=<rcu_schema_pwd>
    +[oracle@helper oracle]$ echo -e <db_pwd>"\n"<rcu_schema_pwd> > /tmp/pwd.txt
    +[oracle@helper oracle]$ cat /tmp/pwd.txt
    +

    where:

    +

    <db_host.domain> is the database server hostname

    +

    <db_port> is the database listener port

    +

    <service_name> is the database service name

    +

    <rcu_schema_prefix> is the RCU schema prefix you want to set

    +

    <rcu_schema_pwd> is the password you want to set for the <rcu_schema_prefix>

    +

    <db_pwd> is the SYS password for the database

    +

    For example:

    +
    [oracle@helper oracle]$ export DB_HOST=mydatabasehost.example.com
    +[oracle@helper oracle]$ export DB_PORT=1521
    +[oracle@helper oracle]$ export DB_SERVICE=orcl.example.com
    +[oracle@helper oracle]$ export RCUPREFIX=OIGK8S
    +[oracle@helper oracle]$ export RCU_SCHEMA_PWD=<password>
    +[oracle@helper oracle]$ echo -e <password>"\n"<password> > /tmp/pwd.txt
    +[oracle@helper oracle]$ cat /tmp/pwd.txt
    +<password>
    +<password>
    +
  8. +
  9. +

    In the helper bash shell run the following commands to create the RCU schemas in the database:

    +
    [oracle@helper oracle]$ /u01/oracle/oracle_common/bin/rcu -silent -createRepository -databaseType ORACLE -connectString \
    +$DB_HOST:$DB_PORT/$DB_SERVICE -dbUser sys -dbRole sysdba -useSamePasswordForAllSchemaUsers true \
    +-selectDependentsForComponents true -schemaPrefix $RCUPREFIX -component OIM -component MDS -component SOAINFRA -component OPSS \
    +-f < /tmp/pwd.txt
    +

    The output will look similar to the following:

    +
    RCU Logfile: /tmp/RCU<DATE>/logs/rcu.log
    +
    +Processing command line ....
    +Repository Creation Utility - Checking Prerequisites
    +Checking Global Prerequisites
    +
    +
    +Repository Creation Utility - Checking Prerequisites
    +Checking Component Prerequisites
    +Repository Creation Utility - Creating Tablespaces
    +Validating and Creating Tablespaces
    +Create tablespaces in the repository database
    +Repository Creation Utility - Create
    +Repository Create in progress.
    +        Percent Complete: 10
    +Executing pre create operations
    +        Percent Complete: 25
    +        Percent Complete: 25
    +        Percent Complete: 26
    +        Percent Complete: 27
    +        Percent Complete: 28
    +        Percent Complete: 28
    +        Percent Complete: 29
    +        Percent Complete: 29
    +Creating Common Infrastructure Services(STB)
    +        Percent Complete: 36
    +        Percent Complete: 36
    +        Percent Complete: 44
    +        Percent Complete: 44
    +        Percent Complete: 44
    +Creating Audit Services Append(IAU_APPEND)
    +        Percent Complete: 51
    +        Percent Complete: 51
    +        Percent Complete: 59
    +        Percent Complete: 59
    +        Percent Complete: 59
    +Creating Audit Services Viewer(IAU_VIEWER)
    +        Percent Complete: 66
    +        Percent Complete: 66
    +        Percent Complete: 67
    +        Percent Complete: 67
    +        Percent Complete: 68
    +        Percent Complete: 68
    +Creating Metadata Services(MDS)
    +        Percent Complete: 76
    +        Percent Complete: 76
    +        Percent Complete: 76
    +        Percent Complete: 77
    +        Percent Complete: 77
    +        Percent Complete: 78
    +        Percent Complete: 78
    +        Percent Complete: 78
    +Creating Weblogic Services(WLS)
    +        Percent Complete: 82
    +        Percent Complete: 82
    +        Percent Complete: 83
    +        Percent Complete: 84
    +        Percent Complete: 86
    +        Percent Complete: 88
    +        Percent Complete: 88
    +        Percent Complete: 88
    +Creating User Messaging Service(UCSUMS)
    +        Percent Complete: 92
    +        Percent Complete: 92
    +        Percent Complete: 95
    +        Percent Complete: 95
    +        Percent Complete: 100
    +Creating Audit Services(IAU)
    +Creating Oracle Platform Security Services(OPSS)
    +Creating SOA Infrastructure(SOAINFRA)
    +Creating Oracle Identity Manager(OIM)
    +Executing post create operations
    +
    +Repository Creation Utility: Create - Completion Summary
    +
    +Database details:
    +-----------------------------
    +Host Name                                    : mydatabasehost.example.com
    +Port                                         : 1521
    +Service Name                                 : ORCL.EXAMPLE.COM
    +Connected As                                 : sys
    +Prefix for (prefixable) Schema Owners        : OIGK8S
    +RCU Logfile                                  : /tmp/RCU<DATE>/logs/rcu.log
    +
    +Component schemas created:
    +-----------------------------
    +Component                                    Status         Logfile
    +
    +Common Infrastructure Services               Success        /tmp/RCU<DATE>/logs/stb.log
    +Oracle Platform Security Services            Success        /tmp/RCU<DATE>/logs/opss.log
    +SOA Infrastructure                           Success        /tmp/RCU<DATE>/logs/soainfra.log
    +Oracle Identity Manager                      Success        /tmp/RCU<DATE>/logs/oim.log
    +User Messaging Service                       Success        /tmp/RCU<DATE>/logs/ucsums.log
    +Audit Services                               Success        /tmp/RCU<DATE>/logs/iau.log
    +Audit Services Append                        Success        /tmp/RCU<DATE>/logs/iau_append.log
    +Audit Services Viewer                        Success        /tmp/RCU<DATE>/logs/iau_viewer.log
    +Metadata Services                            Success        /tmp/RCU<DATE>/logs/mds.log
    +WebLogic Services                            Success        /tmp/RCU<DATE>/logs/wls.log
    +
    +Repository Creation Utility - Create : Operation Completed
    +[oracle@helper oracle]$
    +
  10. +
  11. +

    Run the following command to patch schemas in the database:

    + +

    This command should be run if you are using an OIG image that contains OIG bundle patches. If using an OIG image without OIG bundle patches, then you can skip this step.

    +
    + +
    [oracle@helper oracle]$ /u01/oracle/oracle_common/modules/thirdparty/org.apache.ant/1.10.5.0.0/apache-ant-1.10.5/bin/ant \
    +-f /u01/oracle/idm/server/setup/deploy-files/automation.xml \
    +run-patched-sql-files \
    +-logger org.apache.tools.ant.NoBannerLogger \
    +-logfile /u01/oracle/idm/server/bin/patch_oim_wls.log \
    +-DoperationsDB.host=$DB_HOST \
    +-DoperationsDB.port=$DB_PORT \
    +-DoperationsDB.serviceName=$DB_SERVICE \
    +-DoperationsDB.user=${RCUPREFIX}_OIM \
    +-DOIM.DBPassword=$RCU_SCHEMA_PWD \
    +-Dojdbc=/u01/oracle/oracle_common/modules/oracle.jdbc/ojdbc8.jar
    +

    The output will look similar to the following:

    +
    Buildfile: /u01/oracle/idm/server/setup/deploy-files/automation.xml
    +
  12. +
  13. +

    Verify the database was patched successfully by viewing the patch_oim_wls.log:

    +
    [oracle@helper oracle]$ cat /u01/oracle/idm/server/bin/patch_oim_wls.log
    +

    The output should look similar to below:

    +
    ...
    +[sql] Executing resource: /u01/oracle/idm/server/db/oim/oracle/Upgrade/oim12cps4/list/oim12cps4_upg_ent_trg_bkp.sql
    +[sql] Executing resource: /u01/oracle/idm/server/db/oim/oracle/Upgrade/oim12cps4/list/oim12cps4_upg_ent_trg_fix.sql
    +[sql] Executing resource: /u01/oracle/idm/server/db/oim/oracle/Upgrade/oim12cps4/list/oim12cps4_upg_ent_trg_restore_bkp.sql
    +[sql] Executing resource: /u01/oracle/idm/server/db/oim/oracle/Upgrade/oim12cps4/list/oim12cps4_ddl_alter_pwr_add_column.sql
    +[sql] 67 of 67 SQL statements executed successfully
    +
    +BUILD SUCCESSFUL
    +Total time: 6 seconds
    +
  14. +
  15. +

    Exit the helper bash shell by issuing the command exit.

    +
  16. +
+
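
If schema creation fails part way through, or you need to recreate the schemas under a different prefix, the same utility can drop what was created. The following is a hedged sketch only, reusing the same helper pod shell, environment variables and password file as above; confirm the component list and the passwords RCU expects for a drop in your version before running it:

    +
    [oracle@helper oracle]$ /u01/oracle/oracle_common/bin/rcu -silent -dropRepository -databaseType ORACLE -connectString \
    +$DB_HOST:$DB_PORT/$DB_SERVICE -dbUser sys -dbRole sysdba -selectDependentsForComponents true \
    +-schemaPrefix $RCUPREFIX -component OIM -component MDS -component SOAINFRA -component OPSS -f < /tmp/pwd.txt
    +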

Preparing the environment for domain creation

+

In this section you prepare the environment for the OIG domain creation. This involves the following steps:

+

a. Creating Kubernetes secrets for the domain and RCU

+

b. Create a Kubernetes persistent volume and persistent volume claim

+

Creating Kubernetes secrets for the domain and RCU

+
    +
  1. +

    Create a Kubernetes secret for the domain using the create-weblogic-credentials script in the same Kubernetes namespace as the domain:

    +
    $ cd $WORKDIR/kubernetes/create-weblogic-domain-credentials
    +$ ./create-weblogic-credentials.sh -u weblogic -p <pwd> -n <domain_namespace> -d <domain_uid> -s <kubernetes_domain_secret>
    +

    where:

    +

    -u weblogic is the WebLogic username

    +

    -p <pwd> is the password for the WebLogic user

    +

    -n <domain_namespace> is the domain namespace

    +

    -d <domain_uid> is the domain UID to be created. The default is domain1 if not specified

    +

    -s <kubernetes_domain_secret> is the name you want to create for the secret for this namespace. The default is to use the domainUID if not specified

    +

    For example:

    +
    $ cd $WORKDIR/kubernetes/create-weblogic-domain-credentials
    +$ ./create-weblogic-credentials.sh -u weblogic -p <password> -n oigns -d governancedomain -s oig-domain-credentials
    +

    The output will look similar to the following:

    +
    secret/oig-domain-credentials created
    +secret/oig-domain-credentials labeled
    +The secret oig-domain-credentials has been successfully created in the oigns namespace.
    +
  2. +
  3. +

    Verify the secret is created using the following command:

    +
    $ kubectl get secret <kubernetes_domain_secret> -o yaml -n <domain_namespace>
    +

    For example:

    +
    $ kubectl get secret oig-domain-credentials -o yaml -n oigns
    +

    The output will look similar to the following:

    +
    $ kubectl get secret oig-domain-credentials -o yaml -n oigns
    +apiVersion: v1
    +data:
    +  password: V2VsY29tZTE=
    +  username: d2VibG9naWM=
    +kind: Secret
    +metadata:
    +  creationTimestamp: "<DATE>"
    +  labels:
    +    weblogic.domainName: governancedomain
    +    weblogic.domainUID: governancedomain
    +  name: oig-domain-credentials
    +  namespace: oigns
    +  resourceVersion: "3216738"
    +  uid: c2ec07e0-0135-458d-bceb-c648d2a9ac54
    +type: Opaque
    +
  4. +
  5. +

    Create a Kubernetes secret for RCU in the same Kubernetes namespace as the domain, using the create-rcu-credentials.sh script:

    +
    $ cd $WORKDIR/kubernetes/create-rcu-credentials
    +$ ./create-rcu-credentials.sh -u <rcu_prefix> -p <rcu_schema_pwd> -a sys -q <sys_db_pwd> -d <domain_uid> -n <domain_namespace> -s <kubernetes_rcu_secret>
    +

    where:

    +

    -u <rcu_prefix> is the name of the RCU schema prefix created previously

    +

    -p <rcu_schema_pwd> is the password for the RCU schema prefix

    +

    -a <sys_db_user> is the database user with SYSDBA privilege

    +

    -q <sys_db_pwd> is the sys database password

    +

    -d <domain_uid> is the domain_uid that you created earlier

    +

    -n <domain_namespace> is the domain namespace

    +

    -s <kubernetes_rcu_secret> is the name of the rcu secret to create

    +

    For example:

    +
    $ cd $WORKDIR/kubernetes/create-rcu-credentials
    +$ ./create-rcu-credentials.sh -u OIGK8S -p <password> -a sys -q <password> -d governancedomain -n oigns -s oig-rcu-credentials
    +

    The output will look similar to the following:

    +
    secret/oig-rcu-credentials created
    +secret/oig-rcu-credentials labeled
    +The secret oig-rcu-credentials has been successfully created in the oigns namespace.
    +
  6. +
  7. +

    Verify the secret is created using the following command:

    +
    $ kubectl get secret <kubernetes_rcu_secret> -o yaml -n <domain_namespace>
    +

    For example:

    +
    $ kubectl get secret oig-rcu-credentials -o yaml -n oigns
    +

    The output will look similar to the following:

    +
    apiVersion: v1
    +data:
    +  password: V2VsY29tZTE=
    +  sys_password: V2VsY29tZTE=
    +  sys_username: c3lz
    +  username: T0lHSzhT
    +kind: Secret
    +metadata:
    +  creationTimestamp: "<DATE>"
    +  labels:
    +    weblogic.domainName: governancedomain
    +    weblogic.domainUID: governancedomain
    +  name: oig-rcu-credentials
    +  namespace: oigns
    +  resourceVersion: "3217023"
    +  uid: ce70b91a-fbbc-4839-9616-4cc2c1adeb4f
    +type: Opaque
    +
  8. +
+
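
The data values shown in the secret output are base64 encoded. If you need to confirm what was stored, decode an individual key. For example, the following returns the RCU schema prefix (OIGK8S in this example):

    +
    $ kubectl get secret oig-rcu-credentials -n oigns -o jsonpath='{.data.username}' | base64 --decode
    +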

Create a Kubernetes persistent volume and persistent volume claim

+

As referenced in Prerequisites the nodes in the Kubernetes cluster must have access to a persistent volume such as a Network File System (NFS) mount or a shared file system.

+

A persistent volume is the same as a disk mount but is inside a container. A Kubernetes persistent volume is an arbitrary name (determined, in this case, by Oracle) that is mapped to a physical volume on a disk.

+

When a container is started, it needs to mount that volume. The physical volume should be on a shared disk accessible by all the Kubernetes worker nodes because it is not known on which worker node the container will be started. In the case of Identity and Access Management, the persistent volume does not get erased when a container stops. This enables persistent configurations.

+

The example below uses an NFS mounted volume (<persistent_volume>/governancedomainpv). Other volume types can also be used. See the official Kubernetes documentation for Volumes.

+

Note: The persistent volume directory needs to be accessible to both the master and worker node(s). In this example /scratch/shared/governancedomainpv is accessible from all nodes via NFS.

+
    +
  1. +

    Make a backup copy of the create-pv-pvc-inputs.yaml file and create required directories:

    +
    $ cd $WORKDIR/kubernetes/create-weblogic-domain-pv-pvc
    +$ cp create-pv-pvc-inputs.yaml create-pv-pvc-inputs.yaml.orig
    +$ mkdir output
    +$ mkdir -p <persistent_volume>/governancedomainpv
    +$ sudo chown -R 1000:0 <persistent_volume>/governancedomainpv
    +

    For example:

    +
    $ cd $WORKDIR/kubernetes/create-weblogic-domain-pv-pvc
    +$ cp create-pv-pvc-inputs.yaml create-pv-pvc-inputs.yaml.orig
    +$ mkdir output
    +$ mkdir -p /scratch/shared/governancedomainpv
    +$ sudo chown -R 1000:0 /scratch/shared/governancedomainpv
    +
  2. +
  3. +

    On the master node run the following command to ensure it is possible to read and write to the persistent volume:

    +
    cd <persistent_volume>/governancedomainpv
    +touch filemaster.txt
    +ls filemaster.txt
    +

    For example:

    +
    cd /scratch/shared/governancedomainpv
    +touch filemaster.txt
    +ls filemaster.txt
    +

    On the first worker node run the following to ensure it is possible to read and write to the persistent volume:

    +
    cd /scratch/shared/governancedomainpv
    +ls filemaster.txt
    +touch fileworker1.txt
    +ls fileworker1.txt
    +

    Repeat the above for any other worker nodes, for example fileworker2.txt, and so on. Once you have proven that it is possible to read and write from each node to the persistent volume, delete the files created.

    +
  4. +
  5. +

    Navigate to $WORKDIR/kubernetes/create-weblogic-domain-pv-pvc:

    +
    $ cd $WORKDIR/kubernetes/create-weblogic-domain-pv-pvc
    +

    and edit the create-pv-pvc-inputs.yaml file and update the following parameters to reflect your settings. Save the file when complete:

    +
    baseName: <domain>
    +domainUID: <domain_uid>
    +namespace: <domain_namespace>
    +weblogicDomainStorageType: NFS
    +weblogicDomainStorageNFSServer: <nfs_server>
    +weblogicDomainStoragePath: <physical_path_of_persistent_storage>
    +weblogicDomainStorageSize: 10Gi
    +

    For example:

    +
    # The base name of the pv and pvc
    +baseName: domain
    +
    +# Unique ID identifying a domain.
    +# If left empty, the generated pv can be shared by multiple domains
    +# This ID must not contain an underscore ("_"), and must be lowercase and unique across all domains in a Kubernetes cluster.
    +domainUID: governancedomain
    +
    +# Name of the namespace for the persistent volume claim
    +namespace: oigns
    +
    +# Persistent volume type for the persistent storage.
    +# The value must be 'HOST_PATH' or 'NFS'.
    +# If using 'NFS', weblogicDomainStorageNFSServer must be specified.
    +weblogicDomainStorageType: NFS
    +
    +# The server name or ip address of the NFS server to use for the persistent storage.
    +# The following line must be uncommented and customized if weblogicDomainStorageType is NFS:
    +weblogicDomainStorageNFSServer: mynfsserver
    +
    +# Physical path of the persistent storage.
    +# When weblogicDomainStorageType is set to HOST_PATH, this value should be set to the path to the
    +# domain storage on the Kubernetes host.
    +# When weblogicDomainStorageType is set to NFS, then weblogicDomainStorageNFSServer should be set
    +# to the IP address or name of the NFS server, and this value should be set to the exported path
    +# on that server.
    +# Note that the path where the domain is mounted in the WebLogic containers is not affected by this
    +# setting, that is determined when you create your domain.
    +# The following line must be uncommented and customized:
    +weblogicDomainStoragePath: /scratch/shared/governancedomainpv
    +     
    +# Reclaim policy of the persistent storage
    +# The valid values are: 'Retain', 'Delete', and 'Recycle'
    +weblogicDomainStorageReclaimPolicy: Retain
    +
    +# Total storage allocated to the persistent storage.
    +weblogicDomainStorageSize: 10Gi
    +
  6. +
  7. +

    Execute the create-pv-pvc.sh script to create the PV and PVC configuration files:

    +
    $ ./create-pv-pvc.sh -i create-pv-pvc-inputs.yaml -o output
    +

    The output will be similar to the following:

    +
    Input parameters being used
    +export version="create-weblogic-sample-domain-pv-pvc-inputs-v1"
    +export baseName="domain"
    +export domainUID="governancedomain"
    +export namespace="oigns"
    +export weblogicDomainStorageType="NFS"
    +export weblogicDomainStorageNFSServer="mynfsserver"
    +export weblogicDomainStoragePath="/scratch/shared/governancedomainpv"
    +export weblogicDomainStorageReclaimPolicy="Retain"
    +export weblogicDomainStorageSize="10Gi"
    +
    +Generating output/pv-pvcs/governancedomain-domain-pv.yaml
    +Generating output/pv-pvcs/governancedomain-domain-pvc.yaml
    +The following files were generated:
    +  output/pv-pvcs/governancedomain-domain-pv.yaml
    +  output/pv-pvcs/governancedomain-domain-pvc.yaml
    +
    +Completed
    +
  8. +
  9. +

    Run the following to show the files are created:

    +
    $ ls output/pv-pvcs
    +create-pv-pvc-inputs.yaml  governancedomain-domain-pv.yaml  governancedomain-domain-pvc.yaml
    +
  10. +
  11. +

    Run the following kubectl command to create the PV and PVC in the domain namespace:

    +
    $ kubectl create -f output/pv-pvcs/governancedomain-domain-pv.yaml -n <domain_namespace>
    +$ kubectl create -f output/pv-pvcs/governancedomain-domain-pvc.yaml -n <domain_namespace>
    +

    For example:

    +
    $ kubectl create -f output/pv-pvcs/governancedomain-domain-pv.yaml -n oigns
    +$ kubectl create -f output/pv-pvcs/governancedomain-domain-pvc.yaml -n oigns
    +

    The output will look similar to the following:

    +
    persistentvolume/governancedomain-domain-pv created
    +persistentvolumeclaim/governancedomain-domain-pvc created
    +
  12. +
  13. +

    Run the following commands to verify the PV and PVC were created successfully:

    +
    $ kubectl describe pv <pv_name> 
    +$ kubectl describe pvc <pvc_name> -n <domain_namespace>
    +

    For example:

    +
    $ kubectl describe pv governancedomain-domain-pv 
    +$ kubectl describe pvc governancedomain-domain-pvc -n oigns
    +

    The output will look similar to the following:

    +
    $ kubectl describe pv governancedomain-domain-pv
    +   
    +Name:            governancedomain-domain-pv
    +Labels:          weblogic.domainUID=governancedomain
    +Annotations:     pv.kubernetes.io/bound-by-controller: yes
    +Finalizers:      [kubernetes.io/pv-protection]
    +StorageClass:    governancedomain-domain-storage-class
    +Status:          Bound
    +Claim:           oigns/governancedomain-domain-pvc
    +Reclaim Policy:  Retain
    +Access Modes:    RWX
    +VolumeMode:      Filesystem
    +Capacity:        10Gi
    +Node Affinity:   <none>
    +Message:
    +Source:
    +    Type:      NFS (an NFS mount that lasts the lifetime of a pod)
    +    Server:    mynfsserver
    +    Path:      /scratch/shared/governancedomainpv
    +    ReadOnly:  false
    +Events:        <none>
    +
    $ kubectl describe pvc governancedomain-domain-pvc -n oigns
    +
    +Name:          governancedomain-domain-pvc
    +Namespace:     oigns
    +StorageClass:  governancedomain-domain-storage-class
    +Status:        Bound
    +Volume:        governancedomain-domain-pv
    +Labels:        weblogic.domainUID=governancedomain
    +Annotations:   pv.kubernetes.io/bind-completed: yes
    +               pv.kubernetes.io/bound-by-controller: yes
    +Finalizers:    [kubernetes.io/pvc-protection]
    +Capacity:      10Gi
    +Access Modes:  RWX
    +VolumeMode:    Filesystem
    +Mounted By:    <none>
    +Events:        <none>
    +

    You are now ready to create the OIG domain as per Create OIG Domains.

    +
  14. +
+ + + + + +
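
As an alternative to the describe commands above, a quick way to confirm both objects are created and bound is:

    +
    $ kubectl get pv governancedomain-domain-pv
    +$ kubectl get pvc governancedomain-domain-pvc -n oigns
    +

Both should report a STATUS of Bound.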
+ +
+ + +
+ + +
+ + + +
+ +
+
+
+ + + + + + + + + + + + + + + + + + diff --git a/docs/23.4.1/idm-products/oig/prepare-your-environment/index.xml b/docs/23.4.1/idm-products/oig/prepare-your-environment/index.xml new file mode 100644 index 000000000..7baf94546 --- /dev/null +++ b/docs/23.4.1/idm-products/oig/prepare-your-environment/index.xml @@ -0,0 +1,14 @@ + + + + Prepare your environment on Oracle Fusion Middleware on Kubernetes + /fmw-kubernetes/23.4.1/idm-products/oig/prepare-your-environment/ + Recent content in Prepare your environment on Oracle Fusion Middleware on Kubernetes + Hugo -- gohugo.io + en-us + + + + + + \ No newline at end of file diff --git a/docs/23.4.1/idm-products/oig/prerequisites/index.html b/docs/23.4.1/idm-products/oig/prerequisites/index.html new file mode 100644 index 000000000..0b31553e4 --- /dev/null +++ b/docs/23.4.1/idm-products/oig/prerequisites/index.html @@ -0,0 +1,3985 @@ + + + + + + + + + + + + Prerequisites :: Oracle Fusion Middleware on Kubernetes + + + + + + + + + + + + + + + + + + + + + + + + + +
+
+
+ +
+
+ + + + + + + +
+
+ +
+ +
+ +
+ +

+ + Prerequisites +

+ + + + + + + +

Introduction

+

This document provides information about the system requirements and limitations for deploying and running OIG domains with the WebLogic Kubernetes Operator 4.1.2.

+

System requirements for OIG domains

+
    +
  • +

    A running Kubernetes cluster that meets the following requirements:

    +
      +
    • The Kubernetes cluster must have sufficient nodes and resources.
    • +
    • An installation of Helm is required on the Kubernetes cluster. Helm is used to create and deploy the necessary resources and run the WebLogic Kubernetes Operator in a Kubernetes cluster.
    • +
    • A supported container engine must be installed and running on the Kubernetes cluster.
    • +
    • The Kubernetes cluster and container engine must meet the minimum version requirements outlined in document ID 2723908.1 on My Oracle Support.
    • +
    • You must have the cluster-admin role to install the WebLogic Kubernetes Operator.
    • +
    • The nodes in the Kubernetes cluster must have access to a persistent volume such as a Network File System (NFS) mount or a shared file system.
    • +
    • The system clocks on each node of the Kubernetes cluster must be synchronized. Run the date command simultaneously on all the nodes in each cluster and then synchronize accordingly (see the example after this list).
    • +
    +
  • +
  • +

    A running Oracle Database 12.2.0.1 or later. The database must be a supported version for OIG as outlined in Oracle Fusion Middleware 12c certifications. It must meet the requirements as outlined in About Database Requirements for an Oracle Fusion Middleware Installation and in RCU Requirements for Oracle Databases.

    +
  • +
+
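
For the clock synchronization requirement above, a simple way to compare the clocks is to run date on every node from one place. A minimal sketch, assuming password-less SSH from the master node and the example node names used elsewhere in this guide:

    +
    $ for host in master-node worker-node1 worker-node2; do ssh $host date; done
    +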

Note: This documentation does not tell you how to install a Kubernetes cluster, Helm, the container engine, or how to push container images to a container registry. Please refer to your vendor-specific documentation for this information. Also see Getting Started.

+

Limitations

+

Compared to running a WebLogic Server domain in Kubernetes using the operator, the following limitations currently exist for OIG domains:

+
    +
  • In this release, OIG domains are supported using the “domain on a persistent volume” model only, where the domain home is located in a persistent volume (PV).
  • +
  • The “domain in image” model is not supported.
  • +
  • Only configured clusters are supported. Dynamic clusters are not supported for OIG domains. Note that you can still use all of the scaling features; you just need to define the maximum size of your cluster at domain creation time.
  • +
  • The WebLogic Monitoring Exporter currently supports the WebLogic MBean trees only. Support for JRF MBeans has not been added yet.
  • +
  • We do not currently support running OIG in non-Linux containers.
  • +
+ + + + + +
+ +
+ + +
+ + +
+ + + +
+ +
+
+
+ + + + + + + + + + + + + + + + + + diff --git a/docs/23.4.1/idm-products/oig/prerequisites/index.xml b/docs/23.4.1/idm-products/oig/prerequisites/index.xml new file mode 100644 index 000000000..11f92fa4b --- /dev/null +++ b/docs/23.4.1/idm-products/oig/prerequisites/index.xml @@ -0,0 +1,14 @@ + + + + Prerequisites on Oracle Fusion Middleware on Kubernetes + /fmw-kubernetes/23.4.1/idm-products/oig/prerequisites/ + Recent content in Prerequisites on Oracle Fusion Middleware on Kubernetes + Hugo -- gohugo.io + en-us + + + + + + \ No newline at end of file diff --git a/docs/23.4.1/idm-products/oig/release-notes/index.html b/docs/23.4.1/idm-products/oig/release-notes/index.html new file mode 100644 index 000000000..4809a84b6 --- /dev/null +++ b/docs/23.4.1/idm-products/oig/release-notes/index.html @@ -0,0 +1,4264 @@ + + + + + + + + + + + + Release Notes :: Oracle Fusion Middleware on Kubernetes + + + + + + + + + + + + + + + + + + + + + + + + + +
+
+
+ +
+
+ + + + +
+
+ +
+
+ + +
+
+ +
+ +
+ +
+ +

+ + Release Notes +

+ + + + + + + +

Review the latest changes and known issues for Oracle Identity Governance on Kubernetes.

+

Recent changes

+ + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + +
Date | Version | Change
October, 2023 | 23.4.1 | Supports Oracle Identity Governance 12.2.1.4 domain deployment using the October 2023 container image which contains the October Patch Set Update (PSU) and other fixes released with the Critical Patch Update (CPU) program.
This release contains the following changes:
+ Support for WebLogic Kubernetes Operator 4.1.2.
+ Ability to set resource requests and limits for CPU and memory on a cluster resource. See, Setting the OIM server memory parameters.
+ Support for the Kubernetes Horizontal Pod Autoscaler (HPA). See, Kubernetes Horizontal Pod Autoscaler.
If upgrading to October 23 (23.4.1) from October 22 (22.4.1) or later, you must upgrade the following in order:
1. WebLogic Kubernetes Operator to 4.1.2
2. Patch the OIG container image to October 23
If upgrading to October 23 (23.4.1) from a release prior to October 22 (22.4.1), you must upgrade the following in order:
1. WebLogic Kubernetes Operator to 4.1.2
2. Patch the OIG container image to October 23
3. Upgrade the Ingress
4. Upgrade Elasticsearch and Kibana
See Patch and Upgrade for these instructions.
July, 2023 | 23.3.1 | Supports Oracle Identity Governance 12.2.1.4 domain deployment using the July 2023 container image which contains the July Patch Set Update (PSU) and other fixes released with the Critical Patch Update (CPU) program.
If upgrading to July 23 (23.3.1) from April 23 (23.2.1), upgrade as follows:
1. Patch the OIG container image to July 23
If upgrading to July 23 (23.3.1) from October 22 (22.4.1), or January 23 (23.1.1) release, you must upgrade the following in order:
1. WebLogic Kubernetes Operator to 4.0.4
2. Patch the OIG container image to July 23
If upgrading to July 23 (23.3.1) from a release prior to October 22 (22.4.1) release, you must upgrade the following in order:
1. WebLogic Kubernetes Operator to 4.0.4
2. Patch the OIG container image to July 23
3. Upgrade the Ingress
4. Upgrade Elasticsearch and Kibana
See Patch and Upgrade for these instructions.
April, 2023 | 23.2.1 | Supports Oracle Identity Governance 12.2.1.4 domain deployment using the April 2023 container image which contains the April Patch Set Update (PSU) and other fixes released with the Critical Patch Update (CPU) program.
Support for WebLogic Kubernetes Operator 4.0.4.
Changes to stopping/starting pods due to domain and cluster configuration being separated and parameter changes (IF_NEEDED, NEVER to IfNeeded, Never).
If upgrading to April 23 (23.2.1) from October 22 (22.4.1) or later, you must upgrade in the following order:
1. WebLogic Kubernetes Operator to 4.0.4
2. Patch the OIG container image to April 23
If upgrading to April 23 (23.2.1) from a release prior to October 22 (22.4.1), you must upgrade the following in order:
1. WebLogic Kubernetes Operator to 4.0.4
2. Patch the OIG container image to April 23
3. Upgrade the Ingress
4. Upgrade Elasticsearch and Kibana See Patch and Upgrade for these instructions.
January, 2023 | 23.1.1 | Supports Oracle Identity Governance 12.2.1.4 domain deployment using the January 2023 container image which contains the January Patch Set Update (PSU) and other fixes released with the Critical Patch Update (CPU) program.
If upgrading to January 23 (23.1.1) from October 22 (22.4.1) release, you only need to patch the OIG container image to January 23.
If upgrading to January 23 (23.1.1) from a release prior to October 22 (22.4.1) release, you must upgrade the following in order:
1. WebLogic Kubernetes Operator to 3.4.2
2. Patch the OIG container image to January 23
3. Upgrade the Ingress
4. Upgrade Elasticsearch and Kibana See Patch and Upgrade for these instructions.
October, 2022 | 22.4.1 | Supports Oracle Identity Governance 12.2.1.4 domain deployment using the October 2022 container image which contains the October Patch Set Update (PSU) and other fixes released with the Critical Patch Update (CPU) program.
Support for WebLogic Kubernetes Operator 3.4.2.
Additional Ingress mappings added.
Changes to deployment of Logging and Visualization with Elasticsearch and Kibana.
OIG container images are now only available from container-registry.oracle.com and are no longer available from My Oracle Support.
If upgrading to October 22 (22.4.1) from a previous release, you must upgrade the following in order:
1. WebLogic Kubernetes Operator to 3.4.2
2. Patch the OIG container image to October 22
3. Upgrade the Ingress
4. Upgrade Elasticsearch and Kibana
See Patch and Upgrade for these instructions.
July, 2022 | 22.3.1 | Supports Oracle Identity Governance 12.2.1.4 domain deployment using the July 2022 container image which contains the July Patch Set Update (PSU) and other fixes released with the Critical Patch Update (CPU) program.
April, 2022 | 22.2.1 | Updated for CRI-O support.
November, 2021 | 21.4.2 | Supports Oracle Identity Governance domain deployment using WebLogic Kubernetes Operator 3.3.0. Voyager ingress removed as no longer supported.
October 2021 | 21.4.1 | A) References to supported Kubernetes, Helm and Docker versions removed and replaced with Support note reference. B) Namespace and domain names changed to be consistent with Enterprise Deployment Guide for Oracle Identity and Access Management in a Kubernetes Cluster. C) Additional post configuration tasks added. D) New section on how to start Design Console in a container. E) Upgrading a Kubernetes Cluster and Security Hardening removed as vendor specific.
November 2020 | 20.4.1 | Initial release of Identity Governance on Kubernetes.
+ + + + + + + + + + + + + + + + + + diff --git a/docs/23.4.1/idm-products/oig/release-notes/index.xml b/docs/23.4.1/idm-products/oig/release-notes/index.xml new file mode 100644 index 000000000..f2432d46c --- /dev/null +++ b/docs/23.4.1/idm-products/oig/release-notes/index.xml @@ -0,0 +1,14 @@ + + + + Release Notes on Oracle Fusion Middleware on Kubernetes + /fmw-kubernetes/23.4.1/idm-products/oig/release-notes/ + Recent content in Release Notes on Oracle Fusion Middleware on Kubernetes + Hugo -- gohugo.io + en-us + + + + + + \ No newline at end of file diff --git a/docs/23.4.1/idm-products/oig/troubleshooting/index.html b/docs/23.4.1/idm-products/oig/troubleshooting/index.html new file mode 100644 index 000000000..0452b48a9 --- /dev/null +++ b/docs/23.4.1/idm-products/oig/troubleshooting/index.html @@ -0,0 +1,4007 @@ + + + + + + + + + + + + Troubleshooting :: Oracle Fusion Middleware on Kubernetes + + + + + + + + + + + + + + + + + + + + + + + + + +
+ + Troubleshooting +

+ + + + + + + +

Domain creation failure

+

If the OIG domain creation fails when running create-domain.sh, run the following to diagnose the issue:

+
    +
  1. +

    Run the following command to diagnose the create domain job:

    +
    $ kubectl logs <job_name> -n <domain_namespace>
    +

    For example:

    +
    $ kubectl logs governancedomain-create-fmw-infra-sample-domain-job-9wqzb -n oigns
    +

    Also run:

    +
    $ kubectl describe pod <pod> -n <domain_namespace>
    +

    For example:

    +
    $ kubectl describe pod governancedomain-create-fmw-infra-sample-domain-job-9wqzb -n oigns
    +

    Using the output you should be able to diagnose the problem and resolve the issue.

    +

    Clean down the failed domain creation by following steps 1-3 in Delete the OIG domain home. Then follow RCU schema creation onwards to recreate the RCU schema, Kubernetes secrets for domain and RCU, the persistent volume and the persistent volume claim. Then execute the OIG domain creation steps again.

    +
  2. +
  3. +

    If any of the above commands return the following error:

    +
    Failed to start container "create-fmw-infra-sample-domain-job": Error response from daemon: error while creating mount source path
    +'/scratch/shared/governancedomainpv ': mkdir /scratch/shared/governancedomainpv : permission denied
    +

    then there is a permissions error on the directory for the PV and PVC and the following should be checked:

    +

    a) The directory has 777 permissions: chmod -R 777 <persistent_volume>/governancedomainpv.

    +

    b) If the directory does have the correct permissions, check that an oracle user exists and that its uid and gid equal 1000, for example:

    +
    $ id oracle
    uid=1000(oracle) gid=1000(spg) groups=1000(spg),59968(oinstall),8500(dba),100(users),1007(cgbudba)
    +

    Create the oracle user if it doesn’t exist and set the uid and gid to 1000 (a sketch of the commands is shown after this list).

    +

    c) Edit the $WORKDIR/kubernetes/create-weblogic-domain-pv-pvc/create-pv-pvc-inputs.yaml and add a slash to the end of the directory for the weblogicDomainStoragePath parameter:

    +
    weblogicDomainStoragePath: /scratch/shared/governancedomainpv/
    +

    Clean down the failed domain creation by following steps 1-3 in Delete the OIG domain home. Then follow RCU schema creation onwards to recreate the RCU schema, Kubernetes secrets for domain and RCU, the persistent volume and the persistent volume claim. Then execute the OIG domain creation steps again.

    +
  4. +
+
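For step b) above, the following is a minimal sketch for creating the oracle user with uid and gid 1000 on the host that serves the persistent volume. The group name oracle is an assumption; in the earlier example output the gid 1000 group is named spg, so reuse whatever group already owns gid 1000 in your environment.

    # create a gid 1000 group and a uid 1000 oracle user, then verify
    $ sudo groupadd -g 1000 oracle
    $ sudo useradd -u 1000 -g 1000 oracle
    $ id oracle
    uid=1000(oracle) gid=1000(oracle) groups=1000(oracle)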

Patch domain failures

+

The instructions in this section relate to problems patching a deployment with a new image as per Patch an image.

+
    +
  1. +

    If the OIG domain patching fails when running patch_oig_domain.sh, run the following to diagnose the issue:

    +
    $ kubectl describe domain <domain_name> -n <domain_namespace>
    +

    For example:

    +
    $ kubectl describe domain governancedomain -n oigns
    +

    Using the output you should be able to diagnose the problem and resolve the issue.

    +

    If the domain is already patched successfully and the script failed at the last step of waiting for the pods to come up with the new image, then you do not need to rerun the script after the issue is resolved. The pods will come up automatically once you resolve the underlying issue.

    +
  2. +
  3. +

    If the script is stuck at the following message for a long time:

    +
    "[INFO] Waiting for weblogic pods to be ready..This may take several minutes, do not close the window. Check log /scratch/OIGK8Slatest/fmw-kubernetes/OracleIdentityGovernance/kubernetes/domain-lifecycle/log/oim_patch_log-<DATE>/monitor_weblogic_pods.log for progress"
    +

    run the following command to diagnose the issue:

    +
    $ kubectl get pods -n <domain_namespace>
    +

    For example:

    +
    $ kubectl get pods -n oigns
    +

    Run the following to check the logs of the AdminServer, SOA server or OIM server pods, as there may be an issue that is not allowing the domain pods to start properly:

    +
    $ kubectl logs <pod> -n oigns
    +

    If the above does not reveal any information, you can also run:

    +
    $ kubectl describe pod <pod> -n oigns
    +

    Further diagnostic logs can also be found under the $WORKDIR/kubernetes/domain-lifecycle directory.

    +

    Once any issue is resolved the pods will come up automatically without the need to rerun the script. A sketch of additional status checks is shown after this list.

    +
  4. +
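As an additional check while waiting for the pods, you can inspect the domain resource conditions and recent namespace events. This is a sketch using the sample domain name governancedomain and namespace oigns from the examples above:

    # show the domain conditions reported by the operator
    $ kubectl get domain governancedomain -n oigns -o jsonpath='{.status.conditions}'
    # list recent events in the namespace, oldest first
    $ kubectl get events -n oigns --sort-by='.lastTimestamp'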
+ + + + + + + + + + + + + + + + + + diff --git a/docs/23.4.1/idm-products/oig/troubleshooting/index.xml b/docs/23.4.1/idm-products/oig/troubleshooting/index.xml new file mode 100644 index 000000000..6144718b5 --- /dev/null +++ b/docs/23.4.1/idm-products/oig/troubleshooting/index.xml @@ -0,0 +1,14 @@ + + + + Troubleshooting on Oracle Fusion Middleware on Kubernetes + /fmw-kubernetes/23.4.1/idm-products/oig/troubleshooting/ + Recent content in Troubleshooting on Oracle Fusion Middleware on Kubernetes + Hugo -- gohugo.io + en-us + + + + + + \ No newline at end of file diff --git a/docs/23.4.1/idm-products/oig/validate-domain-urls/index.html b/docs/23.4.1/idm-products/oig/validate-domain-urls/index.html new file mode 100644 index 000000000..02afa6ab1 --- /dev/null +++ b/docs/23.4.1/idm-products/oig/validate-domain-urls/index.html @@ -0,0 +1,3987 @@ + + + + + + + + + + + + Validate domain URLs :: Oracle Fusion Middleware on Kubernetes + + + + + + + + + + + + + + + + + + + + + + + + + +
+ + Validate domain URLs +

+ + + + + + + +

In this section you validate the OIG domain URLs that are accessible via the NGINX ingress.

+

Make sure you know the master hostname and port before proceeding.

+

Validate the OIG domain URLs via the ingress

+

Launch a browser and access the following URLs. Use http or https depending on whether you configured your ingress for non-SSL or SSL.

+

Log in to the WebLogic Administration Console and Oracle Enterprise Manager Console with the WebLogic username and password (weblogic/<password>).

+

Log in to Oracle Identity Governance with the xelsysadm username and password (xelsysadm/<password>).

+

Note: If using a load balancer for your ingress replace ${MASTERNODE-HOSTNAME}:${MASTERNODE-PORT} with ${LOADBALANCER-HOSTNAME}:${LOADBALANCER-PORT}.

+ + + + + + + + + + + + + + + + + + + + + + + + + +
Console or Page | URL
WebLogic Administration Console | https://${MASTERNODE-HOSTNAME}:${MASTERNODE-PORT}/console
Oracle Enterprise Manager Console | https://${MASTERNODE-HOSTNAME}:${MASTERNODE-PORT}/em
Oracle Identity System Administration | https://${MASTERNODE-HOSTNAME}:${MASTERNODE-PORT}/sysadmin
Oracle Identity Self Service | https://${MASTERNODE-HOSTNAME}:${MASTERNODE-PORT}/identity
+
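Optionally, before logging in through a browser, you can probe each URL from the command line. The sketch below uses curl with -k to skip certificate verification for self-signed certificates; an HTTP 200 or a redirect response (302) indicates that the ingress is routing the request:

    # print only the HTTP status code returned for each URL
    $ curl -k -s -o /dev/null -w "%{http_code}\n" https://${MASTERNODE-HOSTNAME}:${MASTERNODE-PORT}/console
    $ curl -k -s -o /dev/null -w "%{http_code}\n" https://${MASTERNODE-HOSTNAME}:${MASTERNODE-PORT}/em
    $ curl -k -s -o /dev/null -w "%{http_code}\n" https://${MASTERNODE-HOSTNAME}:${MASTERNODE-PORT}/identity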

Note: WebLogic Administration Console and Oracle Enterprise Manager Console should only be used to monitor the servers in the OIG domain. To control the Administration Server and OIG Managed Servers (start/stop) you must use Kubernetes. See Domain Life Cycle for more information.

+

The browser will give certificate errors if you used a self-signed certificate and have not imported it into the browser's Certificate Authority store. If this occurs, you can proceed with the connection and ignore the errors.

+

After the URLs have been verified, follow Post install configuration.

+ + + + + + + + + + + + + + + + + + diff --git a/docs/23.4.1/idm-products/oig/validate-domain-urls/index.xml b/docs/23.4.1/idm-products/oig/validate-domain-urls/index.xml new file mode 100644 index 000000000..9beb49e16 --- /dev/null +++ b/docs/23.4.1/idm-products/oig/validate-domain-urls/index.xml @@ -0,0 +1,14 @@ + + + + Validate domain URLs on Oracle Fusion Middleware on Kubernetes + /fmw-kubernetes/23.4.1/idm-products/oig/validate-domain-urls/ + Recent content in Validate domain URLs on Oracle Fusion Middleware on Kubernetes + Hugo -- gohugo.io + en-us + + + + + + \ No newline at end of file diff --git a/docs/23.4.1/idm-products/oud/configure-ingress/index.html b/docs/23.4.1/idm-products/oud/configure-ingress/index.html new file mode 100644 index 000000000..bb52b6d27 --- /dev/null +++ b/docs/23.4.1/idm-products/oud/configure-ingress/index.html @@ -0,0 +1,4781 @@ + + + + + + + + + + + + Configure an Ingress for OUD :: Oracle Fusion Middleware on Kubernetes + + + + + + + + + + + + + + + + + + + + + + + + + +

+ + Configure an Ingress for OUD +

+ + + + + + + +
    +
  1. +

    Introduction

    +
  2. +
  3. +

    Install NGINX

    +

    a. Configure the repository

    +

    b. Create a namespace

    +

    c. Install NGINX using helm

    +
  4. +
  5. +

    Access to interfaces through ingress

    +

    a. Changes in /etc/hosts to validate hostname based ingress rules

    +

    b. Using LDAP utilities

    +

    c. Validate access using LDAP

    +

    d. Validate access using HTTPS

    +
  6. +
+

Introduction

+

The instructions below explain how to set up NGINX as an ingress for OUD.

+

By default the ingress configuration only supports HTTP and HTTPS ports. To allow LDAP and LDAPS communication over TCP, configuration is required at the ingress controller/implementation level.

+

Install NGINX

+

Use Helm to install NGINX.

+

Configure the repository

+
    +
  1. +

    Add the Helm chart repository for installing NGINX using the following command:

    +
    $ helm repo add stable https://kubernetes.github.io/ingress-nginx
    +

    The output will look similar to the following:

    +
    "stable" has been added to your repositories
    +
  2. +
  3. +

    Update the repository using the following command:

    +
    $ helm repo update
    +

    The output will look similar to the following:

    +
    Hang tight while we grab the latest from your chart repositories...
    +...Successfully got an update from the "stable" chart repository
    +Update Complete. Happy Helming!
    +
  4. +
+

Create a namespace

+
    +
  1. +

    Create a Kubernetes namespace for NGINX:

    +
    $ kubectl create namespace <namespace>
    +

    For example:

    +
    $ kubectl create namespace mynginx
    +

    The output will look similar to the following:

    +
    namespace/mynginx created
    +
  2. +
+

Install NGINX using helm

+
    +
  1. +

    Create a $WORKDIR/kubernetes/helm/nginx-ingress-values-override.yaml that contains the following:

    +

    Note: The configuration below:

    +
      +
    • Assumes that you have oud-ds-rs installed with value oud-ds-rs as a deployment/release name in the namespace oudns. If using a different deployment name and/or namespace change appropriately.
    • +
    • Deploys an ingress using LoadBalancer. If you prefer to use NodePort, change the configuration accordingly. For more details about NGINX configuration see: NGINX Ingress Controller.
    • +
    +
    # Configuration for additional TCP ports to be exposed through Ingress
    +# Format for each port would be like:
    +# <PortNumber>: <Namespace>/<Service>
    +tcp:
    +  # Map 1389 TCP port to LBR LDAP service to get requests handled through any available POD/Endpoint serving LDAP Port
    +  1389: oudns/oud-ds-rs-lbr-ldap:ldap
    +  # Map 1636 TCP port to LBR LDAP service to get requests handled through any available POD/Endpoint serving LDAPS Port
    +  1636: oudns/oud-ds-rs-lbr-ldap:ldaps
    +controller:
    +  admissionWebhooks:
    +    enabled: false
    +  extraArgs:
    +    # The secret referred to by this flag contains the default certificate to be used when accessing the catch-all server.
    +    # If this flag is not provided NGINX will use a self-signed certificate.
    +    # If the TLS Secret is in different namespace, name can be mentioned as <namespace>/<tlsSecretName>
    +    default-ssl-certificate: oudns/oud-ds-rs-tls-cert
    +  service:
    +    # controller service external IP addresses
    +    # externalIPs:
    +    #   - < External IP Address >
    +    # To configure Ingress Controller Service as LoadBalancer type of Service
    +    # Based on the Kubernetes configuration, External LoadBalancer would be linked to the Ingress Controller Service
    +    type: LoadBalancer
    +    # Configuration for NodePort to be used for Ports exposed through Ingress
    +    # If NodePorts are not defined/configured, a NodePort will be assigned automatically by Kubernetes
    +    # These NodePorts are helpful when accessing services directly through the Ingress without having an external Load Balancer.
    +    nodePorts:
    +      # For HTTP Interface exposed through LoadBalancer/Ingress
    +      http: 30080
    +      # For HTTPS Interface exposed through LoadBalancer/Ingress
    +      https: 30443
    +      tcp:
    +        # For LDAP Interface
    +        1389: 31389
    +        # For LDAPS Interface
    +        1636: 31636
    +
  2. +
  3. +

    To install and configure NGINX Ingress issue the following command:

    +
    $ helm install --namespace <namespace> \
    +--values nginx-ingress-values-override.yaml \
    +lbr-nginx stable/ingress-nginx
    +

    Where:

    +
      +
    • lbr-nginx is your deployment name
    • +
    • stable/ingress-nginx is the chart reference
    • +
    +

    For example:

    +
    $ helm install --namespace mynginx \
    +--values nginx-ingress-values-override.yaml \
    +lbr-nginx stable/ingress-nginx
    +

    The output will look similar to the following:

    +
    NAME: lbr-nginx
    +LAST DEPLOYED: <DATE>
    +NAMESPACE: mynginx
    +STATUS: deployed
    +REVISION: 1
    +TEST SUITE: None
    +NOTES:
    +The ingress-nginx controller has been installed.
    +It may take a few minutes for the LoadBalancer IP to be available.
    +You can watch the status by running 'kubectl --namespace mynginx get services -o wide -w lbr-nginx-ingress-nginx-controller'
    +
    +An example Ingress that makes use of the controller:
    +
    +  apiVersion: networking.k8s.io/v1beta1
    +  kind: Ingress
    +  metadata:
    +    annotations:
    +      kubernetes.io/ingress.class: nginx
    +    name: example
    +    namespace: foo
    +  spec:
    +    rules:
    +      - host: www.example.com
    +        http:
    +          paths:
    +            - backend:
    +                serviceName: exampleService
    +                servicePort: 80
    +              path: /
    +    # This section is only required if TLS is to be enabled for the Ingress
    +    tls:
    +        - hosts:
    +            - www.example.com
    +          secretName: example-tls
    +
    +If TLS is enabled for the Ingress, a Secret containing the certificate and key must also be provided:
    +
    +  apiVersion: v1
    +  kind: Secret
    +  metadata:
    +    name: example-tls
    +    namespace: foo
    +  data:
    +    tls.crt: <base64 encoded cert>
    +    tls.key: <base64 encoded key>
    +  type: kubernetes.io/tls
    +
  4. +
+
Optional: Command helm upgrade to update nginx-ingress
+

If required, an nginx-ingress deployment can be updated/upgraded with the following command. In this example, the nginx-ingress configuration is updated with additional TCP ports and NodePorts for accessing the LDAP/LDAPS ports of specific pods (a verification sketch follows these steps):

+
    +
  1. +

    Create a nginx-ingress-values-override.yaml that contains the following:

    +
    # Configuration for additional TCP ports to be exposed through Ingress
    +# Format for each port would be like:
    +# <PortNumber>: <Namespace>/<Service>
    +tcp: 
    +  # Map 1389 TCP port to LBR LDAP service to get requests handled through any available POD/Endpoint serving LDAP Port
    +  1389: oudns/oud-ds-rs-lbr-ldap:ldap
    +  # Map 1636 TCP port to LBR LDAP service to get requests handled through any available POD/Endpoint serving LDAPS Port
    +  1636: oudns/oud-ds-rs-lbr-ldap:ldaps
    +  # Map specific ports for LDAP and LDAPS communication from individual Services/Pods
    +  # To redirect requests on 3890 port to oudns/oud-ds-rs-ldap-0:ldap
    +  3890: oudns/oud-ds-rs-ldap-0:ldap
    +  # To redirect requests on 6360 port to oudns/oud-ds-rs-ldaps-0:ldap
    +  6360: oudns/oud-ds-rs-ldap-0:ldaps
    +  # To redirect requests on 3891 port to oudns/oud-ds-rs-ldap-1:ldap
    +  3891: oudns/oud-ds-rs-ldap-1:ldap
    +  # To redirect requests on 6361 port to oudns/oud-ds-rs-ldaps-1:ldap
    +  6361: oudns/oud-ds-rs-ldap-1:ldaps
    +  # To redirect requests on 3892 port to oudns/oud-ds-rs-ldap-2:ldap
    +  3892: oudns/oud-ds-rs-ldap-2:ldap
    +  # To redirect requests on 6362 port to oudns/oud-ds-rs-ldaps-2:ldap
    +  6362: oudns/oud-ds-rs-ldap-2:ldaps
    +  # Map 1444 TCP port to LBR Admin service to get requests handled through any available POD/Endpoint serving Admin LDAPS Port
    +  1444: oudns/oud-ds-rs-lbr-admin:adminldaps
    +  # To redirect requests on 4440 port to oudns/oud-ds-rs-0:adminldaps
    +  4440: oudns/oud-ds-rs-0:adminldaps
    +  # To redirect requests on 4441 port to oudns/oud-ds-rs-1:adminldaps
    +  4441: oudns/oud-ds-rs-1:adminldaps
    +  # To redirect requests on 4442 port to oudns/oud-ds-rs-2:adminldaps
    +  4442: oudns/oud-ds-rs-2:adminldaps
    +controller:
    +  admissionWebhooks:
    +    enabled: false
    +  extraArgs:
    +    # The secret referred to by this flag contains the default certificate to be used when accessing the catch-all server.
    +    # If this flag is not provided NGINX will use a self-signed certificate.
    +    # If the TLS Secret is in different namespace, name can be mentioned as <namespace>/<tlsSecretName>
    +    default-ssl-certificate: oudns/oud-ds-rs-tls-cert
    +  service:
    +    # controller service external IP addresses
    +    # externalIPs:
    +    #   - < External IP Address >
    +    # To configure Ingress Controller Service as LoadBalancer type of Service
    +    # Based on the Kubernetes configuration, External LoadBalancer would be linked to the Ingress Controller Service
    +    type: LoadBalancer
    +    # Configuration for NodePort to be used for Ports exposed through Ingress
    +    # If NodePorts are not defined/configured, a NodePort will be assigned automatically by Kubernetes
    +    # These NodePorts are helpful when accessing services directly through the Ingress without having an external Load Balancer.
    +    nodePorts:
    +      # For HTTP Interface exposed through LoadBalancer/Ingress
    +      http: 30080
    +      # For HTTPS Interface exposed through LoadBalancer/Ingress
    +      https: 30443
    +      tcp:
    +        # For LDAP Interface referring to LBR LDAP services serving LDAP port
    +        1389: 31389
    +        # For LDAPS Interface referring to LBR LDAP services serving LDAPS port
    +        1636: 31636
    +        # For LDAP Interface from specific service oud-ds-rs-ldap-0
    +        3890: 30890
    +        # For LDAPS Interface from specific service oud-ds-rs-ldap-0
    +        6360: 30360
    +        # For LDAP Interface from specific service oud-ds-rs-ldap-1
    +        3891: 30891
    +        # For LDAPS Interface from specific service oud-ds-rs-ldap-1
    +        6361: 30361
    +        # For LDAP Interface from specific service oud-ds-rs-ldap-2
    +        3892: 30892
    +        # For LDAPS Interface from specific service oud-ds-rs-ldap-2
    +        6362: 30362
    +        # For LDAPS Interface referring to LBR Admin services serving adminldaps port
    +        1444: 31444
    +        # For Admin LDAPS Interface from specific service oud-ds-rs-0
    +        4440: 30440
    +        # For Admin LDAPS Interface from specific service oud-ds-rs-1
    +        4441: 30441
    +        # For Admin LDAPS Interface from specific service oud-ds-rs-2
    +        4442: 30442
    +
  2. +
  3. +

    Run the following command to upgrade the ingress:

    +
    $ helm upgrade --namespace <namespace> \
    +--values nginx-ingress-values-override.yaml \
    +lbr-nginx stable/ingress-nginx 
    +

    Where:

    +
      +
    • lbr-nginx is your deployment name
    • +
    • stable/ingress-nginx is the chart reference
    • +
    +

    For example:

    +
    $ helm upgrade --namespace mynginx \
    +--values nginx-ingress-values-override.yaml \
    +lbr-nginx stable/ingress-nginx 
    +
  4. +
+
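After the helm upgrade completes, you can confirm that the additional TCP ports and NodePorts are exposed on the ingress controller service. A minimal sketch, using the lbr-nginx release in the mynginx namespace from the examples above:

    # show the ingress controller service, including its type and exposed ports
    $ kubectl --namespace mynginx get services -o wide lbr-nginx-ingress-nginx-controller
    # list just the service port numbers
    $ kubectl --namespace mynginx get service lbr-nginx-ingress-nginx-controller -o jsonpath='{.spec.ports[*].port}'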

Access to interfaces through ingress

+

Using the Helm chart, ingress objects are created according to configuration. The following table details the rules configured in ingress object(s) for access to Oracle Unified Directory Interfaces through ingress.

+ + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + +
Port | NodePort | Host | Example Hostname | Path | Backend Service:Port | Example Service Name:Port
http/https | 30080/30443 | <deployment/release name>-admin-0 | oud-ds-rs-admin-0 | * | <deployment/release name>-0:adminhttps | oud-ds-rs-0:adminhttps
http/https | 30080/30443 | <deployment/release name>-admin-N | oud-ds-rs-admin-N | * | <deployment/release name>-N:adminhttps | oud-ds-rs-1:adminhttps
http/https | 30080/30443 | <deployment/release name>-admin | oud-ds-rs-admin | * | <deployment/release name>-lbr-admin:adminhttps | oud-ds-rs-lbr-admin:adminhttps
http/https | 30080/30443 | * | * | /rest/v1/admin | <deployment/release name>-lbr-admin:adminhttps | oud-ds-rs-lbr-admin:adminhttps
http/https | 30080/30443 | <deployment/release name>-http-0 | oud-ds-rs-http-0 | * | <deployment/release name>-http-0:http | oud-ds-rs-http-0:http
http/https | 30080/30443 | <deployment/release name>-http-N | oud-ds-rs-http-N | * | <deployment/release name>-http-N:http | oud-ds-rs-http-N:http
http/https | 30080/30443 | <deployment/release name>-http | oud-ds-rs-http | * | <deployment/release name>-lbr-http:http | oud-ds-rs-lbr-http:http
http/https | 30080/30443 | * | * | /rest/v1/directory | <deployment/release name>-lbr-http:http | oud-ds-rs-lbr-http:http
http/https | 30080/30443 | * | * | /iam/directory | <deployment/release name>-lbr-http:http | oud-ds-rs-lbr-http:http
+
+

In the table above, example values are based on the value ‘oud-ds-rs’ as the deployment/release name for the Helm chart installation. The NodePorts mentioned in the table are according to the ingress configuration described in the previous section. When an external LoadBalancer is not available/configured, interfaces can be accessed through the NodePort on a Kubernetes node.

+
+

For LDAP/LDAPS access (based on the updated/upgraded configuration mentioned in previous section)

+ + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + +
Port | NodePort | Backend Service:Port | Example Service Name:Port
1389 | 31389 | <deployment/release name>-lbr-ldap:ldap | oud-ds-rs-lbr-ldap:ldap
1636 | 31636 | <deployment/release name>-lbr-ldap:ldap | oud-ds-rs-lbr-ldap:ldaps
1444 | 31444 | <deployment/release name>-lbr-admin:adminldaps | oud-ds-rs-lbr-admin:adminldaps
3890 | 30890 | <deployment/release name>-ldap-0:ldap | oud-ds-rs-ldap-0:ldap
6360 | 30360 | <deployment/release name>-ldap-0:ldaps | oud-ds-rs-ldap-0:ldaps
3891 | 30891 | <deployment/release name>-ldap-1:ldap | oud-ds-rs-ldap-1:ldap
6361 | 30361 | <deployment/release name>-ldap-1:ldaps | oud-ds-rs-ldap-1:ldaps
3892 | 30892 | <deployment/release name>-ldap-2:ldap | oud-ds-rs-ldap-2:ldap
6362 | 30362 | <deployment/release name>-ldap-2:ldaps | oud-ds-rs-ldap-2:ldaps
4440 | 30440 | <deployment/release name>-0:adminldaps | oud-ds-rs-ldap-0:adminldaps
4441 | 30441 | <deployment/release name>-1:adminldaps | oud-ds-rs-ldap-1:adminldaps
4442 | 30442 | <deployment/release name>-2:adminldaps | oud-ds-rs-ldap-2:adminldaps
+
    +
  • In the table above, example values are based on value ‘oud-ds-rs’ as the deployment/release name for helm chart installation.
  • +
  • The NodePorts mentioned in the table are according to Ingress configuration described in previous section.
  • +
  • When external LoadBalancer is not available/configured, Interfaces can be accessed through NodePort on a Kubernetes Node.
  • +
+

Changes in /etc/hosts to validate hostname based ingress rules

+

If it is not possible to have the LoadBalancer configuration updated to add host names for the Oracle Unified Directory Interfaces, then the following entries can be added to the /etc/hosts file on the host from which the Oracle Unified Directory interfaces will be accessed.

+
<IP Address of External LBR or Kubernetes Node>	oud-ds-rs-http oud-ds-rs-http-0 oud-ds-rs-http-1 oud-ds-rs-http-2 oud-ds-rs-http-N
+<IP Address of External LBR or Kubernetes Node>	oud-ds-rs-admin oud-ds-rs-admin-0 oud-ds-rs-admin-1 oud-ds-rs-admin-2 oud-ds-rs-admin-N
+
    +
  • In the entries above, host names are based on the value ‘oud-ds-rs’ as the deployment/release name for the Helm chart installation.
  • +
  • When External LoadBalancer is not available/configured, Interfaces can be accessed through NodePort on Kubernetes Node.
  • +
+

Using LDAP utilities

+

To use Oracle LDAP utilities such as ldapbind, ldapsearch, ldapmodify etc. you can either:

+
    +
  • +

    Run the LDAP commands from an OUD installation outside the Kubernetes cluster. This requires access to an on-premises OUD installation outside the Kubernetes cluster.

    +
  • +
  • +

    Run the LDAP commands from inside the OUD Kubernetes pod.

    +
    $ kubectl exec -ti <pod> -n <namespace> -- bash
    +

    For example:

    +
    $ kubectl exec -ti oud-ds-rs-0 -n oudns -- bash
    +

    This will take you into a bash session in the pod:

    +
    [oracle@oud-ds-rs-0 oracle]$    
    +

    Inside the container navigate to /u01/oracle/oud/bin to view the LDAP utilities:

    +
    [oracle@oud-ds-rs-0 oracle]$ cd /u01/oracle/oud/bin
    +[oracle@oud-ds-rs-0 bin]$ ls ldap*
    +ldapcompare  ldapdelete  ldapmodify  ldappasswordmodify  ldapsearch
    +

    Note: For commands that require an ldif file, copy the file into the <persistent_volume>/oud_user_projects directory:

    +
    $ cp file.ldif <persistent_volume>/oud_user_projects
    +

    For example:

    +
    $ cp file.ldif /scratch/shared/oud_user_projects
    +

    The file can then be viewed inside the pod, and used with the LDAP utilities (see the sketch after this list):

    +
    [oracle@oud-ds-rs-0 bin]$ cd /u01/oracle/user_projects
    +[oracle@oud-ds-rs-0 user_projects]$ ls *.ldif
    +file.ldif
    +
  • +
+
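Once the ldif file is visible inside the pod, it can be applied with ldapmodify from the same bin directory. This is a sketch only, assuming the default LDAP port 1389 inside the pod and the root user DN used elsewhere in this section:

    [oracle@oud-ds-rs-0 bin]$ ./ldapmodify --hostname localhost --port 1389 \
    -D "<Root User DN>" -w <Password for Root User DN> \
    -f /u01/oracle/user_projects/file.ldif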

Validate access using LDAP

+

Note: The examples assume sample data was installed when creating the OUD instance.

+
LDAP against External Load Balancer
+

Note: If your ingress is configured with type: LoadBalancer, then you cannot connect to the external LoadBalancer hostname and ports from inside the pod; you must connect from an OUD installation outside the cluster.

+
    +
  • +

    Command to perform ldapsearch against External LBR and LDAP port

    +
    $OUD_HOME/bin/ldapsearch --hostname <External LBR> --port 1389 \
    +-D "<Root User DN>" -w <Password for Root User DN> \
    +-b "" -s base "(objectClass=*)" "*"
    +

    The output will look similar to the following:

    +
    dn: 
    +objectClass: top
    +objectClass: ds-root-dse
    +lastChangeNumber: 0
    +firstChangeNumber: 0
    +changelog: cn=changelog
    +entryDN: 
    +pwdPolicySubentry: cn=Default Password Policy,cn=Password Policies,cn=config
    +subschemaSubentry: cn=schema
    +supportedAuthPasswordSchemes: SHA256
    +supportedAuthPasswordSchemes: SHA1
    +supportedAuthPasswordSchemes: SHA384
    +supportedAuthPasswordSchemes: SHA512
    +supportedAuthPasswordSchemes: MD5
    +numSubordinates: 1
    +supportedFeatures: 1.3.6.1.1.14
    +supportedFeatures: 1.3.6.1.4.1.4203.1.5.1
    +supportedFeatures: 1.3.6.1.4.1.4203.1.5.2
    +supportedFeatures: 1.3.6.1.4.1.4203.1.5.3
    +lastExternalChangelogCookie: 
    +vendorName: Oracle Corporation
    +vendorVersion: Oracle Unified Directory 12.2.1.4.0
    +componentVersion: 4
    +releaseVersion: 1
    +platformVersion: 0
    +supportedLDAPVersion: 2
    +supportedLDAPVersion: 3
    +supportedControl: 1.2.826.0.1.3344810.2.3
    +supportedControl: 1.2.840.113556.1.4.1413
    +supportedControl: 1.2.840.113556.1.4.319
    +supportedControl: 1.2.840.113556.1.4.473
    +supportedControl: 1.2.840.113556.1.4.805
    +supportedControl: 1.3.6.1.1.12
    +supportedControl: 1.3.6.1.1.13.1
    +supportedControl: 1.3.6.1.1.13.2
    +supportedControl: 1.3.6.1.4.1.26027.1.5.2
    +supportedControl: 1.3.6.1.4.1.26027.1.5.4
    +supportedControl: 1.3.6.1.4.1.26027.1.5.5
    +supportedControl: 1.3.6.1.4.1.26027.1.5.6
    +supportedControl: 1.3.6.1.4.1.26027.2.3.1
    +supportedControl: 1.3.6.1.4.1.26027.2.3.2
    +supportedControl: 1.3.6.1.4.1.26027.2.3.4
    +supportedControl: 1.3.6.1.4.1.42.2.27.8.5.1
    +supportedControl: 1.3.6.1.4.1.42.2.27.9.5.2
    +supportedControl: 1.3.6.1.4.1.42.2.27.9.5.8
    +supportedControl: 1.3.6.1.4.1.4203.1.10.1
    +supportedControl: 1.3.6.1.4.1.4203.1.10.2
    +supportedControl: 2.16.840.1.113730.3.4.12
    +supportedControl: 2.16.840.1.113730.3.4.16
    +supportedControl: 2.16.840.1.113730.3.4.17
    +supportedControl: 2.16.840.1.113730.3.4.18
    +supportedControl: 2.16.840.1.113730.3.4.19
    +supportedControl: 2.16.840.1.113730.3.4.2
    +supportedControl: 2.16.840.1.113730.3.4.3
    +supportedControl: 2.16.840.1.113730.3.4.4
    +supportedControl: 2.16.840.1.113730.3.4.5
    +supportedControl: 2.16.840.1.113730.3.4.9
    +supportedControl: 2.16.840.1.113894.1.8.21
    +supportedControl: 2.16.840.1.113894.1.8.31
    +supportedControl: 2.16.840.1.113894.1.8.36
    +maintenanceVersion: 2
    +supportedSASLMechanisms: PLAIN
    +supportedSASLMechanisms: EXTERNAL
    +supportedSASLMechanisms: CRAM-MD5
    +supportedSASLMechanisms: DIGEST-MD5
    +majorVersion: 12
    +orclGUID: D41D8CD98F003204A9800998ECF8427E
    +entryUUID: d41d8cd9-8f00-3204-a980-0998ecf8427e
    +ds-private-naming-contexts: cn=schema
    +hasSubordinates: true
    +nsUniqueId: d41d8cd9-8f003204-a9800998-ecf8427e
    +structuralObjectClass: ds-root-dse
    +supportedExtension: 1.3.6.1.4.1.4203.1.11.1
    +supportedExtension: 1.3.6.1.4.1.4203.1.11.3
    +supportedExtension: 1.3.6.1.1.8
    +supportedExtension: 1.3.6.1.4.1.26027.1.6.3
    +supportedExtension: 1.3.6.1.4.1.26027.1.6.2
    +supportedExtension: 1.3.6.1.4.1.26027.1.6.1
    +supportedExtension: 1.3.6.1.4.1.1466.20037
    +namingContexts: cn=changelog
    +namingContexts: dc=example,dc=com
    +
  • +
  • +

    Command to perform ldapsearch against External LBR and LDAP port for specific Oracle Unified Directory Interface

    +
    $OUD_HOME/bin/ldapsearch --hostname <External LBR> --port 3890 \
    +-D "<Root User DN>" -w <Password for Root User DN> \
    +-b "" -s base "(objectClass=*)" "*"
    +
  • +
+
LDAPS against Kubernetes NodePort for Ingress Controller Service
+

In the example below, LDAP utilities are executed from inside the oud-ds-rs-0 pod. If your ingress is configured with type: LoadBalancer, you can connect to the Kubernetes node hostname where the ingress is deployed using the NodePorts.

+
    +
  • +

    Command to perform ldapsearch against the Kubernetes NodePort and LDAPS port

    +
    [oracle@oud-ds-rs-0 bin]$ ./ldapsearch --hostname <Kubernetes Node> --port 31636 \
    +--useSSL --trustAll \
    +-D "<Root User DN>" -w <Password for Root User DN> \
    +-b "" -s base "(objectClass=*)" "*"
    +
  • +
+

Validate access using HTTPS

+
HTTPS/REST API against External LBR Host:Port
+

Note: In all the examples below:

+

a) You need to have an external IP assigned at ingress level.

+

b) | json_pp is used to format output in readable json format on the client side. It can be ignored if you do not have the json_pp library.

+

c) Base64 of userDN:userPassword can be generated using echo -n "userDN:userPassword" | base64.

+
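For example, a minimal sketch for generating the Basic Authorization value, assuming the default OUD root user DN cn=Directory Manager (replace <password> with the actual password):

    $ echo -n "cn=Directory Manager:<password>" | base64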
    +
  • +

    Command to invoke Data REST API:

    +
    $ curl --noproxy "*" -k --location \
    +--request GET 'https://<External LBR Host>/rest/v1/directory/uid=user.1,ou=People,dc=example,dc=com?scope=sub&attributes=*' \
    +--header 'Authorization: Basic <Base64 of userDN:userPassword>' | json_pp
    +

    The output will look similar to the following:

    +
    {
    +   "msgType" : "urn:ietf:params:rest:schemas:oracle:oud:1.0:SearchResponse",
    +   "totalResults" : 1,
    +   "searchResultEntries" : [
    +      {
    +         "dn" : "uid=user.1,ou=People,dc=example,dc=com",
    +         "attributes" : {
    +            "st" : "OH",
    +            "employeeNumber" : "1",
    +            "postalCode" : "93694",
    +            "description" : "This is the description for Aaren Atp.",
    +            "telephoneNumber" : "+1 390 103 6917",
    +            "homePhone" : "+1 280 375 4325",
    +            "initials" : "ALA",
    +            "objectClass" : [
    +               "top",
    +               "inetorgperson",
    +               "organizationalperson",
    +               "person"
    +            ],
    +            "uid" : "user.1",
    +            "sn" : "Atp",
    +            "street" : "70110 Fourth Street",
    +            "mobile" : "+1 680 734 6300",
    +            "givenName" : "Aaren",
    +            "mail" : "user.1@maildomain.net",
    +            "l" : "New Haven",
    +            "postalAddress" : "Aaren Atp$70110 Fourth Street$New Haven, OH  93694",
    +            "pager" : "+1 850 883 8888",
    +            "cn" : "Aaren Atp"
    +         }
    +      }
    +    ]
    +}
    +
  • +
  • +

    Command to invoke Data REST API against specific Oracle Unified Directory Interface:

    +
    $ curl --noproxy "*" -k  --location \
    +--request GET 'https://oud-ds-rs-http-0/rest/v1/directory/uid=user.1,ou=People,dc=example,dc=com?scope=sub&attributes=*' \
    +--header 'Authorization: Basic <Base64 of userDN:userPassword>' | json_pp
    +
      +
    • For this example, it is assumed that the value ‘oud-ds-rs’ is used as the deployment/release name for helm chart installation.
    • +
    • It is assumed that ‘oud-ds-rs-http-0’ points to an External LoadBalancer
    • +
    +
  • +
+
HTTPS/REST API against Kubernetes NodePort for Ingress Controller Service
+

Note: In all the examples below:

+

a) | json_pp is used to format output in readable json format on the client side. It can be ignored if you do not have the json_pp library.

+

b) Base64 of userDN:userPassword can be generated using echo -n "userDN:userPassword" | base64.

+

c) It is assumed that the value ‘oud-ds-rs’ is used as the deployment/release name for helm chart installation.

+
    +
  • +

    Command to invoke Data SCIM API:

    +
    $ curl --noproxy "*" -k --location \
    +--request GET 'https://<Kubernetes Node>:30443/iam/directory/oud/scim/v1/Users' \
    +--header 'Authorization: Basic <Base64 of userDN:userPassword>' | json_pp
    +

    The output will look similar to the following:

    +
    {
    +   "Resources" : [
    +      {
    +         "id" : "ad55a34a-763f-358f-93f9-da86f9ecd9e4",
    +         "userName" : [
    +            {
    +               "value" : "user.0"
    +            }
    +         ],
    +         "schemas" : [
    +            "urn:ietf:params:scim:schemas:core:2.0:User",
    +            "urn:ietf:params:scim:schemas:extension:oracle:2.0:OUD:User",
    +            "urn:ietf:params:scim:schemas:extension:enterprise:2.0:User"
    +         ],
    +         "meta" : {
    +            "location" : "http://<Kubernetes Node>:30443/iam/directory/oud/scim/v1/Users/ad55a34a-763f-358f-93f9-da86f9ecd9e4",
    +            "resourceType" : "User"
    +         },
    +         "addresses" : [
    +            {
    +               "postalCode" : "50369",
    +               "formatted" : "Aaccf Amar$01251 Chestnut Street$Panama City, DE  50369",
    +               "streetAddress" : "01251 Chestnut Street",
    +               "locality" : "Panama City",
    +               "region" : "DE"
    +            }
    +         ],
    +         "urn:ietf:params:scim:schemas:extension:oracle:2.0:OUD:User" : {
    +            "description" : [
    +               {
    +                  "value" : "This is the description for Aaccf Amar."
    +               }
    +            ],
    +            "mobile" : [
    +               {
    +                  "value" : "+1 010 154 3228"
    +               }
    +            ],
    +            "pager" : [
    +               {
    +                  "value" : "+1 779 041 6341"
    +               }
    +            ],
    +            "objectClass" : [
    +               {
    +                  "value" : "top"
    +               },
    +               {
    +                  "value" : "organizationalperson"
    +               },
    +               {
    +                  "value" : "person"
    +               },
    +               {
    +                  "value" : "inetorgperson"
    +               }
    +            ],
    +            "initials" : [
    +               {
    +                  "value" : "ASA"
    +               }
    +            ],
    +            "homePhone" : [
    +               {
    +                  "value" : "+1 225 216 5900"
    +               }
    +            ]
    +         },
    +         "name" : [
    +            {
    +               "givenName" : "Aaccf",
    +               "familyName" : "Amar",
    +               "formatted" : "Aaccf Amar"
    +            }
    +         ],
    +         "emails" : [
    +            {
    +               "value" : "user.0@maildomain.net"
    +            }
    +         ],
    +         "phoneNumbers" : [
    +            {
    +               "value" : "+1 685 622 6202"
    +            }
    +         ],
    +         "urn:ietf:params:scim:schemas:extension:enterprise:2.0:User" : {
    +            "employeeNumber" : [
    +               {
    +                  "value" : "0"
    +               }
    +            ]
    +         }
    +      }
    +      ,
    + .
    + .
    + .
    + }
    +
  • +
  • +

    Command to invoke Data SCIM API against specific Oracle Unified Directory Interface:

    +
    $ curl --noproxy "*" -k --location \
    +--request GET 'https://oud-ds-rs-http-0:30443/iam/directory/oud/scim/v1/Users' \
    +--header 'Authorization: Basic <Base64 of userDN:userPassword>' | json_pp
    +
  • +
+
HTTPS/REST Admin API
+

Note: In all the examples below:

+

a) | json_pp is used to format output in readable json format on the client side. It can be ignored if you do not have the json_pp library.

+

b) Base64 of userDN:userPassword can be generated using echo -n "userDN:userPassword" | base64.

+
    +
  • +

    Command to invoke Admin REST API against External LBR:

    +
    $ curl --noproxy "*" -k --insecure --location \
    +--request GET 'https://<External LBR Host>/rest/v1/admin/?scope=base&attributes=vendorName&attributes=vendorVersion&attributes=ds-private-naming-contexts&attributes=subschemaSubentry' \
    +--header 'Content-Type: application/json' \
    +--header 'Authorization: Basic <Base64 of userDN:userPassword>' | json_pp
    +

    The output will look similar to the following:

    +
    {
    +   "totalResults" : 1,
    +   "searchResultEntries" : [
    +      {
    +         "dn" : "",
    +         "attributes" : {
    +            "vendorVersion" : "Oracle Unified Directory 12.2.1.4.0",
    +            "ds-private-naming-contexts" : [
    +               "cn=admin data",
    +               "cn=ads-truststore",
    +               "cn=backups",
    +               "cn=config",
    +               "cn=monitor",
    +               "cn=schema",
    +               "cn=tasks",
    +               "cn=virtual acis",
    +               "dc=replicationchanges"
    +            ],
    +            "subschemaSubentry" : "cn=schema",
    +            "vendorName" : "Oracle Corporation"
    +         }
    +      }
    +   ],
    +   "msgType" : "urn:ietf:params:rest:schemas:oracle:oud:1.0:SearchResponse"
    +}
    +
  • +
  • +

    Command to invoke Admin REST API against specific Oracle Unified Directory Admin Interface:

    +
    $ curl --noproxy "*" -k --insecure --location \
    +--request GET 'https://oud-ds-rs-admin-0/rest/v1/admin/?scope=base&attributes=vendorName&attributes=vendorVersion&attributes=ds-private-naming-contexts&attributes=subschemaSubentry' \
    +--header 'Content-Type: application/json' \
    +--header 'Authorization: Basic <Base64 of userDN:userPassword>' | json_pp
    +
  • +
  • +

    Command to invoke Admin REST API against Kubernetes NodePort for Ingress Controller Service

    +
    $ curl --noproxy "*" -k --insecure --location \
    +--request GET 'https://oud-ds-rs-admin-0:30443/rest/v1/admin/?scope=base&attributes=vendorName&attributes=vendorVersion&attributes=ds-private-naming-contexts&attributes=subschemaSubentry' \
    +--header 'Content-Type: application/json' \
    +--header 'Authorization: Basic <Base64 of userDN:userPassword>' | json_pp
    +
  • +
+ + + + + + + + + + + + + + + + + + diff --git a/docs/23.4.1/idm-products/oud/configure-ingress/index.xml b/docs/23.4.1/idm-products/oud/configure-ingress/index.xml new file mode 100644 index 000000000..1068f2d74 --- /dev/null +++ b/docs/23.4.1/idm-products/oud/configure-ingress/index.xml @@ -0,0 +1,14 @@ + + + + Configure an Ingress for OUD on Oracle Fusion Middleware on Kubernetes + /fmw-kubernetes/23.4.1/idm-products/oud/configure-ingress/ + Recent content in Configure an Ingress for OUD on Oracle Fusion Middleware on Kubernetes + Hugo -- gohugo.io + en-us + + + + + + \ No newline at end of file diff --git a/docs/23.4.1/idm-products/oud/create-or-update-image/index.html b/docs/23.4.1/idm-products/oud/create-or-update-image/index.html new file mode 100644 index 000000000..31805bfd3 --- /dev/null +++ b/docs/23.4.1/idm-products/oud/create-or-update-image/index.html @@ -0,0 +1,4212 @@ + + + + + + + + + + + + Create or update an image :: Oracle Fusion Middleware on Kubernetes + + + + + + + + + + + + + + + + + + + + + + + + + +

+ + Create or update an image +

+ + + + + + + +

As described in Prepare Your Environment you can create your own OUD container image. If you have access to My Oracle Support (MOS), and there is a need to build a new image with an interim or one-off patch, it is recommended to use the WebLogic Image Tool to build an Oracle Unified Directory image for production deployments.

+

Create or update an Oracle Unified Directory image using the WebLogic Image Tool

+

Using the WebLogic Image Tool, you can create a new Oracle Unified Directory image with PSUs and interim patches or update an existing image with one or more interim patches.

+
+

Recommendations:

+
    +
  • Use create for creating a new Oracle Unified Directory image containing the Oracle Unified Directory binaries, bundle patch and interim patches. This is the recommended approach if you have access to the OUD patches because it optimizes the size of the image.
  • +
  • Use update for patching an existing Oracle Unified Directory image with a single interim patch. Note that the patched image size may increase considerably due to additional image layers introduced by the patch application tool.
  • +
+
+

Create an image

+

Set up the WebLogic Image Tool

+ +
Prerequisites
+

Verify that your environment meets the following prerequisites:

+
    +
  • Docker client and daemon on the build machine, with minimum Docker version 18.03.1.ce.
  • +
  • Bash version 4.0 or later, to enable the command complete feature.
  • +
  • JAVA_HOME environment variable set to the appropriate JDK location, for example: /scratch/export/oracle/product/jdk
  • +
+
Set up the WebLogic Image Tool
+

To set up the WebLogic Image Tool:

+
    +
  1. +

    Create a working directory and change to it:

    +
    $ mkdir <workdir>
    +$ cd <workdir>
    +

    For example:

    +
    $ mkdir /scratch/imagetool-setup
    +$ cd /scratch/imagetool-setup
    +
  2. +
  3. +

    Download the latest version of the WebLogic Image Tool from the releases page.

    +
    $ wget https://github.com/oracle/weblogic-image-tool/releases/download/release-X.X.X/imagetool.zip
    +

    where X.X.X is the latest release referenced on the releases page.

    +
  4. +
  5. +

    Unzip the release ZIP file in the imagetool-setup directory.

    +
    $ unzip imagetool.zip
    +
  6. +
  7. +

    Execute the following commands to set up the WebLogic Image Tool:

    +
    $ cd <workdir>/imagetool-setup/imagetool/bin
    +$ source setup.sh
    +

    For example:

    +
    $ cd /scratch/imagetool-setup/imagetool/bin
    +$ source setup.sh
    +
  8. +
+
Validate setup
+

To validate the setup of the WebLogic Image Tool:

+
    +
  1. +

    Enter the following command to retrieve the version of the WebLogic Image Tool:

    +
    $ imagetool --version
    +
  2. +
  3. +

    Enter imagetool then press the Tab key to display the available imagetool commands:

    +
    $ imagetool <TAB>
    +cache   create  help    rebase  update
    +
  4. +
+
WebLogic Image Tool build directory
+

The WebLogic Image Tool creates a temporary Docker context directory, prefixed by wlsimgbuilder_temp, every time the tool runs. Under normal circumstances, this context directory will be deleted. However, if the process is aborted or the tool is unable to remove the directory, it is safe for you to delete it manually. By default, the WebLogic Image Tool creates the Docker context directory under the user’s home directory. If you prefer to use a different directory for the temporary context, set the environment variable WLSIMG_BLDDIR:

+
$ export WLSIMG_BLDDIR="/path/to/build/dir"
+
WebLogic Image Tool cache
+

The WebLogic Image Tool maintains a local file cache store. This store is used to look up where the Java, WebLogic Server installers, and WebLogic Server patches reside in the local file system. By default, the cache store is located in the user’s $HOME/cache directory. Under this directory, the lookup information is stored in the .metadata file. All automatically downloaded patches also reside in this directory. You can change the default cache store location by setting the environment variable WLSIMG_CACHEDIR:

+
$ export WLSIMG_CACHEDIR="/path/to/cachedir"
+
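If you need to check what has been registered in the cache (installers, patches and their locations), the image tool provides a listItems sub-command. For example:

    $ imagetool cache listItems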
Set up additional build scripts
+

Creating an Oracle Unified Directory container image using the WebLogic Image Tool requires additional container scripts for Oracle Unified Directory domains.

+
    +
  1. +

    Clone the docker-images repository to set up those scripts. In these steps, this directory is DOCKER_REPO:

    +
    $ cd <workdir>/imagetool-setup
    +$ git clone https://github.com/oracle/docker-images.git
    +

    For example:

    +
    $ cd /scratch/imagetool-setup
    +$ git clone https://github.com/oracle/docker-images.git
    +
  2. +
+
+

Note: If you want to create the image, continue with the following steps. Otherwise, to update the image see Update an image.

+
+

Create an image

+

After setting up the WebLogic Image Tool, follow these steps to use the WebLogic Image Tool to create a new Oracle Unified Directory image.

+
Download the Oracle Unified Directory installation binaries and patches
+

You must download the required Oracle Unified Directory installation binaries and patches as listed below from the Oracle Software Delivery Cloud and save them in a directory of your choice.

+

The installation binaries and patches required are:

+
    +
  • +

    Oracle Unified Directory 12.2.1.4.0

    +
      +
    • fmw_12.2.1.4.0_oud.jar
    • +
    +
  • +
  • +

    OUD Patches:

    +
      +
    • View document ID 2723908.1 on My Oracle Support. In the Container Image Download/Patch Details section, locate the Oracle Unified Directory (OUD) table. For the latest PSU click the README link in the Documentation column. In the README, locate the “Installed Software” section. All the patch numbers to be downloaded are listed here. Download all these individual patches from My Oracle Support.
    • +
    +
  • +
  • +

    Oracle JDK v8

    +
      +
    • jdk-8uXXX-linux-x64.tar.gz where XXX is the JDK version referenced in the README above.
    • +
    +
  • +
+
Update required build files
+

The following files in the code repository location <imagetool-setup-location>/docker-images/OracleUnifiedDirectory/imagetool/12.2.1.4.0 are used for creating the image:

+
    +
  • additionalBuildCmds.txt
  • +
  • buildArgs
  • +
+
    +
  1. +

    Edit the <workdir>/imagetool-setup/docker-images/OracleUnifiedDirectory/imagetool/12.2.1.4.0/buildArgs file and change %DOCKER_REPO%, %JDK_VERSION% and %BUILDTAG% appropriately.

    +

    For example:

    +
    create
    +--jdkVersion=8u321
    +--type oud
    +--version=12.2.1.4.0
    +--tag=oud-latestpsu:12.2.1.4.0
    +--pull
    +--installerResponseFile /scratch/imagetool-setup/docker-images/OracleUnifiedDirectory/dockerfiles/12.2.1.4.0/install/oud.response
    +--additionalBuildCommands /scratch/imagetool-setup/docker-images/OracleUnifiedDirectory/imagetool/12.2.1.4.0/additionalBuildCmds.txt
    +--additionalBuildFiles /scratch/imagetool-setup/docker-images/OracleUnifiedDirectory/dockerfiles/12.2.1.4.0/container-scripts
    +
  2. +
  3. +

    The <workdir>/imagetool-setup/docker-images/OracleUnifiedDirectory/imagetool/12.2.1.4.0/additionalBuildCmds.txt file contains additional build commands. You may edit this file if you want to customize the image further.

    +
  4. +
+
Create the image
+
    +
  1. +

    Add a JDK package to the WebLogic Image Tool cache. For example:

    +
    $ imagetool cache addInstaller --type jdk --version 8uXXX --path <download location>/jdk-8uXXX-linux-x64.tar.gz
    +

    where XXX is the JDK version downloaded

    +
  2. +
  3. +

    Add the downloaded installation binaries to the WebLogic Image Tool cache. For example:

    +
    $ imagetool cache addInstaller --type OUD --version 12.2.1.4.0 --path <download location>/fmw_12.2.1.4.0_oud.jar
    +
  4. +
  5. +

    Add the downloaded OPatch patch to the WebLogic Image Tool cache. For example:

    +
    $ imagetool cache addEntry --key 28186730_13.9.4.2.8 --value <download location>/p28186730_139428_Generic.zip
    +
  6. +
  7. +

    Add the rest of the downloaded product patches to the WebLogic Image Tool cache:

    +
    $ imagetool cache addEntry --key <patch>_12.2.1.4.0 --value <download location>/p<patch>_122140_Generic.zip
    +

    For example:

    +
    $ imagetool cache addEntry --key 33448950_12.2.1.4.0 --value <download location>/p33448950_122140_Generic.zip
    +
  8. +
  9. +

    Edit the <workdir>/imagetool-setup/docker-images/OracleUnifiedDirectory/imagetool/12.2.1.4.0/buildArgs file and append the product patches and opatch patch as follows:

    +
    --patches 33448950_12.2.1.4.0
    +--opatchBugNumber=28186730_13.9.4.2.8
    +

    An example buildArgs file is now as follows:

    +
    create
    +--jdkVersion=8u321
    +--type oud
    +--version=12.2.1.4.0
    +--tag=oud-latestpsu:12.2.1.4.0
    +--pull
    +--installerResponseFile /scratch/imagetool-setup/docker-images/OracleUnifiedDirectory/dockerfiles/12.2.1.4.0/install/oud.response
    +--additionalBuildCommands /scratch/imagetool-setup/docker-images/OracleUnifiedDirectory/imagetool/12.2.1.4.0/additionalBuildCmds.txt
    +--additionalBuildFiles /scratch/imagetool-setup/docker-images/OracleUnifiedDirectory/dockerfiles/12.2.1.4.0/container-scripts
    +--patches 33448950_12.2.1.4.0
    +--opatchBugNumber=28186730_13.9.4.2.8
    +
    +

    Note: In the buildArgs file:

    +
      +
    • --jdkVersion value must match the --version value used in the imagetool cache addInstaller command for --type jdk.
    • +
    • --version value must match the --version value used in the imagetool cache addInstaller command for --type OUD.
    • +
    +
    +

    Refer to this page for the complete list of options available with the WebLogic Image Tool create command.

    +
  10. +
  11. +

    Create the Oracle Unified Directory image:

    +
    $ imagetool @<absolute path to buildargs file> --fromImage ghcr.io/oracle/oraclelinux:7-slim
    +
    +

    Note: Make sure that the absolute path to the buildargs file is prepended with a @ character, as shown in the example above.

    +
    +

    For example:

    +
    $ imagetool @<imagetool-setup-location>/docker-images/OracleUnifiedDirectory/imagetool/12.2.1.4.0/buildArgs --fromImage ghcr.io/oracle/oraclelinux:7-slim
    +
  12. +
  13. +

    Check the created image using the docker images command:

    +
    $ docker images | grep oud
    +

    The output will look similar to the following:

    +
    oud-latestpsu                                       12.2.1.4.0                     30b02a692fa3        About a minute ago   1.04GB
    +
  14. +
  15. +

    Run the following command to save the container image to a tar file (a sketch for loading or pushing it elsewhere follows this list):

    +
    $ docker save -o <path>/<file>.tar <image>
    +

    For example:

    +
    $ docker save -o $WORKDIR/oud-latestpsu.tar oud-latestpsu:12.2.1.4.0
    +
  16. +
+
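The saved tar file can then be copied to another host and loaded into its local image store, or tagged and pushed to a container registry of your choice. A minimal sketch, where <your-registry> is a placeholder for your own registry:

    # load the image from the tar file, then tag and push it to a registry
    $ docker load -i $WORKDIR/oud-latestpsu.tar
    $ docker tag oud-latestpsu:12.2.1.4.0 <your-registry>/oud-latestpsu:12.2.1.4.0
    $ docker push <your-registry>/oud-latestpsu:12.2.1.4.0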

Update an image

+

The steps below show how to update an existing Oracle Unified Directory image with an interim patch.

+

The container image to be patched must be loaded in the local docker images repository before attempting these steps.

+

In the examples below the image oracle/oud:12.2.1.4.0 is updated with an interim patch.

+
$ docker images
+
+REPOSITORY     TAG          IMAGE ID          CREATED             SIZE
+oracle/oud     12.2.1.4.0   b051804ba15f      3 months ago        1.04GB
+
    +
  1. +

    Set up the WebLogic Image Tool.

    +
  2. +
  3. +

    Download the required interim patch and latest OPatch (28186730) from My Oracle Support and save them in a directory of your choice.

    +
  4. +
  5. +

    Add the OPatch patch to the WebLogic Image Tool cache, for example:

    +
    $ imagetool cache addEntry --key 28186730_13.9.4.2.8 --value <downloaded-patches-location>/p28186730_139428_Generic.zip
    +
  6. +
  7. +

    Execute the imagetool cache addEntry command for each patch to add the required patch(es) to the WebLogic Image Tool cache. For example, to add patch p33521773_12214211008_Generic.zip:

    +
    $ imagetool cache addEntry --key=33521773_12.2.1.4.211008 --value <downloaded-patches-location>/p33521773_12214211008_Generic.zip
    +
  8. +
  9. +

    Provide the following arguments to the WebLogic Image Tool update command:

    +
      +
    • --fromImage - Identify the image that needs to be updated. In the example below, the image to be updated is oracle/oud:12.2.1.4.0.
    • +
    • --patches - Multiple patches can be specified as a comma-separated list.
    • +
    • --tag - Specify the new tag to be applied for the image being built.
    • +
    +

    Refer here for the complete list of options available with the WebLogic Image Tool update command.

    +
    +

    Note: The WebLogic Image Tool cache should have the latest OPatch zip. The WebLogic Image Tool will update the OPatch if it is not already updated in the image.

    +
    +
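    To confirm the cache contains the OPatch entry before running the update, the cache contents can be listed (a minimal sketch):

    $ imagetool cache listItems | grep 28186730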

    For example:

    +
    $ imagetool update --fromImage oracle/oud:12.2.1.4.0 --tag=oracle/oud-new:12.2.1.4.0 --patches=33521773_12.2.1.4.211008 --opatchBugNumber=28186730_13.9.4.2.8
    +
    +

    Note: If the command fails because the files in the image being upgraded are not owned by oracle:oracle, then add the parameter --chown <userid>:<groupid> to correspond with the values returned in the error.

    +
    +
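    For example, if the error reports that the files are owned by oracle:root (a hypothetical ownership used here for illustration), the command would become:

    $ imagetool update --fromImage oracle/oud:12.2.1.4.0 --tag=oracle/oud-new:12.2.1.4.0 --patches=33521773_12.2.1.4.211008 --opatchBugNumber=28186730_13.9.4.2.8 --chown oracle:root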
  10. +
  11. +

    Check the built image using the docker images command:

    +
    $ docker images | grep oud
    +

    The output will look similar to the following:

    +
    REPOSITORY         TAG          IMAGE ID        CREATED             SIZE
    +oracle/oud-new     12.2.1.4.0   78ccd1ad67eb    5 minutes ago       1.11GB
    +oracle/oud         12.2.1.4.0   b051804ba15f    3 months ago        1.04GB
    +
  12. +
  13. +

    Run the following command to save the patched container image to a tar file:

    +
    $ docker save -o <path>/<file>.tar <image>
    +

    For example:

    +
    $ docker save -o $WORKDIR/oud-new.tar oracle/oud-new:12.2.1.4.0
    +
  14. +
diff --git a/docs/23.4.1/idm-products/oud/create-or-update-image/index.xml b/docs/23.4.1/idm-products/oud/create-or-update-image/index.xml new file mode 100644 index 000000000..96ec31789 --- /dev/null +++ b/docs/23.4.1/idm-products/oud/create-or-update-image/index.xml @@ -0,0 +1,14 @@ (Hugo RSS index: "Create or update an image on Oracle Fusion Middleware on Kubernetes", /fmw-kubernetes/23.4.1/idm-products/oud/create-or-update-image/)

diff --git a/docs/23.4.1/idm-products/oud/create-oud-instances/index.html b/docs/23.4.1/idm-products/oud/create-oud-instances/index.html new file mode 100644 index 000000000..f2c028a68 --- /dev/null +++ b/docs/23.4.1/idm-products/oud/create-oud-instances/index.html @@ -0,0 +1,5616 @@ Create Oracle Unified Directory Instances :: Oracle Fusion Middleware on Kubernetes

Create Oracle Unified Directory Instances
    +
  1. Introduction
  2. +
  3. Create a Kubernetes namespace
  4. +
  5. Create a Kubernetes secret for the container registry
  6. +
  7. Create a Kubernetes secret for cronjob images
  8. +
  9. The oud-ds-rs Helm chart
  10. +
  11. Create OUD instances
  12. +
  13. Helm command output
  14. +
  15. Verify the OUD deployment
  16. +
  17. Verify the OUD replication
  18. +
  19. Verify the cronjob
  20. +
  21. Undeploy an OUD deployment
  22. +
  23. Appendix A: Configuration parameters
  24. +
  25. Appendix B: Environment Variables
  26. +
+

Introduction

+

This chapter demonstrates how to deploy Oracle Unified Directory (OUD) 12c instance(s) and replicated instances using the Helm package manager for Kubernetes.

+

The helm chart can be used to deploy an Oracle Unified Directory instance as a base, with configured sample entries, and multiple replicated Oracle Unified Directory instances/pods/services based on the specified replicaCount.

+
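For example, to deploy four replicated instances instead of the default three, the replicaCount parameter could be overridden at install time (a minimal sketch; see Create OUD instances below for complete examples):

$ helm install --namespace oudns --set replicaCount=4 oud-ds-rs oud-ds-rs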

Based on the configuration, this chart deploys the following objects in the specified namespace of a Kubernetes cluster.

+
    +
  • Service Account
  • +
  • Secret
  • +
  • Persistent Volume and Persistent Volume Claim
  • +
  • Pod(s)/Container(s) for Oracle Unified Directory Instances
  • +
  • Services for interfaces exposed through Oracle Unified Directory Instances
  • +
  • Ingress configuration
  • +
+

Note: From July 2022 (22.3.1) onwards, OUD deployment is performed using StatefulSets.

+

Create a Kubernetes namespace

+

Create a Kubernetes namespace for the OUD deployment by running the following command:

+
$ kubectl create namespace <namespace>
+

For example:

+
$ kubectl create namespace oudns
+

The output will look similar to the following:

+
namespace/oudns created
+

Create a Kubernetes secret for the container registry

+

Create a Kubernetes secret to store the credentials for the container registry where the OUD image is stored. This step must be followed if using Oracle Container Registry or your own private container registry. If you are not using a container registry and have loaded the images on each of the master and worker nodes, you can skip this step.

+
    +
  1. +

    Run the following command to create the secret:

    +
    kubectl create secret docker-registry "orclcred" --docker-server=<CONTAINER_REGISTRY> \
    +--docker-username="<USER_NAME>" \
    +--docker-password=<PASSWORD> --docker-email=<EMAIL_ID> \
    +--namespace=<domain_namespace>
    +

    For example, if using Oracle Container Registry:

    +
    $ kubectl create secret docker-registry "orclcred" --docker-server=container-registry.oracle.com \
    +--docker-username="user@example.com" \
    +--docker-password=password --docker-email=user@example.com \
    +--namespace=oudns
    +

    Replace <USER_NAME> and <PASSWORD> with the credentials for the registry with the following caveats:

    +
      +
    • +

      If using Oracle Container Registry to pull the OUD container image, this is the username and password used to login to Oracle Container Registry. Before you can use this image you must login to Oracle Container Registry, navigate to Middleware > oud_cpu and accept the license agreement.

      +
    • +
    • +

      If using your own container registry to store the OUD container image, this is the username and password (or token) for your container registry.

      +
    • +
    +

    The output will look similar to the following:

    +
    secret/orclcred created
    +
  2. +
+

Create a Kubernetes secret for cronjob images

+

Once OUD is deployed, if the Kubernetes node where the OUD pod(s) is/are running goes down after the pod eviction time-out, the pod(s) don’t get evicted but move to a Terminating state. The pod(s) will then remain in that state forever. To avoid this problem a cron-job is created during OUD deployment that checks for any pods in Terminating state. If there are any pods in Terminating state, the cron job will delete them. The pods will then start again automatically. This cron job requires access to images on hub.docker.com. A Kubernetes secret must therefore be created to enable access to these images.

+
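For illustration, the clean up performed by this cron job is equivalent to finding any stuck pods and force-deleting them manually; a minimal sketch, assuming the oudns namespace:

$ kubectl get pods -n oudns | grep Terminating
$ kubectl delete pod <pod-name> -n oudns --force --grace-period=0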
    +
  1. +

    Create a Kubernetes secret to access the required images on hub.docker.com:

    +

    Note: You must first have a user account on hub.docker.com:

    +
    $ kubectl create secret docker-registry "dockercred" --docker-server="https://index.docker.io/v1/" --docker-username="<docker_username>" --docker-password=<password> --docker-email=<docker_email_credentials> --namespace=<domain_namespace>
    +

    For example:

    +
    $ kubectl create secret docker-registry "dockercred" --docker-server="https://index.docker.io/v1/" --docker-username="username" --docker-password=<password> --docker-email=user@example.com --namespace=oudns
    +

    The output will look similar to the following:

    +
    secret/dockercred created
    +
  2. +
+

The oud-ds-rs Helm chart

+

The oud-ds-rs Helm chart allows you to create or deploy a group of replicated Oracle Unified Directory instances along with Kubernetes objects in a specified namespace.

+

The deployment can be initiated by running the following Helm command with reference to the oud-ds-rs Helm chart, along with configuration parameters according to your environment.

+
$ cd $WORKDIR/kubernetes/helm
+$ helm install --namespace <namespace> \
+<Configuration Parameters> \
+<deployment/release name> \
+<Helm Chart Path/Name>
+

Configuration Parameters (override values in chart) can be passed on with --set arguments on the command line and/or with -f / --values arguments when referring to files.

+
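For illustration, the two mechanisms can be combined, with --set values taking precedence over those in the values file; a minimal sketch using the release and chart names from the examples below:

$ helm install --namespace oudns \
--values oud-ds-rs-values-override.yaml \
--set oudConfig.rootUserPassword=<password> \
oud-ds-rs oud-ds-rs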

Note: The examples in Create OUD instances below provide values which allow the user to override the default values provided by the Helm chart. A full list +of configuration parameters and their default values is shown in Appendix A: Configuration parameters.

+

For more details about the helm command and parameters, please execute helm --help and helm install --help.

+

Create OUD instances

+

You can create OUD instances using one of the following methods:

+
    +
  1. Using a YAML file
  2. +
  3. Using --set argument
  4. +
+

Note: While it is possible to install sample data during the OUD deployment, it is not possible to load your own data via an ldif file. In order to load data in OUD, create the OUD deployment and then use ldapmodify after the ingress deployment. See Using LDAP utilities.

+
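For illustration only, loading an ldif file with ldapmodify after the ingress is configured would look similar to the following sketch; the hostname, port and file name are assumptions, and Using LDAP utilities describes the supported approach:

$ ldapmodify -h oud.example.com -p 1389 -D "cn=Directory Manager" -w <password> -a -f /tmp/users.ldif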

Using a YAML file

+
    +
  1. +

    Navigate to the $WORKDIR/kubernetes/helm directory:

    +
    $ cd $WORKDIR/kubernetes/helm
    +
  2. +
  3. +

    Create an oud-ds-rs-values-override.yaml as follows:

    +
    image:
    +  repository: <image_location>
    +  tag: <image_tag>
    +  pullPolicy: IfNotPresent
    +imagePullSecrets:
    +  - name: orclcred
    +oudConfig:
    + # memory, cpu parameters for both requests and limits for oud instances
    +  resources:
    +    limits:
    +      cpu: "1"
    +      memory: "4Gi"
    +    requests:
    +      cpu: "500m" 
    +      memory: "4Gi"
    +  rootUserPassword: <password>
    +  sampleData: "200"
    +persistence:
    +  type: filesystem
    +  filesystem:
    +    hostPath:
    +      path: <persistent_volume>/oud_user_projects
    +cronJob:
    +  kubectlImage:
    +    repository: bitnami/kubectl
    +    tag: <version>
    +    pullPolicy: IfNotPresent
    + 
    +  imagePullSecrets:
    +    - name: dockercred
    +

    For example:

    +
    image:
    +  repository: container-registry.oracle.com/middleware/oud_cpu
    +  tag: 12.2.1.4-jdk8-ol7-<October`23>
    +  pullPolicy: IfNotPresent
    +imagePullSecrets:
    +  - name: orclcred
    +oudConfig:
    + # memory, cpu parameters for both requests and limits for oud instances
    +  resources:
    +    limits:
    +      cpu: "1"
    +      memory: "8Gi"
    +    requests:
    +      cpu: "500m" 
    +      memory: "4Gi"
    +  rootUserPassword: <password>
    +  sampleData: "200"
    +persistence:
    +  type: filesystem
    +  filesystem:
    +    hostPath:
    +      path: /scratch/shared/oud_user_projects
    +cronJob:
    +  kubectlImage:
    +    repository: bitnami/kubectl
    +    tag: 1.26.6
    +    pullPolicy: IfNotPresent
    + 
    +  imagePullSecrets:
    +    - name: dockercred
    +

    The following caveats exist:

    +
      +
    • +

      Replace <password> with the relevant password.

      +
    • +
    • +

      sampleData: "200" will load 200 sample users into the default baseDN dc=example,dc=com. If you do not want sample data, remove this entry. If sampleData is set to 1,000,000 users or greater, then you must add the following entries to the yaml file to prevent inconsistencies in dsreplication:

      +
      deploymentConfig:
      +  startupTime: 720
      +  period: 120
      +  timeout: 60
      +
    • +
    • +

      The <version> in kubectlImage tag: should be set to the same version as your Kubernetes version (kubectl version). For example if your Kubernetes version is 1.26.6 set to 1.26.6.

      +
    • +
    • +

      If you are not using Oracle Container Registry or your own container registry for your OUD container image, then you can remove the following:

      +
      imagePullSecrets:
      +  - name: orclcred
      +
    • +
    • +

      If your cluster does not have access to the internet to pull external images, such as bitnami/kubectl or busybox, you must load the images in a local container registry. You must then set the following:

      +
      cronJob:
      +  kubectlImage:
      +    repository: container-registry.example.com/bitnami/kubectl
      +    tag: 1.26.6
      +    pullPolicy: IfNotPresent
      +	   
      +busybox:
      +  image: container-registry.example.com/busybox 
      +
    • +
    • +

      If using NFS for your persistent volume then change the persistence section as follows:

      +

      Note: If you want to use NFS you should ensure that you have a default Kubernetes storage class defined for your environment that allows network storage.

      +

      For more information on storage classes, see Storage Classes.

      +
      persistence:
      +  type: networkstorage
      +  networkstorage:
      +    nfs: 
      +      path: <persistent_volume>/oud_user_projects
      +      server: <NFS IP address>
      +  # if true, it will create the storageclass. if value is false, please provide existing storage class (storageClass) to be used.
      +  storageClassCreate: true
      +  storageClass: oud-sc
      +  # if storageClassCreate is true, please provide the custom provisioner if any to use. If you do not have a custom provisioner, delete this line, and it will use the default class kubernetes.io/is-default-class.
      +  provisioner:  kubernetes.io/is-default-class
      +

      The following caveats exist:

      +
        +
      • If you want to create your own storage class, set storageClassCreate: true. If storageClassCreate: true it is recommended to set storageClass to a value of your choice, and provisioner to the provisioner supported by your cloud vendor.
      • +
      • If you have an existing storageClass that supports network storage, set storageClassCreate: false and storageClass to the NAME value returned in “kubectl get storageclass”. The provisioner can be ignored.
      • +
      +
    • +
    • +

      If using Block Device storage for your persistent volume then change the persistence section as follows:

      +

      Note: If you want to use block devices you should ensure that you have a default Kubernetes storage class defined for your environment that allows dynamic storage. Each vendor has its own storage provider but it may not be configured to provide dynamic storage allocation.

      +

      For more information on storage classes, see Storage Classes.

      +
      persistence:
      +  type: blockstorage
      +  # Specify Accessmode ReadWriteMany for NFS and for block ReadWriteOnce
      +  accessMode: ReadWriteOnce
      +  # if true, it will create the storageclass. if value is false, please provide existing storage class (storageClass) to be used.
      +  storageClassCreate: true
      +  storageClass: oud-sc
      +  # if storageClassCreate is true, please provide the custom provisioner if any to use or else it will use default.
      +  provisioner:  oracle.com/oci
      +

      The following caveats exist:

      +
        +
      • If you want to create your own storage class, set storageClassCreate: true. If storageClassCreate: true it is recommended to set storageClass to a value of your choice, and provisioner to the provisioner supported by your cloud vendor.
      • +
      • If you have an existing storageClass that supports dynamic storage, set storageClassCreate: false and storageClass to the NAME value returned in “kubectl get storageclass”. The provisioner can be ignored.
      • +
      +
    • +
    • +

      For resources, limits and requests, the example CPU and memory values shown are for development environments only. For Enterprise Deployments, please review the performance recommendations and sizing requirements in Enterprise Deployment Guide for Oracle Identity and Access Management in a Kubernetes Cluster.

      +

      Note: Limits and requests for CPU resources are measured in CPU units. One CPU in Kubernetes is equivalent to 1 vCPU/Core for cloud providers, and 1 hyperthread on bare-metal Intel processors. An “m” suffix in a CPU attribute indicates ‘milli-CPU’, so 500m is 50% of a CPU. Memory can be expressed in various units, where one Mi is one IEC unit mega-byte (1024^2), and one Gi is one IEC unit giga-byte (1024^3). For more information, see Resource Management for Pods and Containers, Assign Memory Resources to Containers and Pods, and Assign CPU Resources to Containers and Pods.

      +

      Note: The parameters above are also utilized by the Kubernetes Horizontal Pod Autoscaler (HPA). For more details on HPA, see Kubernetes Horizontal Pod Autoscaler.

      +
    • +
    • +

      If you plan on integrating OUD with other Oracle components then you must specify the following under the oudConfig: section:

      +
        integration: <Integration option>
      +
           
      +For example:
      +
      oudConfig:
      +  etc...
      +  integration: <Integration option>
      +
      
      +It is recommended to choose the option covering your minimal requirements. Allowed values include: `no-integration` (no integration), `basic` (Directory Integration Platform), `generic` (Directory Integration Platform, Database Net Services and E-Business Suite integration), `eus` (Directory Integration  Platform, Database Net Services, E-Business Suite and Enterprise User Security integration). The default value is `no-integration`
      +     
      +     
      +**Note**: This will enable the integration type only. To integrate OUD with the Oracle component referenced, refer to the relevant product component documentation.
      +
      +
    • +
    • +

      If you want to enable Assured Replication, see Enabling Assured Replication (Optional).

      +
    • +
    +
  4. +
  5. +

    Run the following command to deploy OUD:

    +
    $ helm install --namespace <namespace> \
    +--values oud-ds-rs-values-override.yaml \
    +<release_name> oud-ds-rs
    +

    For example:

    +
    $ helm install --namespace oudns \
    +--values oud-ds-rs-values-override.yaml \
    +oud-ds-rs oud-ds-rs
    +
  6. +
  7. +

    Check the OUD deployment as per Verify the OUD deployment and Verify the OUD replication.

    +
  8. +
+

Using --set argument

+
    +
  1. +

    Navigate to the $WORKDIR/kubernetes/helm directory:

    +
    $ cd $WORKDIR/kubernetes/helm
    +
  2. +
  3. +

    Run the following command to create OUD instances:

    +
    $ helm install --namespace <namespace> \
    +--set oudConfig.rootUserPassword=<password> \
    +--set persistence.filesystem.hostPath.path=<persistent_volume>/oud_user_projects \
    +--set image.repository=<image_location>,image.tag=<image_tag> \
    +--set oudConfig.sampleData="200" \
    +--set oudConfig.resources.limits.cpu="1",oudConfig.resources.limits.memory="8Gi",oudConfig.resources.requests.cpu="500m",oudConfig.resources.requests.memory="4Gi" \
    +--set cronJob.kubectlImage.repository=bitnami/kubectl,cronJob.kubectlImage.tag=<version> \
    +--set cronJob.imagePullSecrets[0].name="dockercred" \
    +--set imagePullSecrets[0].name="orclcred" \
    +<release_name> oud-ds-rs
    +

    For example:

    +
    $ helm install --namespace oudns \
    +--set oudConfig.rootUserPassword=<password> \
    +--set persistence.filesystem.hostPath.path=/scratch/shared/oud_user_projects \
    +--set image.repository=container-registry.oracle.com/middleware/oud_cpu,image.tag=12.2.1.4-jdk8-ol7-<October`23> \
    +--set oudConfig.sampleData="200" \
    +--set oudConfig.resources.limits.cpu="1",oudConfig.resources.limits.memory="8Gi",oudConfig.resources.requests.cpu="500m",oudConfig.resources.requests.memory="4Gi" \
    +--set cronJob.kubectlImage.repository=bitnami/kubectl,cronJob.kubectlImage.tag=1.26.6 \
    +--set cronJob.imagePullSecrets[0].name="dockercred" \
    +--set imagePullSecrets[0].name="orclcred" \
    +oud-ds-rs oud-ds-rs
    +

    The following caveats exist:

    +
      +
    • +

      Replace <password> with the relevant password.

      +
    • +
    • +

      sampleData: "200" will load 200 sample users into the default baseDN dc=example,dc=com. If you do not want sample data, remove this entry. If sampleData is set to 1,000,000 users or greater, then you must add the following entries to the yaml file to prevent inconsistencies in dsreplication: --set deploymentConfig.startupTime=720,deploymentConfig.period=120,deploymentConfig.timeout=60.

      +
    • +
    • +

      The <version> in kubectlImage tag: should be set to the same version as your Kubernetes version (kubectl version). For example if your Kubernetes version is 1.26.6 set to 1.26.6.

      +
    • +
    • +

      If using NFS for your persistent volume then use:

      +
      --set persistence.networkstorage.nfs.path=<persistent_volume>/oud_user_projects,persistence.networkstorage.nfs.server=<NFS IP address> \
      +--set persistence.storageClassCreate="true",persistence.storageClass="oud-sc",persistence.provisioner="kubernetes.io/is-default-class" \
      +
      * If you want to create your own storage class, set `storageClassCreate: true`. If `storageClassCreate: true` it is recommended to set `storageClass` to a value of your choice, and `provisioner` to the provisioner supported by your cloud vendor.
      +* If you have an existing storageClass that supports dynamic storage, set `storageClassCreate: false` and `storageClass` to the NAME value returned in "`kubectl get storageclass`". The `provisioner` can be ignored. 
      +   
      +
    • +
    • +

      If using block storage for your persistent volume then use:

      +
      --set persistence.type="blockstorage",persistence.accessMode="ReadWriteOnce" \
      +--set persistence.storageClassCreate="true",persistence.storageClass="oud-sc",persistence.provisioner="oracle.com/oci" \
      +
      * If you want to create your own storage class, set `storageClassCreate: true`. If `storageClassCreate: true` it is recommended to set `storageClass` to a value of your choice, and `provisioner` to the provisioner supported by your cloud vendor.
      +* If you have an existing storageClass that supports dynamic storage, set `storageClassCreate: false` and `storageClass` to the NAME value returned in "`kubectl get storageclass`". The `provisioner` can be ignored. 	  
      +   
      +
    • +
    • +

      If you are not using Oracle Container Registry or your own container registry for your OUD container image, then you can remove the following: --set imagePullSecrets[0].name="orclcred".

      +
    • +
    • +

      For resources, limits and requests, the example CPU and memory values shown are for development environments only. For Enterprise Deployments, please review the performance recommendations and sizing requirements in Enterprise Deployment Guide for Oracle Identity and Access Management in a Kubernetes Cluster.

      +

      Note: Limits and requests for CPU resources are measured in CPU units. One CPU in Kubernetes is equivalent to 1 vCPU/Core for cloud providers, and 1 hyperthread on bare-metal Intel processors. An “m” suffix in a CPU attribute indicates ‘milli-CPU’, so 500m is 50% of a CPU. Memory can be expressed in various units, where one Mi is one IEC unit mega-byte (1024^2), and one Gi is one IEC unit giga-byte (1024^3). For more information, see Resource Management for Pods and Containers, Assign Memory Resources to Containers and Pods, and Assign CPU Resources to Containers and Pods.

      +

      Note: The parameters above are also utilized by the Kubernetes Horizontal Pod Autoscaler (HPA). For more details on HPA, see Kubernetes Horizontal Pod Autoscaler.

      +
    • +
    • +

      If you plan on integrating OUD with other Oracle components then you must specify the following:

      +
      --set oudConfig.integration=<Integration option>
      +
      
      +It is recommended to choose the option covering your minimal requirements. Allowed values include: `no-integration` (no integration), `basic` (Directory Integration Platform), `generic` (Directory Integration Platform, Database Net Services and E-Business Suite integration), `eus` (Directory Integration  Platform, Database Net Services, E-Business Suite and Enterprise User Security integration). The default value is `no-integration`
      +     
      +**Note**: This will enable the integration type only. To integrate OUD with the Oracle component referenced, refer to the relevant product component documentation.
      +
      +
    • +
    • +

      If you want to enable Assured Replication, see Enabling Assured Replication (Optional).

      +
    • +
    +
  4. +
  5. +

    Check the OUD deployment as per Verify the OUD deployment and Verify the OUD replication.

    +
  6. +
+

Enabling Assured Replication (Optional)

+

If you want to enable assured replication, perform the following steps:

+
    +
  1. +

    Create a directory on the persistent volume as follows:

    +
    $ cd <persistent_volume>
    +$ mkdir oud-repl-config  
    +$ sudo chown -R 1000:0 oud-repl-config
    +

    For example:

    +
    $ cd /scratch/shared
    +$ mkdir oud-repl-config   
    +$ sudo chown -R 1000:0 oud-repl-config
    +
  2. +
  3. +

    Add the following section in the oud-ds-rs-values-override.yaml:

    +
    replOUD:
    +  envVars:
    +    - name: post_dsreplication_dsconfig_3
    +      value: set-replication-domain-prop --domain-name ${baseDN} --advanced --set assured-type:safe-read --set assured-sd-level:2 --set assured-timeout:5s
    +    - name: execCmd_1
    +      value: /u01/oracle/user_projects/${OUD_INSTANCE_NAME}/OUD/bin/dsconfig --no-prompt --hostname ${sourceHost} --port ${adminConnectorPort} --bindDN "${rootUserDN}" --bindPasswordFile /u01/oracle/user_projects/${OUD_INSTANCE_NAME}/admin/rootPwdFile.txt  --trustAll set-replication-domain-prop --domain-name ${baseDN} --advanced --set assured-type:safe-read --set assured-sd-level:2 --set assured-timeout:5s --provider-name "Multimaster Synchronization"
    +configVolume:
    +  enabled: true
    +  type: networkstorage
    +  storageClassCreate: true
    +  storageClass: oud-config
    +  provisioner: kubernetes.io/is-default-class
    +  networkstorage:
    +    nfs:
    +      server: <IP_address>
    +      path: <persistent_volume>/oud-repl-config
    +  mountPath: /u01/oracle/config-input
    +

    For more information on OUD Assured Replication, and other options and levels, see, Understanding the Oracle Unified Directory Replication Model.

    +

    The following caveats exist:

    +
      +
    • +

      post_dsreplication_dsconfig_N and execCmd_N should be a unique key - change the suffix accordingly. For more information on the environment variable and respective keys, see, Appendix B: Environment Variables.

      +
    • +
    • +

      For configVolume the storage can be networkstorage(nfs) or filesystem(hostPath) as the config volume path has to be accessible from all the Kubernetes nodes. Please note that block storage is not supported for configVolume.

      +
    • +
    • +

      If you want to create your own storage class, set storageClassCreate: true. If storageClassCreate: true it is recommended to set storageClass to a value of your choice, and provisioner to the provisioner supported by your cloud vendor.

      +
    • +
    • +

      If you have an existing storageClass that supports network storage, set storageClassCreate: false and storageClass to the NAME value returned in “kubectl get storageclass”. Please note that the storage-class should not be the one you used for the persistent volume earlier. The provisioner can be ignored.

      +
    • +
    +
  4. +
+

Helm command output

+

In all the examples above, the following output is shown following a successful execution of the helm install command.

+
NAME: oud-ds-rs
+LAST DEPLOYED:  <DATE>
+NAMESPACE: oudns
+STATUS: deployed
+REVISION: 4
+NOTES:
+#
+# Copyright (c) 2020, Oracle and/or its affiliates.
+#
+#  Licensed under the Universal Permissive License v 1.0 as shown at
+# https://oss.oracle.com/licenses/upl
+#
+#
+Since "nginx" has been chosen, follow the steps below to configure nginx ingress controller.
+Add Repo reference to helm for retriving/installing Chart for nginx-ingress implementation.
+command-# helm repo add ingress-nginx https://kubernetes.github.io/ingress-nginx
+
+Command helm install to install nginx-ingress related objects like pod, service, deployment, etc.
+# helm install --namespace <namespace for ingress> --values nginx-ingress-values-override.yaml lbr-nginx ingress-nginx/ingress-nginx
+
+For details of content of nginx-ingress-values-override.yaml refer README.md file of this chart.
+
+Run these commands to check port mapping and services:
+# kubectl --namespace <namespace for ingress> get services -o wide -w lbr-nginx-ingress-controller
+# kubectl describe --namespace <namespace for oud-ds-rs chart> ingress.extensions/oud-ds-rs-http-ingress-nginx
+# kubectl describe --namespace <namespace for oud-ds-rs chart> ingress.extensions/oud-ds-rs-admin-ingress-nginx
+
+Accessible interfaces through ingress:
+(External IP Address for LoadBalancer NGINX Controller can be determined through details associated with lbr-nginx-ingress-controller)
+
+1. OUD Admin REST:
+   Port: http/https
+
+2. OUD Data REST:
+   Port: http/https
+
+3. OUD Data SCIM:
+   Port: http/https
+
+4. OUD LDAP/LDAPS:
+   Port: ldap/ldaps
+
+5. OUD Admin LDAPS:
+   Port: ldaps
+
+Please refer to README.md from Helm Chart to find more details about accessing interfaces and configuration parameters.
+

Verify the OUD deployment

+

Run the following command to verify the OUD deployment:

+
$ kubectl --namespace <namespace> get pod,service,secret,pv,pvc,ingress -o wide
+

For example:

+
$ kubectl --namespace oudns get pod,service,secret,pv,pvc,ingress -o wide
+

The output will look similar to the following:

+
NAME                                  READY   STATUS      RESTARTS   AGE     IP             NODE            NOMINATED NODE   READINESS GATES
+pod/oud-ds-rs-0                       1/1     Running     0          14m     10.244.1.180   <Worker Node>   <none>           <none>
+pod/oud-ds-rs-1                       1/1     Running     0          8m26s   10.244.1.181   <Worker Node>   <none>           <none>
+pod/oud-ds-rs-2                       0/1     Running     0          2m24s   10.244.1.182   <Worker Node>   <none>           <none>
+pod/oud-pod-cron-job-27586680-p5d8q   0/1     Completed   0          50s     10.244.1.183   <Worker Node>   <none>           <none>
+
+NAME                          TYPE        CLUSTER-IP       EXTERNAL-IP   PORT(S)                                                          AGE   SELECTOR
+service/oud-ds-rs             ClusterIP   None             <none>        1444/TCP,1888/TCP,1389/TCP,1636/TCP,1080/TCP,1081/TCP,1898/TCP   14m   app.kubernetes.io/instance=oud-ds-rs,app.kubernetes.io/name=oud-ds-rs
+service/oud-ds-rs-0           ClusterIP   None             <none>        1444/TCP,1888/TCP,1898/TCP                                       14m app.kubernetes.io/instance=oud-ds-rs,app.kubernetes.io/name=oud-ds-rs,statefulset.kubernetes.io/pod-name=oud-ds-rs-0
+service/oud-ds-rs-1           ClusterIP   None             <none>        1444/TCP,1888/TCP,1898/TCP                                       14m   app.kubernetes.io/instance=oud-ds-rs,app.kubernetes.io/name=oud-ds-rs,statefulset.kubernetes.io/pod-name=oud-ds-rs-1
+service/oud-ds-rs-2           ClusterIP   None             <none>        1444/TCP,1888/TCP,1898/TCP                                       14m   app.kubernetes.io/instance=oud-ds-rs,app.kubernetes.io/name=oud-ds-rs,statefulset.kubernetes.io/pod-name=oud-ds-rs-2
+service/oud-ds-rs-http-0      ClusterIP   10.104.112.93    <none>        1080/TCP,1081/TCP                                                14m   app.kubernetes.io/instance=oud-ds-rs,app.kubernetes.io/name=oud-ds-rs,statefulset.kubernetes.io/pod-name=oud-ds-rs-0
+service/oud-ds-rs-http-1      ClusterIP   10.103.105.70    <none>        1080/TCP,1081/TCP                                                14m   app.kubernetes.io/instance=oud-ds-rs,app.kubernetes.io/name=oud-ds-rs,statefulset.kubernetes.io/pod-name=oud-ds-rs-1
+service/oud-ds-rs-http-2      ClusterIP   10.110.160.107   <none>        1080/TCP,1081/TCP                                                14m   app.kubernetes.io/instance=oud-ds-rs,app.kubernetes.io/name=oud-ds-rs,statefulset.kubernetes.io/pod-name=oud-ds-rs-2
+service/oud-ds-rs-lbr-admin   ClusterIP   10.99.238.222    <none>        1888/TCP,1444/TCP                                                14m   app.kubernetes.io/instance=oud-ds-rs,app.kubernetes.io/name=oud-ds-rs
+service/oud-ds-rs-lbr-http    ClusterIP   10.101.250.196   <none>        1080/TCP,1081/TCP                                                14m   app.kubernetes.io/instance=oud-ds-rs,app.kubernetes.io/name=oud-ds-rs
+service/oud-ds-rs-lbr-ldap    ClusterIP   10.104.149.90    <none>        1389/TCP,1636/TCP                                                14m   app.kubernetes.io/instance=oud-ds-rs,app.kubernetes.io/name=oud-ds-rs
+service/oud-ds-rs-ldap-0      ClusterIP   10.109.255.221   <none>        1389/TCP,1636/TCP                                                14m   app.kubernetes.io/instance=oud-ds-rs,app.kubernetes.io/name=oud-ds-rs,statefulset.kubernetes.io/pod-name=oud-ds-rs-0
+service/oud-ds-rs-ldap-1      ClusterIP   10.111.135.142   <none>        1389/TCP,1636/TCP                                                14m   app.kubernetes.io/instance=oud-ds-rs,app.kubernetes.io/name=oud-ds-rs,statefulset.kubernetes.io/pod-name=oud-ds-rs-1
+service/oud-ds-rs-ldap-2      ClusterIP   10.100.8.145     <none>        1389/TCP,1636/TCP                                                14m   app.kubernetes.io/instance=oud-ds-rs,app.kubernetes.io/name=oud-ds-rs,statefulset.kubernetes.io/pod-name=oud-ds-rs-2
+
+NAME                                     TYPE                             DATA   AGE
+secret/dockercred                        kubernetes.io/dockerconfigjson   1      4h24m
+secret/orclcred                          kubernetes.io/dockerconfigjson   1      14m
+secret/oud-ds-rs-creds                   opaque                           8      14m
+secret/oud-ds-rs-tls-cert                kubernetes.io/tls                2      14m
+secret/sh.helm.release.v1.oud-ds-rs.v1   helm.sh/release.v1               1      14m
+
+
+NAME                                 CAPACITY   ACCESS MODES   RECLAIM POLICY   STATUS   CLAIM                       STORAGECLASS        REASON   AGE    VOLUMEMODE
+persistentvolume/oud-ds-rs-pv        20Gi       RWX            Delete           Bound    oudns/oud-ds-rs-pvc         manual                       14m    Filesystem
+
+NAME                                  STATUS   VOLUME         CAPACITY   ACCESS MODES   STORAGECLASS   AGE   VOLUMEMODE
+persistentvolumeclaim/oud-ds-rs-pvc   Bound    oud-ds-rs-pv   20Gi       RWX            manual         14m   Filesystem
+
+NAME                                                      CLASS    HOSTS                                                               ADDRESS   PORTS     AGE
+ingress.networking.k8s.io/oud-ds-rs-admin-ingress-nginx   <none>   oud-ds-rs-admin-0,oud-ds-rs-admin-0,oud-ds-rs-admin-1 + 3 more...             80, 443   14m
+ingress.networking.k8s.io/oud-ds-rs-http-ingress-nginx    <none>   oud-ds-rs-http-0,oud-ds-rs-http-1,oud-ds-rs-http-2 + 3 more...                80, 443   14m
+
+

Note: If you are using block storage you will see slightly different entries for PV and PVC, for example:

+
NAME                                                  CAPACITY   ACCESS MODES   RECLAIM POLICY   STATUS        CLAIM                            STORAGECLASS                        REASON   AGE   VOLUMEMODE
+persistentvolume/ocid1.volume.oc1.iad.<unique_ID>     50Gi       RWO            Delete           Bound         oudns/oud-ds-rs-pv-oud-ds-rs-2   oud-sc                                       60m   Filesystem
+persistentvolume/ocid1.volume.oc1.iad.<unique_ID>     50Gi       RWO            Delete           Bound         oudns/oud-ds-rs-pv-oud-ds-rs-1   oud-sc                                       67m   Filesystem
+persistentvolume/ocid1.volume.oc1.iad.<unique_ID>     50Gi       RWO            Delete           Bound         oudns/oud-ds-rs-pv-oud-ds-rs-3   oud-sc                                       45m   Filesystem
+
+NAME                                             STATUS   VOLUME                             CAPACITY   ACCESS MODES   STORAGECLASS   AGE   VOLUMEMODE
+persistentvolumeclaim/oud-ds-rs-pv-oud-ds-rs-1   Bound    ocid1.volume.oc1.iad.<unique_ID>   50Gi       RWO            oud-sc         67m   Filesystem
+persistentvolumeclaim/oud-ds-rs-pv-oud-ds-rs-2   Bound    ocid1.volume.oc1.iad.<unique_ID>   50Gi       RWO            oud-sc         60m   Filesystem
+persistentvolumeclaim/oud-ds-rs-pv-oud-ds-rs-3   Bound    ocid1.volume.oc1.iad.<unique_ID>   50Gi       RWO            oud-sc         45m   Filesystem
+

Note: Initially pod/oud-ds-rs-0 will appear with a STATUS of 0/1 and it will take approximately 5 minutes before OUD is started (1/1). Once pod/oud-ds-rs-0 has a STATUS of 1/1, pod/oud-ds-rs-1 will appear with a STATUS of 0/1. Once pod/oud-ds-rs-1 is started (1/1), pod/oud-ds-rs-2 will appear. It will take around 15 minutes for all the pods to be fully started.

+
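To follow the startup progress, you can watch the pods until all show 1/1. For example:

$ kubectl get pods -n oudns -w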

While an oud-ds-rs pod has a STATUS of 0/1, the pod is running but the OUD server associated with it is still starting. While the pod is starting you can check the startup status in the pod logs by running the following command:

+
$ kubectl logs <pod> -n oudns
+

For example:

+
$ kubectl logs oud-ds-rs-0 -n oudns
+

Note: If the OUD deployment fails, refer to Troubleshooting for instructions on how to describe the failing pod(s). Once the problem is identified, follow Undeploy an OUD deployment to clean down the deployment before deploying again.

+

Kubernetes Objects

+

Kubernetes objects created by the Helm chart are detailed in the table below:

| Type | Name | Example Name | Purpose |
| --- | --- | --- | --- |
| Service Account | <deployment/release name> | oud-ds-rs | Kubernetes Service Account for the Helm Chart deployment |
| Secret | <deployment/release name>-creds | oud-ds-rs-creds | Secret object for Oracle Unified Directory related critical values like passwords |
| Persistent Volume | <deployment/release name>-pv | oud-ds-rs-pv | Persistent Volume for user_projects mount. |
| Persistent Volume Claim | <deployment/release name>-pvc | oud-ds-rs-pvc | Persistent Volume Claim for user_projects mount. |
| Persistent Volume | <deployment/release name>-pv-config | oud-ds-rs-pv-config | Persistent Volume for mounting volume in containers for configuration files like ldif, schema, jks, java.security, etc. |
| Persistent Volume Claim | <deployment/release name>-pvc-config | oud-ds-rs-pvc-config | Persistent Volume Claim for mounting volume in containers for configuration files like ldif, schema, jks, java.security, etc. |
| Pod | <deployment/release name>-0 | oud-ds-rs-0 | Pod/Container for base Oracle Unified Directory Instance which would be populated first with base configuration (like number of sample entries) |
| Pod | <deployment/release name>-N | oud-ds-rs-1, oud-ds-rs-2, … | Pod(s)/Container(s) for Oracle Unified Directory Instances - each would have replication enabled against base Oracle Unified Directory instance <deployment/release name>-0 |
| Service | <deployment/release name>-0 | oud-ds-rs-0 | Service for LDAPS Admin, REST Admin and Replication interfaces from base Oracle Unified Directory instance <deployment/release name>-0 |
| Service | <deployment/release name>-http-0 | oud-ds-rs-http-0 | Service for HTTP and HTTPS interfaces from base Oracle Unified Directory instance <deployment/release name>-0 |
| Service | <deployment/release name>-ldap-0 | oud-ds-rs-ldap-0 | Service for LDAP and LDAPS interfaces from base Oracle Unified Directory instance <deployment/release name>-0 |
| Service | <deployment/release name>-N | oud-ds-rs-1, oud-ds-rs-2, … | Service(s) for LDAPS Admin, REST Admin and Replication interfaces from base Oracle Unified Directory instance <deployment/release name>-N |
| Service | <deployment/release name>-http-N | oud-ds-rs-http-1, oud-ds-rs-http-2, … | Service(s) for HTTP and HTTPS interfaces from base Oracle Unified Directory instance <deployment/release name>-N |
| Service | <deployment/release name>-ldap-N | oud-ds-rs-ldap-1, oud-ds-rs-ldap-2, … | Service(s) for LDAP and LDAPS interfaces from base Oracle Unified Directory instance <deployment/release name>-N |
| Service | <deployment/release name>-lbr-admin | oud-ds-rs-lbr-admin | Service for LDAPS Admin, REST Admin and Replication interfaces from all Oracle Unified Directory instances |
| Service | <deployment/release name>-lbr-http | oud-ds-rs-lbr-http | Service for HTTP and HTTPS interfaces from all Oracle Unified Directory instances |
| Service | <deployment/release name>-lbr-ldap | oud-ds-rs-lbr-ldap | Service for LDAP and LDAPS interfaces from all Oracle Unified Directory instances |
| Ingress | <deployment/release name>-admin-ingress-nginx | oud-ds-rs-admin-ingress-nginx | Ingress Rules for HTTP Admin interfaces. |
| Ingress | <deployment/release name>-http-ingress-nginx | oud-ds-rs-http-ingress-nginx | Ingress Rules for HTTP (Data/REST) interfaces. |
    +
  • In the table above the ‘Example Name’ for each Object is based on the value ‘oud-ds-rs’ as deployment/release name for the Helm chart installation.
  • +
+

Verify the OUD replication

+

Once all the pods created are visible as READY (i.e. 1/1), you can verify your replication across multiple Oracle Unified Directory instances.

+
    +
  1. +

    To verify the replication group, connect to the container and issue an OUD administration command to show the details. The name of the container can be found by issuing the following:

    +
    $ kubectl get pods -n <namespace> -o jsonpath='{.items[*].spec.containers[*].name}'
    +

    For example:

    +
    $ kubectl get pods -n oudns -o jsonpath='{.items[*].spec.containers[*].name}'
    +

    The output will look similar to the following:

    +
    oud-ds-rs oud-ds-rs oud-ds-rs
    +

    Once you have the container name you can verify the replication status in the following ways:

    +
      +
    • Run dsreplication inside the pod
    • +
    • Using kubectl commands
    • +
    +
  2. +
+

Run dsreplication inside the pod

+
    +
  1. +

    Run the following command to create a bash shell in the pod:

    +
    $ kubectl --namespace <namespace> exec -it -c <containername> <podname> -- bash
    +

    For example:

    +
    $ kubectl --namespace oudns exec -it -c oud-ds-rs oud-ds-rs-0 -- bash
    +

    This will take you into the pod:

    +
    [oracle@oud-ds-rs-0 oracle]$
    +
  2. +
  3. +

    From the prompt, use the dsreplication command to check the status of your replication group:

    +
    $ cd /u01/oracle/user_projects/oud-ds-rs-0/OUD/bin
    +
    +$ ./dsreplication status --trustAll \
    +--hostname oud-ds-rs-0 --port 1444 --adminUID admin \
    +--dataToDisplay compat-view --dataToDisplay rs-connections
    +

    The output will look similar to the following. Enter credentials where prompted:

    +
    >>>> Specify Oracle Unified Directory LDAP connection parameters
    +    
    +Password for user 'admin':
    +    
    +Establishing connections and reading configuration ..... Done.
    +    
    +dc=example,dc=com - Replication Enabled
    +=======================================
    +    
    +Server               : Entries : M.C. [1] : A.O.M.C. [2] : Port [3] : Encryption [4] : Trust [5] : U.C. [6] : Status [7] : ChangeLog [8] : Group ID [9] : Connected To [10]
    +---------------------:---------:----------:--------------:----------:----------------:-----------:----------:------------:---------------:--------------:-------------------------------
    +oud-ds-rs-0:1444     : 202     : 0        : 0            : 1898     : Disabled       : Trusted   : --       : Normal     : Enabled       : 1            : oud-ds-rs-0:1898
    +                     :         :          :              :          :                :           :          :            :               :              : (GID=1)
    +oud-ds-rs-1:1444     : 202     : 0        : 0            : 1898     : Disabled       : Trusted   : --       : Normal     : Enabled       : 1            : oud-ds-rs-1:1898
    +                     :         :          :              :          :                :           :          :            :               :              : (GID=1)
    +oud-ds-rs-2:1444     : 202     : 0        : 0            : 1898     : Disabled       : Trusted   : --       : Normal     : Enabled       : 1            : oud-ds-rs-2:1898
    +                     :         :          :              :          :                :           :          :            :               :              : (GID=1)
    +    
    +Replication Server [11]        : RS #1 : RS #2 : RS #3
    +-------------------------------:-------:-------:------
    +oud-ds-rs-0:1898               : --    : Yes   : Yes
    +(#1)                           :       :       :
    +oud-ds-rs-1:1898               : Yes   : --    : Yes
    +(#2)                           :       :       :
    +oud-ds-rs-2:1898               : Yes   : Yes   : --
    +(#3)                           :       :       :
    +    
    +[1] The number of changes that are still missing on this element (and that have been applied to at least one other server).
    +[2] Age of oldest missing change: the age (in seconds) of the oldest change that has not yet arrived on this element.
    +[3] The replication port used to communicate between the servers whose contents are being replicated.
    +[4] Whether the replication communication initiated by this element is encrypted or not.
    +[5] Whether the directory server is trusted or not. Updates coming from an untrusted server are discarded and not propagated.
    +[6] The number of untrusted changes. These are changes generated on this server while it is untrusted. Those changes are not propagated to the rest of the topology but are effective on the untrusted server.
    +[7] The status of the replication on this element.
    +[8] Whether the external change log is enabled for the base DN on this server or not.
    +[9] The ID of the replication group to which the server belongs.
    +[10] The replication server this server is connected to with its group ID between brackets.
    +[11] This table represents the connections between the replication servers.  The headers of the columns use a number as identifier for each replication server.  See the values of the first column to identify the corresponding replication server for each number.
    +
  4. +
  5. +

    Type exit to exit the pod.

    +
  6. +
+

Using kubectl commands

+
    +
  1. +

    The dsreplication status command can be invoked using the following kubectl command:

    +
    $ kubectl --namespace <namespace> exec -it -c <containername> <podname> -- \
    +/u01/oracle/user_projects/<OUD Instance/Pod Name>/OUD/bin/dsreplication status \
    +--trustAll --hostname <OUD Instance/Pod Name> --port 1444 --adminUID admin \
    +--dataToDisplay compat-view --dataToDisplay rs-connections
    +

    For example:

    +
    $ kubectl --namespace oudns exec -it -c oud-ds-rs oud-ds-rs-0 -- \
    +/u01/oracle/user_projects/oud-ds-rs-0/OUD/bin/dsreplication status \
    +--trustAll --hostname oud-ds-rs-0 --port 1444 --adminUID admin \
    +--dataToDisplay compat-view --dataToDisplay rs-connections
    +

    The output will be the same as per Run dsreplication inside the pod.

    +
  2. +
+

Verify OUD assured replication status

+

Note: This section only needs to be followed if you enabled assured replication as per Enabling Assured Replication (Optional).

+
    +
  1. +

    Run the following command to create a bash shell in the pod:

    +
    $ kubectl --namespace <namespace> exec -it -c <containername> <podname> -- bash
    +

    For example:

    +
    $ kubectl --namespace oudns exec -it -c oud-ds-rs oud-ds-rs-0 -- bash
    +

    This will take you into the pod:

    +
    [oracle@oud-ds-rs-0 oracle]$
    +
  2. +
  3. +

    At the prompt, enter the following commands:

    +
    $ echo $bindPassword1 > /tmp/pwd.txt
    +$ /u01/oracle/user_projects/${OUD_INSTANCE_NAME}/OUD/bin/dsconfig --no-prompt --hostname ${OUD_INSTANCE_NAME} --port ${adminConnectorPort} --bindDN "${rootUserDN}" --bindPasswordFile /tmp/pwd.txt  --trustAll get-replication-domain-prop --domain-name ${baseDN} --advanced --property assured-type --property assured-sd-level --property assured-timeout --provider-name "Multimaster Synchronization"
    +

    The output will look similar to the following:

    +
    Property         : Value(s)
    +-----------------:----------
    +assured-sd-level : 2
    +assured-timeout  : 5 s
    +assured-type     : safe-read
    +
  4. +
+

Verify the cronjob

+
    +
  1. +

    Run the following command to make sure the cronjob is created:

    +
    $ kubectl get cronjob -n <namespace>
    +

    For example:

    +
    $ kubectl get cronjob -n oudns
    +

    The output will look similar to the following:

    +
    NAME               SCHEDULE       SUSPEND   ACTIVE   LAST SCHEDULE   AGE
    +oud-pod-cron-job   */30 * * * *   False     0        5m18s           19m
    +
  2. +
  3. +

    Run the following command to make sure the job(s) is created:

    +
    $ kubectl get job -n <namespace> -o wide
    +

    For example:

    +
    $ kubectl get job -n oudns -o wide
    +

    The output will look similar to the following:

    +
    NAME                        COMPLETIONS   DURATION   AGE     CONTAINERS        IMAGES                   SELECTOR
    +oud-pod-cron-job-27586680   1/1           1s         5m36s   cron-kubectl      bitnami/kubectl:1.26.6   controller-uid=700ab9f7-6094-488a-854d-f1b914de5f61
    +
  4. +
+

Disabling the cronjob

+

If you need to disable the job, for example if maintenance needs to be performed on the node, you can disable the job as follows:

+
    +
  1. +

    Run the following command to edit the cronjob:

    +
    $ kubectl edit cronjob pod-cron-job -n <namespace>
    +

    For example:

    +
    $ kubectl edit cronjob oud-pod-cron-job -n oudns
    +

    Note: This opens an edit session for the cronjob where parameters can be changed using standard vi commands.

    +
  2. +
  3. +

    In the edit session search for suspend and change the value from false to true:

    +
    ...
    +schedule: '*/30 * * * *'
    +successfulJobsHistoryLimit: 3
    +suspend: true
    +...
    +
  4. +
  5. +

    Save the file and exit (wq!).

    +
  6. +
  7. +

    Run the following to make sure the cronjob is suspended:

    +
    $ kubectl get cronjob -n <namespace>
    +

    For example:

    +
    $ kubectl get cronjob -n oudns
    +

    The output will look similar to the following:

    +
    NAME               SCHEDULE       SUSPEND   ACTIVE   LAST SCHEDULE   AGE
    +oud-pod-cron-job   */30 * * * *   True      0        7m47s           21m
    +
  8. +
  9. +

    To enable the cronjob again, repeat the above steps and set suspend to false.

    +
  10. +
+
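As an alternative to editing the cronjob interactively, the suspend flag can also be toggled non-interactively; a minimal sketch:

$ kubectl patch cronjob oud-pod-cron-job -n oudns -p '{"spec":{"suspend":true}}'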

Ingress Configuration

+

With an OUD instance now deployed you are now ready to configure an ingress controller to direct traffic to OUD as per Configure an ingress for an OUD.

+

Undeploy an OUD deployment

+

Delete the OUD deployment

+
    +
  1. +

    Find the deployment release name:

    +
    $ helm --namespace <namespace> list
    +

    For example:

    +
    $ helm --namespace oudns list
    +

    The output will look similar to the following:

    +
    NAME            NAMESPACE       REVISION        UPDATED   STATUS          CHART           APP VERSION
    +oud-ds-rs       oudns           1               <DATE>    deployed        oud-ds-rs-0.2   12.2.1.4.0
    +
  2. +
  3. +

    Delete the deployment using the following command:

    +
    $ helm uninstall --namespace <namespace> <release>
    +

    For example:

    +
    $ helm uninstall --namespace oudns oud-ds-rs
    +release "oud-ds-rs" uninstalled
    +
  4. +
  5. +

    Run the following command to view the status:

    +
    $ kubectl --namespace oudns get pod,service,secret,pv,pvc,ingress -o wide
    +

    Initially the pods, persistent volume (PV) and persistent volume claim (PVC) will move to a Terminating status:

    +
    NAME              READY   STATUS        RESTARTS   AGE   IP             NODE            NOMINATED NODE   READINESS GATES
    +
    +pod/oud-ds-rs-0   1/1     Terminating   0          24m   10.244.1.180   <Worker Node>   <none>           <none>
    +pod/oud-ds-rs-1   1/1     Terminating   0          18m   10.244.1.181   <Worker Node>   <none>           <none>
    +pod/oud-ds-rs-2   1/1     Terminating   0          12m   10.244.1.182   <Worker Node>   <none>           <none>
    +
    +NAME                         TYPE                                  DATA   AGE
    +secret/default-token-msmmd   kubernetes.io/service-account-token   3      3d20h
    +secret/dockercred            kubernetes.io/dockerconfigjson        1      3d20h
    +secret/orclcred              kubernetes.io/dockerconfigjson        1      3d20h
    +
    +NAME                                 CAPACITY   ACCESS MODES   RECLAIM POLICY   STATUS        CLAIM                       STORAGECLASS        REASON   AGE    VOLUMEMODE
    +persistentvolume/oud-ds-rs-pv        20Gi       RWX            Delete           Terminating   oudns/oud-ds-rs-pvc         manual                       24m    Filesystem
    +
    +NAME                                  STATUS        VOLUME         CAPACITY   ACCESS MODES   STORAGECLASS   AGE   VOLUMEMODE
    +persistentvolumeclaim/oud-ds-rs-pvc   Terminating   oud-ds-rs-pv   20Gi       RWX            manual         24m   Filesystem
    +

    Run the command again until the pods, PV and PVC disappear.

    +
  6. +
  7. +

    If the PV or PVCs do not delete, remove them manually:

    +
    $ kubectl delete pvc oud-ds-rs-pvc -n oudns
    +$ kubectl delete pv oud-ds-rs-pv -n oudns
    +

    Note: If using block storage, you will see a PV and PVC for each pod. Delete all of the PVCs and PVs using the above commands.

    +
  8. +
+

Delete the persistent volume contents

+

Note: The steps below are not relevant for block storage.

+
    +
  1. +

    Delete the contents of the oud_user_projects directory in the persistent volume:

    +
    $ cd <persistent_volume>/oud_user_projects
    +$ rm -rf *
    +

    For example:

    +
    $ cd /scratch/shared/oud_user_projects
    +$ rm -rf *
    +
  2. +
+

Appendix A: Configuration Parameters

+

The following table lists the configurable parameters of the oud-ds-rs chart and their default values.

+ + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + +
Parameter | Description | Default Value
replicaCount | Number of DS+RS instances/pods/services to be created with replication enabled against a base Oracle Unified Directory instance/pod. | 3
restartPolicyName | restartPolicy to be configured for each POD containing an Oracle Unified Directory instance | OnFailure
image.repository | Oracle Unified Directory Image Registry/Repository and name. Based on this, the image parameter would be configured for Oracle Unified Directory pods/containers | oracle/oud
image.tag | Oracle Unified Directory Image Tag. Based on this, the image parameter would be configured for Oracle Unified Directory pods/containers | 12.2.1.4.0
image.pullPolicy | Policy to pull the image | IfNotPresent
imagePullSecrets.name | Name of the Secret resource containing private registry credentials | regcred
nameOverride | Override the fullname with this name |
fullnameOverride | Overrides the fullname with the provided string |
serviceAccount.create | Specifies whether a service account should be created | true
serviceAccount.name | If not set and create is true, a name is generated using the fullname template | oud-ds-rs-< fullname >-token-< randomalphanum >
podSecurityContext | Security context policies to add to the controller pod |
securityContext | Security context policies to add by default |
service.type | Type of controller service to create | ClusterIP
nodeSelector | Node labels for pod assignment |
tolerations | Node taints to tolerate |
affinity | Node/pod affinities |
ingress.enabled | | true
ingress.type | Supported value: nginx | nginx
ingress.nginx.http.host | Hostname to be used with Ingress Rules. If not set, hostname would be configured according to fullname. Hosts would be configured as < fullname >-http.< domain >, < fullname >-http-0.< domain >, < fullname >-http-1.< domain >, etc. |
ingress.nginx.http.domain | Domain name to be used with Ingress Rules. In ingress rules, hosts would be configured as < host >.< domain >, < host >-0.< domain >, < host >-1.< domain >, etc. |
ingress.nginx.http.backendPort | | http
ingress.nginx.http.nginxAnnotations | | { kubernetes.io/ingress.class: "nginx" }
ingress.nginx.admin.host | Hostname to be used with Ingress Rules. If not set, hostname would be configured according to fullname. Hosts would be configured as < fullname >-admin.< domain >, < fullname >-admin-0.< domain >, < fullname >-admin-1.< domain >, etc. |
ingress.nginx.admin.domain | Domain name to be used with Ingress Rules. In ingress rules, hosts would be configured as < host >.< domain >, < host >-0.< domain >, < host >-1.< domain >, etc. |
ingress.nginx.admin.nginxAnnotations | | { kubernetes.io/ingress.class: "nginx", nginx.ingress.kubernetes.io/backend-protocol: "https" }
ingress.tlsSecret | Secret name to use an already created TLS Secret. If such a secret is not provided, one would be created with the name < fullname >-tls-cert. If the TLS Secret is in a different namespace, the name can be mentioned as < namespace >/< tlsSecretName > |
ingress.certCN | Subject's common name (cn) for Self-Signed Cert. | < fullname >
ingress.certValidityDays | Validity of Self-Signed Cert in days | 365
secret.enabled | If enabled, it will use the secret created with base64 encoding. If value is false, the secret would not be used and input values (through --set, --values, etc.) would be used while creating the pods. | true
secret.name | Secret name to use an already created Secret | oud-ds-rs-< fullname >-creds
secret.type | Specifies the type of the secret | Opaque
persistence.enabled | If enabled, it will use the persistent volume. If value is false, PV and PVC would not be used and pods would be using the default emptyDir mount volume. | true
persistence.pvname | pvname to use an already created Persistent Volume. If blank, will use the default name | oud-ds-rs-< fullname >-pv
persistence.pvcname | pvcname to use an already created Persistent Volume Claim. If blank, will use the default name | oud-ds-rs-< fullname >-pvc
persistence.type | Supported values: either filesystem or networkstorage or blockstorage or custom | filesystem
persistence.filesystem.hostPath.path | The path location mentioned should be created and accessible from the local host, provided with the necessary privileges for the user. | /scratch/shared/oud_user_projects
persistence.networkstorage.nfs.path | Path of NFS Share location | /scratch/shared/oud_user_projects
persistence.networkstorage.nfs.server | IP or hostname of NFS Server | 0.0.0.0
persistence.custom.* | Based on values/data, YAML content would be included in the PersistentVolume object |
persistence.accessMode | Specifies the access mode of the location provided. ReadWriteMany for Filesystem/NFS, ReadWriteOnce for block storage. | ReadWriteMany
persistence.size | Specifies the size of the storage | 10Gi
persistence.storageClassCreate | If true, it will create the storageclass. If value is false, provide an existing storage class (storageClass) to be used. | empty
persistence.storageClass | Specifies the storageclass of the persistence volume. | empty
persistence.provisioner | If storageClassCreate is true, provide the custom provisioner if any. | kubernetes.io/is-default-class
persistence.annotations | Specifies any annotations that will be used | { }
configVolume.enabled | If enabled, it will use the persistent volume. If value is false, PV and PVC would not be used and pods would be using the default emptyDir mount volume. | true
configVolume.mountPath | If enabled, it will use the persistent volume. If value is false, PV and PVC would not be used and there would not be any mount point available for config | false
configVolume.pvname | pvname to use an already created Persistent Volume. If blank, will use the default name | oud-ds-rs-< fullname >-pv-config
configVolume.pvcname | pvcname to use an already created Persistent Volume Claim. If blank, will use the default name | oud-ds-rs-< fullname >-pvc-config
configVolume.type | Supported values: either filesystem or networkstorage or custom | filesystem
configVolume.filesystem.hostPath.path | The path location mentioned should be created and accessible from the local host, provided with the necessary privileges for the user. | /scratch/shared/oud_user_projects
configVolume.networkstorage.nfs.path | Path of NFS Share location | /scratch/shared/oud_config
configVolume.networkstorage.nfs.server | IP or hostname of NFS Server | 0.0.0.0
configVolume.custom.* | Based on values/data, YAML content would be included in the PersistentVolume object |
configVolume.accessMode | Specifies the access mode of the location provided | ReadWriteMany
configVolume.size | Specifies the size of the storage | 10Gi
configVolume.storageClass | Specifies the storageclass of the persistence volume. | empty
configVolume.annotations | Specifies any annotations that will be used | { }
configVolume.storageClassCreate | If true, it will create the storageclass. If value is false, provide an existing storage class (storageClass) to be used. | true
configVolume.provisioner | If configVolume.storageClassCreate is true, provide the custom provisioner if any. | kubernetes.io/is-default-class
oudPorts.adminldaps | Port on which the Oracle Unified Directory instance in the container should listen for Administration Communication over the LDAPS Protocol | 1444
oudPorts.adminhttps | Port on which the Oracle Unified Directory instance in the container should listen for Administration Communication over the HTTPS Protocol. | 1888
oudPorts.ldap | Port on which the Oracle Unified Directory instance in the container should listen for LDAP Communication. | 1389
oudPorts.ldaps | Port on which the Oracle Unified Directory instance in the container should listen for LDAPS Communication. | 1636
oudPorts.http | Port on which the Oracle Unified Directory instance in the container should listen for HTTP Communication. | 1080
oudPorts.https | Port on which the Oracle Unified Directory instance in the container should listen for HTTPS Communication. | 1081
oudPorts.replication | Port value to be used while setting up the replication server. | 1898
oudConfig.baseDN | BaseDN for Oracle Unified Directory instances | dc=example,dc=com
oudConfig.rootUserDN | Root User DN for Oracle Unified Directory instances | cn=Directory Manager
oudConfig.rootUserPassword | Password for Root User DN | RandomAlphanum
oudConfig.sampleData | To specify that the database should be populated with the specified number of sample entries. | 0
oudConfig.sleepBeforeConfig | Based on the value for this parameter, initialization/configuration of each Oracle Unified Directory replica would be delayed. | 120
oudConfig.adminUID | AdminUID to be configured with each replicated Oracle Unified Directory instance | admin
oudConfig.adminPassword | Password for AdminUID. If the value is not passed, the value of rootUserPassword would be used as the password for AdminUID. | rootUserPassword
baseOUD.envVarsConfigMap | Reference to a ConfigMap which can contain additional environment variables to be passed on to the POD for the base Oracle Unified Directory instance. The following environment variables would not be honored from the ConfigMap: instanceType, sleepBeforeConfig, OUD_INSTANCE_NAME, hostname, baseDN, rootUserDN, rootUserPassword, adminConnectorPort, httpAdminConnectorPort, ldapPort, ldapsPort, httpPort, httpsPort, replicationPort, sampleData. | -
baseOUD.envVars | Environment variables in YAML Map format. This is helpful when it is required to pass environment variables through a --values file. The list of env variables which would not be honored from the envVars map is the same as the list of env var names mentioned for envVarsConfigMap. For a full list of environment variables, see Appendix B: Environment Variables. | -
replOUD.envVarsConfigMap | Reference to a ConfigMap which can contain additional environment variables to be passed on to the PODs for replicated Oracle Unified Directory instances. The following environment variables would not be honored from the ConfigMap: instanceType, sleepBeforeConfig, OUD_INSTANCE_NAME, hostname, baseDN, rootUserDN, rootUserPassword, adminConnectorPort, httpAdminConnectorPort, ldapPort, ldapsPort, httpPort, httpsPort, replicationPort, sampleData, sourceHost, sourceServerPorts, sourceAdminConnectorPort, sourceReplicationPort, dsreplication_1, dsreplication_2, dsreplication_3, dsreplication_4, post_dsreplication_dsconfig_1, post_dsreplication_dsconfig_2. | -
replOUD.envVars | Environment variables in YAML Map format. This is helpful when it is required to pass environment variables through a --values file. The list of env variables which would not be honored from the envVars map is the same as the list of env var names mentioned for envVarsConfigMap. For a full list of environment variables, see Appendix B: Environment Variables. | -
podManagementPolicy | Defines the policy for pod management within the statefulset. Typical values are OrderedReady/Parallel | OrderedReady
updateStrategy | Allows you to configure and disable automated rolling updates for containers, labels, resource request/limits, and annotations for the Pods in a StatefulSet. Typical values are OnDelete/RollingUpdate | RollingUpdate
busybox.image | busybox image name. Used for initcontainers | busybox
oudConfig.cleanupbeforeStart | Used to remove the individual pod directories during restart. Recommended value is false. Note: Do not change the default value (false) as it will delete the existing data and clone it from the base pod again. | false
oudConfig.disablereplicationbeforeStop | This parameter is used to disable replication when a pod is restarted. Recommended value is false. Note: Do not change the default value (false), as changing the value will result in an issue where the pod won't join the replication topology after a restart. | false
oudConfig.resources.requests.memory | This parameter is used to set the memory request for the OUD pod | 4Gi
oudConfig.resources.requests.cpu | This parameter is used to set the cpu request for the OUD pod | 0.5
oudConfig.resources.limits.memory | This parameter is used to set the memory limit for the OUD pod | 4Gi
oudConfig.resources.limits.cpu | This parameter is used to set the cpu limit for the OUD pod | 1
replOUD.groupId | Group ID to be used/configured with each Oracle Unified Directory instance in the replicated topology. | 1
service.lbrtype | Type of load balancer Service to be created for admin, http, ldap services. Values allowed: ClusterIP/NodePort | ClusterIP
oudPorts.nodePorts.adminldaps | Public port on which the OUD instance in the container should listen for administration communication over the LDAPS Protocol. The port number should be between 30000-32767. No duplicate values are allowed. Note: Set only if service.lbrtype is set as NodePort. If left blank then k8s will assign random ports in between 30000 and 32767. |
oudPorts.nodePorts.adminhttps | Public port on which the OUD instance in the container should listen for administration communication over the HTTPS Protocol. The port number should be between 30000-32767. No duplicate values are allowed. Note: Set only if service.lbrtype is set as NodePort. If left blank then k8s will assign random ports in between 30000 and 32767. |
oudPorts.nodePorts.ldap | Public port on which the OUD instance in the container should listen for LDAP communication. The port number should be between 30000-32767. No duplicate values are allowed. Note: Set only if service.lbrtype is set as NodePort. If left blank then k8s will assign random ports in between 30000 and 32767. |
oudPorts.nodePorts.ldaps | Public port on which the OUD instance in the container should listen for LDAPS communication. The port number should be between 30000-32767. No duplicate values are allowed. Note: Set only if service.lbrtype is set as NodePort. If left blank then k8s will assign random ports in between 30000 and 32767. |
oudPorts.nodePorts.http | Public port on which the OUD instance in the container should listen for HTTP communication. The port number should be between 30000-32767. No duplicate values are allowed. Note: Set only if service.lbrtype is set as NodePort. If left blank then k8s will assign random ports in between 30000 and 32767. |
oudPorts.nodePorts.https | Public port on which the OUD instance in the container should listen for HTTPS communication. The port number should be between 30000-32767. No duplicate values are allowed. Note: Set only if service.lbrtype is set as NodePort. If left blank then k8s will assign random ports in between 30000 and 32767. |
oudConfig.integration | Specifies which Oracle components the server can be integrated with. It is recommended to choose the option covering your minimal requirements. Allowed values: no-integration (no integration), basic (Directory Integration Platform), generic (Directory Integration Platform, Database Net Services and E-Business Suite integration), eus (Directory Integration Platform, Database Net Services, E-Business Suite and Enterprise User Security integration) | no-integration
elk.logStashImage | The version of logstash you want to install | logstash:8.3.1
elk.sslenabled | If SSL is enabled for ELK set the value to true, or if NON-SSL set to false. This value must be lowercase | TRUE
elk.eshosts | The URL for sending logs to Elasticsearch. HTTP if NON-SSL is used | https://elasticsearch.example.com:9200
elk.esuser | The name of the user for logstash to access Elasticsearch | logstash_internal
elk.espassword | The password for ELK_USER | password
elk.esapikey | The API key details | apikey
elk.esindex | The log name | oudlogs-00001
elk.imagePullSecrets | Secret to be used for pulling the logstash image | dockercred
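
As an illustration only, a small override file using a handful of the parameters above might look like the following sketch. The file name custom-values.yaml is arbitrary, and the values shown are simply the chart defaults from the table; adjust them for your registry, base DN, and storage location.

    # custom-values.yaml - example override for a subset of the oud-ds-rs parameters
    replicaCount: 3
    image:
      repository: oracle/oud
      tag: 12.2.1.4.0
      pullPolicy: IfNotPresent
    oudConfig:
      baseDN: dc=example,dc=com
      rootUserDN: cn=Directory Manager
    persistence:
      type: filesystem
      filesystem:
        hostPath:
          path: /scratch/shared/oud_user_projects

The file would then be passed to helm with --values custom-values.yaml, in the same way as the other helm commands in this document.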
+

Appendix B: Environment Variables

Environment Variable | Description | Default Value
ldapPort | Port on which the Oracle Unified Directory instance in the container should listen for LDAP communication. Use 'disabled' if you do not want to enable it. | 1389
ldapsPort | Port on which the Oracle Unified Directory instance in the container should listen for LDAPS communication. Use 'disabled' if you do not want to enable it. | 1636
rootUserDN | DN for the Oracle Unified Directory instance root user. | -
rootUserPassword | Password for the Oracle Unified Directory instance root user. | -
adminConnectorPort | Port on which the Oracle Unified Directory instance in the container should listen for administration communication over LDAPS. Use 'disabled' if you do not want to enable it. Note that at least one of the LDAP or the HTTP administration ports must be enabled. | 1444
httpAdminConnectorPort | Port on which the Oracle Unified Directory instance in the container should listen for administration communication over the HTTPS protocol. Use 'disabled' if you do not want to enable it. Note that at least one of the LDAP or the HTTP administration ports must be enabled. | 1888
httpPort | Port on which the Oracle Unified Directory instance in the container should listen for HTTP communication. Use 'disabled' if you do not want to enable it. | 1080
httpsPort | Port on which the Oracle Unified Directory instance in the container should listen for HTTPS communication. Use 'disabled' if you do not want to enable it. | 1081
sampleData | Specifies the number of sample entries to populate the Oracle Unified Directory instance with on creation. If this parameter has a non-numeric value, the parameter addBaseEntry is added to the command instead of sampleData. Similarly, when the ldifFile_n parameter is specified, sampleData will not be considered and ldifFile entries will be populated. | 0
adminUID | User ID of the Global Administrator to use to bind to the server. This parameter is primarily used with the dsreplication command. | -
adminPassword | Password for adminUID | -
bindDN1 | BindDN to be used while setting up replication using dsreplication to connect to the First Directory/Replication Instance. | -
bindPassword1 | Password for bindDN1 | -
bindDN2 | BindDN to be used while setting up replication using dsreplication to connect to the Second Directory/Replication Instance. | -
bindPassword2 | Password for bindDN2 | -
replicationPort | Port value to be used while setting up a replication server. This variable is used to substitute values in dsreplication parameters. | 1898
sourceHost | Value for the hostname to be used while setting up a replication server. This variable is used to substitute values in dsreplication parameters. | -
initializeFromHost | Value for the hostname to be used while initializing data on a new Oracle Unified Directory instance replicated from an existing instance. This variable is used to substitute values in dsreplication parameters. It is possible to have a different value for sourceHost and initializeFromHost while setting up replication with a Replication Server: sourceHost can be used for the Replication Server and initializeFromHost can be used for an existing Directory instance from which data will be initialized. | $sourceHost
serverTuning | Values to be used to tune JVM settings. The default value is jvm-default. If specific tuning parameters are required, they can be added using this variable. | jvm-default
offlineToolsTuning | Values to be used to specify the tuning for offline tools. If this variable is not specified, jvm-default is used. To set specific tuning, specify the complete set of values with options. | jvm-default
generateSelfSignedCertificate | Set to "true" if the requirement is to generate a self-signed certificate when creating an Oracle Unified Directory instance. If no value is provided this value takes the default, "true". If using a certificate generated separately this value should be set to "false". | true
usePkcs11Keystore | Use a certificate in a PKCS#11 token that the replication gateway will use as server certificate when accepting encrypted connections from the Oracle Directory Server Enterprise Edition server. Set to "true" if the requirement is to use the usePkcs11Keystore parameter when creating an Oracle Unified Directory instance. By default this parameter is not set. To use this option generateSelfSignedCertificate should be set to "false". | -
enableStartTLS | Enable StartTLS to allow secure communication with the directory server by using the LDAP port. By default this parameter is not set. To use this option generateSelfSignedCertificate should be set to "false". | -
useJCEKS | Specifies the path of a JCEKS that contains a certificate that the replication gateway will use as server certificate when accepting encrypted connections from the Oracle Directory Server Enterprise Edition server. If required this should specify the keyStorePath, for example, /u01/oracle/config/keystore. | -
useJavaKeystore | Specify the path to the Java Keystore (JKS) that contains the server certificate. If required this should specify the path to the JKS, for example, /u01/oracle/config/keystore. By default this parameter is not set. To use this option generateSelfSignedCertificate should be set to "false". | -
usePkcs12keyStore | Specify the path to the PKCS#12 keystore that contains the server certificate. If required this should specify the path, for example, /u01/oracle/config/keystore.p12. By default this parameter is not set. | -
keyStorePasswordFile | Use the password in the specified file to access the certificate keystore. A password is required when you specify an existing certificate (JKS, JCEKS, PKCS#11, or PKCS#12) as a server certificate. If required this should specify the path of the password file, for example, /u01/oracle/config/keystorepassword.txt. By default this parameter is not set. | -
eusPasswordScheme | Set the password storage scheme, if configuring Oracle Unified Directory for Enterprise User Security. Set this to a value of either "sha1" or "sha2". By default this parameter is not set. | -
jmxPort | Port on which the Directory Server should listen for JMX communication. Use 'disabled' if you do not want to enable it. | disabled
javaSecurityFile | Specify the path to the Java security file. If required this should specify the path, for example, /u01/oracle/config/new_security_file. By default this parameter is not set. | -
schemaConfigFile_n | 'n' in the variable name represents a numeric value between 1 and 50. This variable is used to set the full path of LDIF files that need to be passed to the Oracle Unified Directory instance for schema configuration/extension. If required this should specify the path, for example, schemaConfigFile_1=/u01/oracle/config/00_test.ldif. | -
ldifFile_n | 'n' in the variable name represents a numeric value between 1 and 50. This variable is used to set the full path of LDIF files that need to be passed to the Oracle Unified Directory instance for initial data population. If required this should specify the path, for example, ldifFile_1=/u01/oracle/config/test1.ldif. | -
dsconfigBatchFile_n | 'n' in the variable name represents a numeric value between 1 and 50. This variable is used to set the full path of LDIF files that need to be passed to the Oracle Unified Directory instance for batch processing by the dsconfig command. If required this should specify the path, for example, dsconfigBatchFile_1=/u01/oracle/config/dsconfig_1.txt. When executing the dsconfig command the following values are added implicitly to the arguments contained in the batch file: ${hostname}, ${adminConnectorPort}, ${bindDN} and ${bindPasswordFile} | -
dstune_n | 'n' in the variable name represents a numeric value between 1 and 50. Allows commands and options to be passed to the dstune utility as a full command. | -
dsconfig_n | 'n' in the variable name represents a numeric value between 1 and 300. Each file represents a set of execution parameters for the dsconfig command. For each dsconfig execution, the following variables are added implicitly: ${hostname}, ${adminConnectorPort}, ${bindDN}, ${bindPasswordFile}. | -
dsreplication_n | 'n' in the variable name represents a numeric value between 1 and 50. Each file represents a set of execution parameters for the dsreplication command. For each dsreplication execution, the following variables are added implicitly: ${hostname}, ${ldapPort}, ${ldapsPort}, ${adminConnectorPort}, ${replicationPort}, ${sourceHost}, ${initializeFromHost}, and ${baseDN}. Depending on the dsreplication sub-command, the following variables are added implicitly: ${bindDN1}, ${bindPasswordFile1}, ${bindDN2}, ${bindPasswordFile2}, ${adminUID}, and ${adminPasswordFile}. | -
post_dsreplication_dsconfig_n | 'n' in the variable name represents a numeric value between 1 and 300. Each file represents a set of execution parameters for the dsconfig command to be run following execution of the dsreplication command. For each dsconfig execution, the following variables/values are added implicitly: --provider-name "Multimaster Synchronization", ${hostname}, ${adminConnectorPort}, ${bindDN}, ${bindPasswordFile}. | -
rebuildIndex_n | 'n' in the variable name represents a numeric value between 1 and 50. Each file represents a set of execution parameters for the rebuild-index command. For each rebuild-index execution, the following variables are added implicitly: ${hostname}, ${adminConnectorPort}, ${bindDN}, ${bindPasswordFile}, and ${baseDN}. | -
manageSuffix_n | 'n' in the variable name represents a numeric value between 1 and 50. Each file represents a set of execution parameters for the manage-suffix command. For each manage-suffix execution, the following variables are added implicitly: ${hostname}, ${adminConnectorPort}, ${bindDN}, ${bindPasswordFile}. | -
importLdif_n | 'n' in the variable name represents a numeric value between 1 and 50. Each file represents a set of execution parameters for the import-ldif command. For each import-ldif execution, the following variables are added implicitly: ${hostname}, ${adminConnectorPort}, ${bindDN}, ${bindPasswordFile}. | -
execCmd_n | 'n' in the variable name represents a numeric value between 1 and 300. Each file represents a command to be executed in the container. For each command execution, the following variables are replaced, if present in the command: ${hostname}, ${ldapPort}, ${ldapsPort}, ${adminConnectorPort}. | -
restartAfterRebuildIndex | Specifies whether to restart the server after building the index. | false
restartAfterSchemaConfig | Specifies whether to restart the server after configuring the schema. | false
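
For example, additional environment variables from the table above can be supplied through a ConfigMap referenced by baseOUD.envVarsConfigMap (or replOUD.envVarsConfigMap). The sketch below is illustrative only: the ConfigMap name oud-base-env, the JVM options, and the chosen variables are assumptions to be adapted for your environment.

    # Create a ConfigMap holding the extra environment variables
    $ kubectl create configmap oud-base-env -n oudns \
        --from-literal=serverTuning='-Xms1024m -Xmx2048m' \
        --from-literal=restartAfterSchemaConfig=true

The ConfigMap is then referenced from a helm override file:

    baseOUD:
      envVarsConfigMap: oud-base-env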
+

Note: For the following parameters from the table above, the statement below applies:

+
    +
  • dsconfig_n
  • +
  • dsreplication_n
  • +
  • post_dsreplication_dsconfig_n
  • +
  • rebuildIndex_n
  • +
  • manageSuffix_n
  • +
  • importLdif_n
  • +
  • execCmd_n
  • +
+

If values are provided, the following variables will be substituted with their values: ${hostname}, ${ldapPort}, ${ldapsPort}, ${adminConnectorPort}, ${replicationPort}, ${sourceHost}, ${initializeFromHost}, ${sourceAdminConnectorPort}, ${sourceReplicationPort}, ${baseDN}, ${rootUserDN}, ${adminUID}, ${rootPwdFile}, ${bindPasswordFile}, ${adminPwdFile}, ${bindPwdFile1}, ${bindPwdFile2}

+ + + + + + + + + + + + + + + + + + diff --git a/docs/23.4.1/idm-products/oud/create-oud-instances/index.xml b/docs/23.4.1/idm-products/oud/create-oud-instances/index.xml new file mode 100644 index 000000000..143d89a47 --- /dev/null +++ b/docs/23.4.1/idm-products/oud/create-oud-instances/index.xml @@ -0,0 +1,14 @@ + + + + Create Oracle Unified Directory Instances on Oracle Fusion Middleware on Kubernetes + /fmw-kubernetes/23.4.1/idm-products/oud/create-oud-instances/ + Recent content in Create Oracle Unified Directory Instances on Oracle Fusion Middleware on Kubernetes + Hugo -- gohugo.io + en-us + + + + + + \ No newline at end of file diff --git a/docs/23.4.1/idm-products/oud/index.html b/docs/23.4.1/idm-products/oud/index.html new file mode 100644 index 000000000..576b01356 --- /dev/null +++ b/docs/23.4.1/idm-products/oud/index.html @@ -0,0 +1,4141 @@ + + + + + + + + + + + + Oracle Unified Directory :: Oracle Fusion Middleware on Kubernetes + + + + + + + + + + + + + + + + + + + + + + + + + +
+
+
+ +
+
+ + + + + + + +
+
+ +
+ +
+ +
+ +

+ + Oracle Unified Directory +

+ + + + + + + +

Oracle Unified Directory on Kubernetes

+

Oracle supports the deployment of Oracle Unified Directory on Kubernetes. See the following sections:

+ + + + + + + + + + + + + + + + + +
+ +
+ + +
+ + +
+ + + +
+ +
+
+
+ + + + + + + + + + + + + + + + + + diff --git a/docs/23.4.1/idm-products/oud/index.xml b/docs/23.4.1/idm-products/oud/index.xml new file mode 100644 index 000000000..638d370a2 --- /dev/null +++ b/docs/23.4.1/idm-products/oud/index.xml @@ -0,0 +1,24 @@ + + + + Oracle Unified Directory on Oracle Fusion Middleware on Kubernetes + /fmw-kubernetes/23.4.1/idm-products/oud/ + Recent content in Oracle Unified Directory on Oracle Fusion Middleware on Kubernetes + Hugo -- gohugo.io + en-us + + + + + + Patch and Upgrade + /fmw-kubernetes/23.4.1/idm-products/oud/patch-and-upgrade/ + Mon, 01 Jan 0001 00:00:00 +0000 + + /fmw-kubernetes/23.4.1/idm-products/oud/patch-and-upgrade/ + In this section you learn how to upgrade OUD from a previous version. Follow the section relevant to the version you are upgrading from. + Upgrading to October 23 (23.4.1) from April 23 (23.2.1) or later Upgrading to October 23 (23.4.1) from October 22 (22.4.1) or January 23 (23.1.1) Upgrading to October 23 (23.4.1) from July 22 (22.3.1) Upgrading to October 23 (23.4.1) from releases prior to July 22 (22.3.1) Upgrading Elasticsearch and Kibana Note: If on July 22 (22. + + + + \ No newline at end of file diff --git a/docs/23.4.1/idm-products/oud/introduction/index.html b/docs/23.4.1/idm-products/oud/introduction/index.html new file mode 100644 index 000000000..256d8b10c --- /dev/null +++ b/docs/23.4.1/idm-products/oud/introduction/index.html @@ -0,0 +1,3987 @@ + + + + + + + + + + + + Introduction :: Oracle Fusion Middleware on Kubernetes + + + + + + + + + + + + + + + + + + + + + + + + + +
+
+
+ +
+
+ + + + + + + +
+
+ +
+ +
+ +
+ +

+ + Introduction +

+ + + + + + + +

Oracle Unified Directory provides a comprehensive Directory Solution for robust Identity Management. Oracle Unified Directory is an all-in-one directory solution with storage, proxy, synchronization and virtualization capabilities. While unifying the approach, it provides all the services required for high-performance Enterprise and carrier-grade environments. Oracle Unified Directory ensures scalability to billions of entries, ease of installation, elastic deployments, enterprise manageability and effective monitoring.

+

This project supports deployment of Oracle Unified Directory (OUD) container images based on the 12cPS4 (12.2.1.4.0) release within a Kubernetes environment. The OUD container image refers to binaries for OUD Release 12.2.1.4.0 and it has the capability to create different types of OUD Instances (Directory Service, Proxy, Replication) in containers.

+

This project has several key features to assist you with deploying and managing Oracle Unified Directory in a Kubernetes environment. You can:

+
    +
  • Create Oracle Unified Directory instances in a Kubernetes persistent volume (PV). This PV can reside in an NFS file system, block storage device, or other Kubernetes volume types.
  • +
  • Start servers based on declarative startup parameters and desired states.
  • +
  • Expose the Oracle Unified Directory services for external access.
  • +
  • Scale Oracle Unified Directory by starting and stopping servers on demand.
  • +
  • Monitor the Oracle Unified Directory instance using Prometheus and Grafana.
  • +
+

Current production release

+

The current production release for the Oracle Unified Directory 12c PS4 (12.2.1.4.0) deployment on Kubernetes is 23.4.1.

+

Recent changes and known issues

+

See the Release Notes for recent changes and known issues for Oracle Unified Directory deployment on Kubernetes.

+

Getting started

+

This documentation explains how to configure OUD on a Kubernetes cluster where no other Oracle Identity Management products will be deployed. For detailed information about this type of deployment, start at Prerequisites and follow this documentation sequentially. Note that this documentation does not explain how to configure a Kubernetes cluster itself, because the product can be deployed on any compliant Kubernetes vendor.

+

If you are deploying multiple Oracle Identity Management products on the same Kubernetes cluster, then you must follow the Enterprise Deployment Guide outlined in Enterprise Deployments. Note that you also have the option to follow the Enterprise Deployment Guide even if you are only installing OUD and no other Oracle Identity Management products.

+

Note: If you need to understand how to configure a Kubernetes cluster ready for an Oracle Unified Directory deployment, you should follow the Enterprise Deployment Guide referenced in Enterprise Deployments. The Enterprise Deployment Automation section also contains details on automation scripts that can:

+
    +
  • Automate the creation of a Kubernetes cluster on Oracle Cloud Infrastructure (OCI), ready for the deployment of Oracle Identity Management products.
  • +
  • Automate the deployment of Oracle Identity Management products on any compliant Kubernetes cluster.
  • +
+

Documentation for earlier releases

+

To view documentation for an earlier release, see:

+ + + + + + +
+ +
+ + +
+ + +
+ + + +
+ +
+
+
+ + + + + + + + + + + + + + + + + + diff --git a/docs/23.4.1/idm-products/oud/introduction/index.xml b/docs/23.4.1/idm-products/oud/introduction/index.xml new file mode 100644 index 000000000..84d4b28da --- /dev/null +++ b/docs/23.4.1/idm-products/oud/introduction/index.xml @@ -0,0 +1,14 @@ + + + + Introduction on Oracle Fusion Middleware on Kubernetes + /fmw-kubernetes/23.4.1/idm-products/oud/introduction/ + Recent content in Introduction on Oracle Fusion Middleware on Kubernetes + Hugo -- gohugo.io + en-us + + + + + + \ No newline at end of file diff --git a/docs/23.4.1/idm-products/oud/manage-oud-containers/hpa/index.html b/docs/23.4.1/idm-products/oud/manage-oud-containers/hpa/index.html new file mode 100644 index 000000000..f1619d2f6 --- /dev/null +++ b/docs/23.4.1/idm-products/oud/manage-oud-containers/hpa/index.html @@ -0,0 +1,4238 @@ + + + + + + + + + + + + d. Kubernetes Horizontal Pod Autoscaler :: Oracle Fusion Middleware on Kubernetes + + + + + + + + + + + + + + + + + + + + + + + + + +
+
+
+ +
+
+ + + + + + + +
+
+ +
+ +
+ +
+ +

+ + d. Kubernetes Horizontal Pod Autoscaler +

+ + + + + + +
    +
  1. Prerequisite configuration
  2. +
  3. Deploy the Kubernetes Metrics Server +
      +
    1. Troubleshooting
    2. +
    +
  4. +
  5. Deploy HPA
  6. +
  7. Testing HPA
  8. +
  9. Delete the HPA
  10. +
  11. Other considerations
  12. +
+

Kubernetes Horizontal Pod Autoscaler (HPA) allows automatic scaling (up and down) of the OUD servers. If load increases, extra OUD servers are started as required. Similarly, if load decreases, OUD servers are automatically shut down.

+

For more information on HPA, see Horizontal Pod Autoscaling.

+

The instructions below show you how to configure and run an HPA to scale OUD servers, based on CPU utilization or memory resource metrics.

+

Note: If you enable HPA and then decide you want to start/stop/scale OUD servers manually as per Scaling Up/Down OUD Pods, it is recommended to delete HPA beforehand as per Delete the HPA.

+

Prerequisite configuration

+

In order to use HPA, OUD must have been created with the required resources parameter as per Create OUD instances. For example:

+
oudConfig:
+ # memory, cpu parameters for both requests and limits for oud instances
+  resources:
+    limits:
+      cpu: "1"
+      memory: "8Gi"
+    requests:
+      cpu: "500m" 
+      memory: "4Gi"
+

If you created the OUD servers at any point since July 22 (22.3.1) then these values are the defaults. You can check using the following command:

+
$ helm show values oud-ds-rs -n oudns
+

The output will look similar to the following:

+
...
+# memory, cpu parameters for both requests and limits for oud instances
+  resources:
+    requests:
+      memory: "4Gi"
+      cpu: "500m"
+    limits:
+      memory: "8Gi"
+      cpu: "2"
+ ...
+
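
If your existing deployment does not have these resource settings, they can be added with a helm upgrade. The following is a sketch only: it assumes you run it from the directory containing the oud-ds-rs chart (as with the other helm commands in this document) and that a rolling restart of the OUD pods is acceptable.

    $ helm upgrade --namespace oudns --reuse-values \
        --set oudConfig.resources.requests.cpu=500m \
        --set oudConfig.resources.requests.memory=4Gi \
        --set oudConfig.resources.limits.cpu=1 \
        --set oudConfig.resources.limits.memory=8Gi \
        oud-ds-rs oud-ds-rs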

Deploy the Kubernetes Metrics Server

+

Before deploying HPA you must deploy the Kubernetes Metrics Server.

+
    +
  1. +

    Check to see if the Kubernetes Metrics Server is already deployed:

    +
    $ kubectl get pods -n kube-system | grep metric
    +

    If a row is returned as follows, then the Kubernetes Metrics Server is deployed and you can move to Deploy HPA.

    +
    metrics-server-d9694457-mf69d           1/1     Running   0             5m13s
    +
  2. +
  3. +

    If no rows are returned by the previous command, then the Kubernetes Metrics Server needs to be deployed. Run the following commands to get the components.yaml:

    +
    $ mkdir $WORKDIR/kubernetes/hpa
    +$ cd $WORKDIR/kubernetes/hpa
    +$ wget https://github.com/kubernetes-sigs/metrics-server/releases/latest/download/components.yaml
    +
  4. +
  5. +

    Deploy the Kubernetes Metrics Server by running the following command:

    +
    $ kubectl apply -f components.yaml
    +

    The output will look similar to the following:

    +
    serviceaccount/metrics-server created
    +clusterrole.rbac.authorization.k8s.io/system:aggregated-metrics-reader created
    +clusterrole.rbac.authorization.k8s.io/system:metrics-server created
    +rolebinding.rbac.authorization.k8s.io/metrics-server-auth-reader created
    +clusterrolebinding.rbac.authorization.k8s.io/metrics-server:system:auth-delegator created
    +clusterrolebinding.rbac.authorization.k8s.io/system:metrics-server created
    +service/metrics-server created
    +deployment.apps/metrics-server created
    +apiservice.apiregistration.k8s.io/v1beta1.metrics.k8s.io created
    +
  6. +
  7. +

    Run the following command to check that the Kubernetes Metrics Server is running:

    +
    $ kubectl get pods -n kube-system | grep metric
    +

    Make sure the pod has a READY status of 1/1:

    +
    metrics-server-d9694457-mf69d           1/1     Running   0             39s
    +
  8. +
+
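
With the Metrics Server running, you can optionally confirm that resource metrics are being collected for the OUD pods. Note that it can take a minute or two after deployment before metrics become available.

    $ kubectl top pods -n oudns

If metrics are available, CPU and memory usage is listed for each pod in the namespace.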

Troubleshooting

+

If the Kubernetes Metrics Server does not reach the READY 1/1 state, run the following commands:

+
$ kubectl describe pod <metrics-server-pod> -n kube-system
+$ kubectl logs <metrics-server-pod> -n kube-system
+

If you see errors such as:

+
Readiness probe failed: HTTP probe failed with statuscode: 500
+

and:

+
E0907 13:07:50.937308       1 scraper.go:140] "Failed to scrape node" err="Get \"https://X.X.X.X:10250/metrics/resource\": x509: cannot validate certificate for 100.105.18.113 because it doesn't contain any IP SANs" node="worker-node1"
+

then you may need to install a valid cluster certificate for your Kubernetes cluster.

+

For testing purposes, you can resolve this issue by:

+
    +
  1. +

    Delete the Kubernetes Metrics Server by running the following command:

    +
    $ kubectl delete -f $WORKDIR/kubernetes/hpa/components.yaml
    +
  2. +
  3. +

    Edit the $WORKDIR/kubernetes/hpa/components.yaml file and locate the args: section. Add --kubelet-insecure-tls to the arguments. For example:

    +
    spec:
    +  containers:
    +  - args:
    +    - --cert-dir=/tmp
    +    - --secure-port=4443
    +    - --kubelet-preferred-address-types=InternalIP,ExternalIP,Hostname
    +    - --kubelet-use-node-status-port
    +    - --kubelet-insecure-tls
    +    - --metric-resolution=15s
    +    image: registry.k8s.io/metrics-server/metrics-server:v0.6.4
    + ...
    +
  4. +
  5. +

    Deploy the Kubernetes Metrics Server using the command:

    +
    $ kubectl apply -f components.yaml
    +

    Run the following and make sure the READY status shows 1/1:

    +
    $ kubectl get pods -n kube-system | grep metric
    +

    The output should look similar to the following:

    +
    metrics-server-d9694457-mf69d           1/1     Running   0             40s
    +
  6. +
+

Deploy HPA

+

The steps below show how to configure and run an HPA to scale OUD, based on the CPU or memory utilization resource metrics.

+

Assuming the example OUD configuration in Create OUD instances, three OUD servers are started by default (oud-ds-rs-0, oud-ds-rs-1, oud-ds-rs-2).

+

In the following example an HPA resource is created, targeted at the statefulset oud-ds-rs. This resource will autoscale OUD servers from a minimum of 3 OUD servers up to 5 OUD servers. Scaling up will occur when the average CPU is consistently over 70%. Scaling down will occur when the average CPU is consistently below 70%.

+
    +
  1. +

    Navigate to the $WORKDIR/kubernetes/hpa directory and create an autoscalehpa.yaml file that contains the following:

    +
    #
    +apiVersion: autoscaling/v2
    +kind: HorizontalPodAutoscaler
    +metadata:
    +  name: oud-sts-hpa
    +  namespace: oudns
    +spec:
    +  scaleTargetRef:
    +    apiVersion: apps/v1
    +    kind: StatefulSet
    +    name: oud-ds-rs #statefulset name of oud
    +  behavior:
    +    scaleDown:
    +      stabilizationWindowSeconds: 60
    +    scaleUp:
    +      stabilizationWindowSeconds: 60
    +  minReplicas: 3
    +  maxReplicas: 5
    +  metrics:
    +  - type: Resource
    +    resource:
    +      name: cpu
    +      target:
    +        type: Utilization
    +        averageUtilization: 70
    +

    Note: minReplicas should match the number of OUD servers started by default. Set maxReplicas to the maximum number of OUD servers that can be started.

    +

    Note: To find the statefulset name, in this example oud-ds-rs, run "kubectl get statefulset -n oudns".

    +

    Note: To set HPA based on memory metrics, update the metrics block with the following content. It is recommended to use either CPU or memory, not both.

    +
    metrics:
    +- type: Resource
    +  resource:
    +    name: memory
    +    target:
    +      type: Utilization
    +      averageUtilization: 70
    +
  2. +
  3. +

    Run the following command to create the autoscaler:

    +
    $ kubectl apply -f autoscalehpa.yaml
    +

    The output will look similar to the following:

    +
    horizontalpodautoscaler.autoscaling/oud-sts-hpa created
    +
  4. +
  5. +

    Verify the status of the autoscaler by running the following:

    +
    $ kubectl get hpa -n oudns
    +

    The output will look similar to the following:

    +
    NAME          REFERENCE               TARGETS   MINPODS   MAXPODS   REPLICAS   AGE
    +oud-sts-hpa   StatefulSet/oud-ds-rs   5%/70%    3         5         3          33s
    +

    In the example above, this shows that CPU is currently running at 5% for the oud-sts-hpa.

    +
  6. +
+
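
If the TARGETS column shows <unknown> rather than a percentage, the autoscaler has not yet received metrics for the target pods. In that case you can inspect the autoscaler's events and conditions, for example:

    $ kubectl describe hpa oud-sts-hpa -n oudns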

Testing HPA

+
    +
  1. +

    Check the current status of the OUD servers:

    +
    $ kubectl get pods -n oudns
    +

    The output will look similar to the following:

    +
    NAME                              READY   STATUS      RESTARTS   AGE
    +oud-ds-rs-0                       1/1     Running     0          5h15m
    +oud-ds-rs-1                       1/1     Running     0          5h9m
    +oud-ds-rs-2                       1/1     Running     0          5h2m
    +oud-pod-cron-job-28242120-bwtcz   0/1     Completed   0          61m
    +oud-pod-cron-job-28242150-qf8fg   0/1     Completed   0          31m
    +oud-pod-cron-job-28242180-q69lm   0/1     Completed   0          92s
    +

    In the above output, oud-ds-rs-0, oud-ds-rs-1 and oud-ds-rs-2 are running.

    +
  2. +
  3. +

    To test that HPA can scale up the OUD servers, run the following commands:

    +
    $ kubectl exec --stdin --tty oud-ds-rs-0 -n oudns -- /bin/bash
    +

    This will take you inside a bash shell inside the oud-ds-rs-0 pod:

    +
    [oracle@oud-ds-rs-0 oracle]$
    +

    Inside the bash shell, run the following command to increase the load on the CPU:

    +
    [oracle@oud-ds-rs-0 oracle]$ dd if=/dev/zero of=/dev/null
    +

    This command will continue to run in the foreground.

    +
  4. +
  5. +

    Repeat the step above for the oud-ds-rs-1 pod:

    +
    $ kubectl exec --stdin --tty oud-ds-rs-1 -n oudns -- /bin/bash
    +[oracle@oud-ds-rs-1 oracle]$
    +[oracle@oud-ds-rs-1 oracle]$ dd if=/dev/zero of=/dev/null
    +
  6. +
  7. +

    In a command window outside the bash shells, run the following command to view the current CPU usage:

    +
    $ kubectl get hpa -n oudns
    +

    The output will look similar to the following:

    +
    NAME          REFERENCE               TARGETS    MINPODS   MAXPODS   REPLICAS   AGE
    +oud-sts-hpa   StatefulSet/oud-ds-rs   125%/70%   3         5         3          5m15s
    +

    In the above example the CPU has increased to 125%. As this is above the 70% limit, the autoscaler increases the replicas by starting additional OUD servers.

    +
  8. +
  9. +

    Run the following to see if any more OUD servers are started:

    +
    $ kubectl get pods -n oudns
    +

    The output will look similar to the following:

    +
    NAME                              READY   STATUS      RESTARTS   AGE
    +oud-ds-rs-0                       1/1     Running     0          5h50m
    +oud-ds-rs-1                       1/1     Running     0          5h44m
    +oud-ds-rs-2                       1/1     Running     0          5h37m
    +oud-ds-rs-3                       1/1     Running     0          9m29s
    +oud-ds-rs-4                       1/1     Running     0          5m17s
    +oud-pod-cron-job-28242150-qf8fg   0/1     Completed   0          66m
    +oud-pod-cron-job-28242180-q69lm   0/1     Completed   0          36m
    +oud-pod-cron-job-28242210-kn7sv   0/1     Completed   0          6m28s
    +

    In the example above one more OUD server has started (oud-ds-rs-4).

    +

    Note: It may take some time for the server to appear and start. Once the server has a READY status of 1/1, it has started.

    +
  10. +
  11. +

    To stop the load on the CPU, press Ctrl+C in both bash shells, and then exit each shell:

    +
    [oracle@oud-ds-rs-0 oracle]$ dd if=/dev/zero of=/dev/null
    +^C
    +[oracle@oud-ds-rs-0 oracle]$ exit
    +
  12. +
  13. +

    Run the following command to view the current CPU usage:

    +
    $ kubectl get hpa -n oudns
    +

    The output will look similar to the following:

    +
    NAME          REFERENCE               TARGETS   MINPODS   MAXPODS   REPLICAS   AGE
    +oud-sts-hpa   StatefulSet/oud-ds-rs   4%/70%    3         5         5          40m
    +

    In the above example CPU has dropped to 4%. As this is below the 70% threshold, you should see the autoscaler scale down the servers:

    +
    $ kubectl get pods -n oudns
    +

    The output will look similar to the following:

    +
    NAME                              READY   STATUS        RESTARTS   AGE
    +oud-ds-rs-0                       1/1     Running       0          5h54m
    +oud-ds-rs-1                       1/1     Running       0          5h48m
    +oud-ds-rs-2                       1/1     Running       0          5h41m
    +oud-ds-rs-3                       1/1     Running       0          13m
    +oud-ds-rs-4                       1/1     Terminating   0          8m27s
    +oud-pod-cron-job-28242150-qf8fg   0/1     Completed     0          70m
    +oud-pod-cron-job-28242180-q69lm   0/1     Completed     0          40m
    +oud-pod-cron-job-28242210-kn7sv   0/1     Completed     0          10m
    +

    Eventually, the extra server will disappear:

    +
    NAME                              READY   STATUS        RESTARTS   AGE
    +oud-ds-rs-0                       1/1     Running       0          5h57m
    +oud-ds-rs-1                       1/1     Running       0          5h51m
    +oud-ds-rs-2                       1/1     Running       0          5h44m
    +oud-ds-rs-3                       1/1     Running       0          16m
    +oud-pod-cron-job-28242150-qf8fg   0/1     Completed     0          73m
    +oud-pod-cron-job-28242180-q69lm   0/1     Completed     0          43m
    +oud-pod-cron-job-28242210-kn7sv   0/1     Completed     0          13m
    +
  14. +
+

Delete the HPA

+
    +
  1. +

    If you need to delete the HPA, you can do so by running the following command:

    +
    $ cd $WORKDIR/kubernetes/hpa
    +$ kubectl delete -f autoscalehpa.yaml
    +
  2. +
+
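
To confirm the HPA has been removed, run the following command; the oud-sts-hpa resource should no longer be listed:

    $ kubectl get hpa -n oudns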

Other considerations

+
    +
  • If HPA is deployed and you need to upgrade the OUD image, then you must delete the HPA before upgrading. Once the upgrade is successful you can deploy HPA again.
  • +
  • If you choose to scale up or scale down an OUD server manually as per Scaling Up/Down OUD Pods, then it is recommended to delete the HPA before doing so.
  • +
+ + +
+ +
+ + + +
+ + +
+ + + +
+ +
+
+
+ + + + + + + + + + + + + + + + + + + diff --git a/docs/23.4.1/idm-products/oud/manage-oud-containers/index.html b/docs/23.4.1/idm-products/oud/manage-oud-containers/index.html new file mode 100644 index 000000000..64fad4c29 --- /dev/null +++ b/docs/23.4.1/idm-products/oud/manage-oud-containers/index.html @@ -0,0 +1,4054 @@ + + + + + + + + + + + + Manage Oracle Unified Directory Containers :: Oracle Fusion Middleware on Kubernetes + + + + + + + + + + + + + + + + + + + + + + + + + +
+
+
+ +
+
+ + + + +
+
+ +
+
+ + +
+
+ +
+ +
+ +
+ +

+ + Manage Oracle Unified Directory Containers +

+ + + + + + + +

Important considerations for Oracle Unified Directory instances in Kubernetes.

+ + + + + + + + + + + + + + + + + +
+ +
+ + +
+ + +
+ + + +
+ +
+
+
+ + + + + + + + + + + + + + + + + + diff --git a/docs/23.4.1/idm-products/oud/manage-oud-containers/index.xml b/docs/23.4.1/idm-products/oud/manage-oud-containers/index.xml new file mode 100644 index 000000000..a88d97f25 --- /dev/null +++ b/docs/23.4.1/idm-products/oud/manage-oud-containers/index.xml @@ -0,0 +1,56 @@ + + + + Manage Oracle Unified Directory Containers on Oracle Fusion Middleware on Kubernetes + /fmw-kubernetes/23.4.1/idm-products/oud/manage-oud-containers/ + Recent content in Manage Oracle Unified Directory Containers on Oracle Fusion Middleware on Kubernetes + Hugo -- gohugo.io + en-us + + + + + + a) Scaling Up/Down OUD Pods + /fmw-kubernetes/23.4.1/idm-products/oud/manage-oud-containers/scaling-up-down/ + Mon, 01 Jan 0001 00:00:00 +0000 + + /fmw-kubernetes/23.4.1/idm-products/oud/manage-oud-containers/scaling-up-down/ + Introduction This section describes how to increase or decrease the number of OUD pods in the Kubernetes deployment. +Note: The instructions below are for scaling servers up or down manually. If you wish to use autoscaling, see Kubernetes Horizontal Pod Autoscaler. Please note, if you have enabled autoscaling, it is recommended to delete the autoscaler before running the commands below. +View existing OUD pods By default the oud-ds-rs helm chart deployment starts three pods: oud-ds-rs-0 and two replica pods oud-ds-rs-1 and oud-ds-rs-2. + + + + b) Logging and Visualization for Helm Chart oud-ds-rs Deployment + /fmw-kubernetes/23.4.1/idm-products/oud/manage-oud-containers/logging-and-visualization/ + Mon, 01 Jan 0001 00:00:00 +0000 + + /fmw-kubernetes/23.4.1/idm-products/oud/manage-oud-containers/logging-and-visualization/ + Introduction Install Elasticsearch and Kibana Create a Kubernetes secret Enable Logstash Upgrade OUD deployment with ELK configuration Verify the pods Verify and access the Kibana console Introduction This section describes how to install and configure logging and visualization for the oud-ds-rs Helm chart deployment. +The ELK stack consists of Elasticsearch, Logstash, and Kibana. Using ELK you can gain insights in real-time from the log data from your applications. + + + + c) Monitoring an Oracle Unified Directory Instance + /fmw-kubernetes/23.4.1/idm-products/oud/manage-oud-containers/monitoring-oud-instance/ + Mon, 01 Jan 0001 00:00:00 +0000 + + /fmw-kubernetes/23.4.1/idm-products/oud/manage-oud-containers/monitoring-oud-instance/ + Introduction Install Prometheus and Grafana Create a Kubernetes namespace Add Prometheus and Grafana Helm repositories Install the Prometheus operator View Prometheus and Grafana objects created Add the NodePort Verify using Grafana GUI Introduction After the Oracle Unified Directory instance (OUD) is set up you can monitor it using Prometheus and Grafana. +Install Prometheus and Grafana Create a Kubernetes namespace Create a Kubernetes namespace to provide a scope for Prometheus and Grafana objects such as pods and services that you create in the environment. + + + + d. Kubernetes Horizontal Pod Autoscaler + /fmw-kubernetes/23.4.1/idm-products/oud/manage-oud-containers/hpa/ + Mon, 01 Jan 0001 00:00:00 +0000 + + /fmw-kubernetes/23.4.1/idm-products/oud/manage-oud-containers/hpa/ + Prerequisite configuration Deploy the Kubernetes Metrics Server Troubleshooting Deploy HPA Testing HPA Delete the HPA Other considerations Kubernetes Horizontal Pod Autoscaler (HPA) allows automatic scaling (up and down) of the OUD servers. If load increases then extra OUD servers will be started as required. 
Similarly, if load decreases, OUD servers will be automatically shutdown. +For more information on HPA, see Horizontal Pod Autoscaling. +The instructions below show you how to configure and run an HPA to scale OUD servers, based on CPU utilization or memory resource metrics. + + + + \ No newline at end of file diff --git a/docs/23.4.1/idm-products/oud/manage-oud-containers/logging-and-visualization/index.html b/docs/23.4.1/idm-products/oud/manage-oud-containers/logging-and-visualization/index.html new file mode 100644 index 000000000..79665874b --- /dev/null +++ b/docs/23.4.1/idm-products/oud/manage-oud-containers/logging-and-visualization/index.html @@ -0,0 +1,4252 @@ + + + + + + + + + + + + b) Logging and Visualization for Helm Chart oud-ds-rs Deployment :: Oracle Fusion Middleware on Kubernetes + + + + + + + + + + + + + + + + + + + + + + + + + +
+
+
+ +
+
+ + + + + + + +
+
+ +
+ +
+ +
+ +

+ + b) Logging and Visualization for Helm Chart oud-ds-rs Deployment +

+ + + + + + +
    +
  1. Introduction
  2. +
  3. Install Elasticsearch and Kibana
  4. +
  5. Create a Kubernetes secret
  6. +
  7. Enable Logstash +
      +
    1. Upgrade OUD deployment with ELK configuration
    2. +
    3. Verify the pods
    4. +
    +
  8. +
  9. Verify and access the Kibana console
  10. +
+

Introduction

+

This section describes how to install and configure logging and visualization for the oud-ds-rs Helm chart deployment.

+

The ELK stack consists of Elasticsearch, Logstash, and Kibana. Using ELK you can gain insights in real-time from the log data from your applications.

+
    +
  • Elasticsearch is a distributed, RESTful search and analytics engine capable of solving a growing number of use cases. As the heart of the Elastic Stack, it centrally stores your data so you can discover the expected and uncover the unexpected.
  • +
  • Logstash is an open source, server-side data processing pipeline that ingests data from a multitude of sources simultaneously, transforms it, and then sends it to your favorite “stash.”
  • +
  • Kibana lets you visualize your Elasticsearch data and navigate the Elastic Stack. It gives you the freedom to select the way you give shape to your data. And you don’t always have to know what you’re looking for.
  • +
+

Install Elasticsearch and Kibana

+

If you do not already have a centralized Elasticsearch (ELK) stack, you must configure this first. For details on how to configure the ELK stack, follow Installing Elasticsearch (ELK) Stack and Kibana.

+

Create the logstash pod

+

Variables used in this chapter

+

In order to create the logstash pod, you must create a yaml file. This file contains variables which you must substitute with values applicable to your ELK environment.

+

Most of the values for the variables will be based on your ELK deployment as per Installing Elasticsearch (ELK) Stack and Kibana.

+

The table below outlines the variables and values you must set:

Variable | Sample Value | Description
<ELK_VER> | 8.3.1 | The version of logstash you want to install.
<ELK_SSL> | true | If SSL is enabled for ELK set the value to true, or if NON-SSL set to false. This value must be lowercase.
<ELK_HOSTS> | https://elasticsearch.example.com:9200 | The URL for sending logs to Elasticsearch. HTTP if NON-SSL is used.
<ELK_USER> | logstash_internal | The name of the user for logstash to access Elasticsearch.
<ELK_PASSWORD> | password | The password for ELK_USER.
<ELK_APIKEY> | apikey | The API key details.
+

You will also need the BASE64 version of the Certificate Authority (CA) certificate(s) that signed the certificate of the Elasticsearch server. If using a self-signed certificate, this is the self-signed certificate of the Elasticsearch server. See Copying the Elasticsearch Certificate for details on how to get the correct certificate. In the example below the certificate is called elk.crt.

+
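
If SSL is enabled for your ELK stack, the CA certificate is placed in the Helm chart's certs directory, as described under Enable logstash below. A minimal sketch, assuming the certificate has already been saved locally as elk.crt:

    $ cp elk.crt $WORKDIR/kubernetes/helm/oud-ds-rs/certs/elk.crt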

Create a kubernetes secret

+
    +
  1. +

    Create a Kubernetes secret for Elasticsearch using the API Key or Password.

    +

    a) If ELK uses an API Key for authentication:

    +
    $ kubectl create secret generic elasticsearch-pw-elastic -n <domain_namespace> --from-literal password=<ELK_APIKEY>
    +

    For example:

    +
    $ kubectl create secret generic elasticsearch-pw-elastic -n oudns --from-literal password=<ELK_APIKEY>
    +

    The output will look similar to the following:

    +
    secret/elasticsearch-pw-elastic created
    +

    b) If ELK uses a password for authentication:

    +
    $ kubectl create secret generic elasticsearch-pw-elastic -n <domain_namespace> --from-literal password=<ELK_PASSWORD>
    +

    For example:

    +
    $ kubectl create secret generic elasticsearch-pw-elastic -n oudns --from-literal password=<ELK_PASSWORD>
    +

    The output will look similar to the following:

    +
    secret/elasticsearch-pw-elastic created
    +

    Note: It is recommended that the ELK stack is created with authentication enabled. If authentication is not enabled, you may still create a secret using the values above.

    +
  2. +
  3. +

    Check that the dockercred secret that was created previously in Create a Kubernetes secret for cronjob images exists:

    +
    $ kubectl get secret -n <domain_namespace> | grep dockercred
    +

    For example,

    +
    $ kubectl get secret -n oudns | grep dockercred
    +

    The output will look similar to the following:

    +
    dockercred                        kubernetes.io/dockerconfigjson        1      149m
    +

    If the secret does not exist, create it as per Create a Kubernetes secret for cronjob images.

    +
  4. +
+

Enable Logstash

+
    +
  1. +

    Navigate to the $WORKDIR/kubernetes/helm directory and create a logging-override-values.yaml file as follows:

    +
    elk:
    +  imagePullSecrets:
    +    - name: dockercred
    +  IntegrationEnabled: true
    +  logStashImage: logstash:<ELK_VER>
    +  logstashConfigMap: false
    +  esindex: oudlogs-00001
    +  sslenabled: <ELK_SSL>
    +  eshosts: <ELK_HOSTS>
    +  # Note: We need to provide either esuser,espassword or esapikey
    +  esuser: <ELK_USER>
    +  espassword: elasticsearch-pw-elastic
    +  esapikey: elasticsearch-pw-elastic
    +
      +
    • Change the <ELK_VER>, <ELK_SSL>, <ELK_HOSTS>, and <ELK_USER> to match the values for your environment.
    • +
    • If using SSL, replace the elk.crt in $WORKDIR/kubernetes/helm/oud-ds-rs/certs/ with the elk.crt for your Elasticsearch server.
    • +
    • If using an API key for your ELK authentication, leave both esuser: and espassword: with no value. See the API key example after this list.
    • +
    • If using a password for ELK authentication, delete the elasticsearch-pw-elastic value from esapikey: so that it is left with no value.
    • +
    • If no authentication is used for ELK, leave esuser, espassword, and esapikey with no value assigned.
    • +
    • The rest of the lines in the yaml file should not be changed.
    • +
    +

    For example:

    +
    elk:
    +  imagePullSecrets:
    +    - name: dockercred
    +  IntegrationEnabled: true
    +  logStashImage: logstash:8.3.1
    +  logstashConfigMap: false
    +  esindex: oudlogs-00001
    +  sslenabled: true   
    +  eshosts: https://elasticsearch.example.com:9200
    +  # Note: We need to provide either esuser,espassword or esapikey
    +  esuser: logstash_internal
    +  espassword: elasticsearch-pw-elastic
    +  esapikey:
    +
  2. +
+
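For reference, the following is a sketch of the equivalent logging-override-values.yaml when an API key is used for authentication, based on the guidance above. The sample values shown, such as the logstash version and Elasticsearch URL, are the same illustrative values used in the previous example and must be replaced with your own:

elk:
  imagePullSecrets:
    - name: dockercred
  IntegrationEnabled: true
  logStashImage: logstash:8.3.1
  logstashConfigMap: false
  esindex: oudlogs-00001
  sslenabled: true
  eshosts: https://elasticsearch.example.com:9200
  # Note: We need to provide either esuser,espassword or esapikey
  esuser:
  espassword:
  esapikey: elasticsearch-pw-elastic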

Upgrade OUD deployment with ELK configuration

+
    +
  1. +

    Run the following command to upgrade the OUD deployment with the ELK configuration:

    +
    $ helm upgrade --namespace <namespace> --values <valuesfile.yaml> <releasename> oud-ds-rs --reuse-values
    +

    For example:

    +
    $ helm upgrade --namespace oudns --values logging-override-values.yaml oud-ds-rs oud-ds-rs --reuse-values
    +

    The output should look similar to the following:

    +
    Release "oud-ds-rs" has been upgraded. Happy Helming!
    +NAME: oud-ds-rs
    +LAST DEPLOYED: <DATE>
    +NAMESPACE: oudns
    +STATUS: deployed
    +REVISION: 2
    +NOTES:
    +#
    +# Copyright (c) 2020, 2022, Oracle and/or its affiliates.
    +#
    +# Licensed under the Universal Permissive License v 1.0 as shown at
    +# https://oss.oracle.com/licenses/upl
    +#
    +#
    +Since "nginx" has been chosen, follow the steps below to configure nginx ingress controller.
    +Add Repo reference to helm for retriving/installing Chart for nginx-ingress implementation.
    +command-# helm repo add ingress-nginx https://kubernetes.github.io/ingress-nginx
    +
    +Command helm install to install nginx-ingress related objects like pod, service, deployment, etc.
    +# helm install --namespace <namespace for ingress> --values nginx-ingress-values-override.yaml lbr-nginx ingress-nginx/ingress-nginx
    +
    +For details of content of nginx-ingress-values-override.yaml refer README.md file of this chart.
    +
    +Run these commands to check port mapping and services:
    +# kubectl --namespace <namespace for ingress> get services -o wide -w lbr-nginx-ingress-controller
    +# kubectl describe --namespace <namespace for oud-ds-rs chart> ingress.extensions/oud-ds-rs-http-ingress-nginx
    +# kubectl describe --namespace <namespace for oud-ds-rs chart> ingress.extensions/oud-ds-rs-admin-ingress-nginx
    +
    +Accessible interfaces through ingress:
    + (External IP Address for LoadBalancer NGINX Controller can be determined through details associated with lbr-nginx-ingress-controller)
    +
    +1. OUD Admin REST:
    +   Port: http/https
    +
    +2. OUD Data REST:
    +   Port: http/https
    +
    +3. OUD Data SCIM:
    +   Port: http/https
    +
    +4. OUD LDAP/LDAPS:
    +   Port: ldap/ldaps
    +
    +5. OUD Admin LDAPS:
    +   Port: ldaps
    +
    +Please refer to README.md from Helm Chart to find more details about accessing interfaces and configuration parameters.
    +
    +
    +Accessible interfaces through ingress:
    +
    +1. OUD Admin REST:
    +   Port: http/https
    +
    +2. OUD Data REST:
    +   Port: http/https
    +
    +3. OUD Data SCIM:
    +   Port: http/https
    +
    +Please refer to README.md from Helm Chart to find more details about accessing interfaces and configuration parameters.
    +
  2. +
+

Verify the pods

+
    +
  1. +

    Run the following command to check the logstash pod is created correctly:

    +
    $ kubectl get pods -n <namespace>
    +

    For example:

    +
    $ kubectl get pods -n oudns
    +

    The output should look similar to the following:

    +
    NAME                                  READY   STATUS      RESTARTS   AGE
    +oud-ds-rs-0                           1/1     Running     0          150m
    +oud-ds-rs-1                           1/1     Running     0          143m
    +oud-ds-rs-2                           1/1     Running     0          137m
    +oud-ds-rs-logstash-5dc8d94597-knk8g   1/1     Running     0          2m12s
    +oud-pod-cron-job-27758370-wpfq7       0/1     Completed   0          66m
    +oud-pod-cron-job-27758400-kd6pn       0/1     Completed   0          36m
    +oud-pod-cron-job-27758430-ndmgj       0/1     Completed   0          6m33s
    +

    Note: Wait a couple of minutes to make sure the pod has not had any failures or restarts. If the pod fails you can view the pod log using:

    +
    $ kubectl logs -f oud-ds-rs-logstash-<pod> -n oudns
    +

    Most errors occur due to misconfiguration of the logging-override-values.yaml. This is usually because an incorrect value has been set, or the certificate was not pasted with the correct indentation.

    +

    If the pod has errors, view the helm history to find the last working revision, for example:

    +
    $ helm history oud-ds-rs -n oudns
    +

    The output will look similar to the following:

    +
    REVISION        UPDATED                         STATUS          CHART           APP VERSION     DESCRIPTION
    +1               Tue Jan 10 14:06:01 2023        superseded      oud-ds-rs-0.2   12.2.1.4.0      Install complete
    +2               Tue Jan 10 16:34:21 2023        deployed        oud-ds-rs-0.2   12.2.1.4.0      Upgrade complete
    +

    Rollback to the previous working revision by running:

    +
    $ helm rollback <release> <revision> -n <domain_namespace>
    +

    For example:

    +
    helm rollback oud-ds-rs 1 -n oudns
    +

    Once you have resolved the issue in the yaml files, run the helm upgrade command outlined earlier to recreate the logstash pod.

    +
  2. +
+

Verify and access the Kibana console

+

To access the Kibana console you will need the Kibana URL as per Installing Elasticsearch (ELK) Stack and Kibana.

+

For Kibana 7.7.x and below:

+
    +
  1. +

    Access the Kibana console with http://<hostname>:<port>/app/kibana and log in with your username and password.

    +
  2. +
  3. +

    From the Navigation menu, navigate to Management > Kibana > Index Patterns.

    +
  4. +
  5. +

    In the Create Index Pattern page enter oudlogs* for the Index pattern and click Next Step.

    +
  6. +
  7. +

    In the Configure settings page, from the Time Filter field name drop down menu select @timestamp and click Create index pattern.

    +
  8. +
  9. +

    Once the index pattern is created, click on Discover in the navigation menu to view the OUD logs.

    +
  10. +
+

For Kibana version 7.8.x and above:

+
    +
  1. +

    Access the Kibana console with http://<hostname>:<port>/app/kibana and log in with your username and password.

    +
  2. +
  3. +

    From the Navigation menu, navigate to Management > Stack Management.

    +
  4. +
  5. +

    Click Data Views in the Kibana section.

    +
  6. +
  7. +

    Click Create Data View and enter the following information:

    +
      +
    • Name: oudlogs*
    • +
    • Timestamp: @timestamp
    • +
    +
  8. +
  9. +

    Click Create Data View.

    +
  10. +
  11. +

    From the Navigation menu, click Discover to view the log file entries.

    +
  12. +
  13. +

    From the drop down menu, select oudlogs* to view the log file entries.

    +
  14. +
+ + +
+ +
+ + + +
+ + +
+ + + +
+ +
+
+
+ + + + + + + + + + + + + + + + + + + diff --git a/docs/23.4.1/idm-products/oud/manage-oud-containers/monitoring-oud-instance/index.html b/docs/23.4.1/idm-products/oud/manage-oud-containers/monitoring-oud-instance/index.html new file mode 100644 index 000000000..23734416a --- /dev/null +++ b/docs/23.4.1/idm-products/oud/manage-oud-containers/monitoring-oud-instance/index.html @@ -0,0 +1,4112 @@ + + + + + + + + + + + + c) Monitoring an Oracle Unified Directory Instance :: Oracle Fusion Middleware on Kubernetes + + + + + + + + + + + + + + + + + + + + + + + + + +
+
+
+ +
+
+ + + + + + + +
+
+ +
+ +
+ +
+ +

+ + c) Monitoring an Oracle Unified Directory Instance +

+ + + + + + +
    +
  1. Introduction
  2. +
  3. Install Prometheus and Grafana +
      +
    1. Create a Kubernetes namespace
    2. +
    3. Add Prometheus and Grafana Helm repositories
    4. +
    5. Install the Prometheus operator
    6. +
    7. View Prometheus and Grafana objects created
    8. +
    9. Add the NodePort
    10. +
    +
  4. +
  5. Verify using Grafana GUI
  6. +
+

Introduction

+

After the Oracle Unified Directory (OUD) instance is set up, you can monitor it using Prometheus and Grafana.

+

Install Prometheus and Grafana

+

Create a Kubernetes namespace

+
    +
  1. +

    Create a Kubernetes namespace to provide a scope for Prometheus and Grafana objects such as pods and services that you create in the environment. To create your namespace issue the following command:

    +
    $ kubectl create namespace <namespace>
    +

    For example:

    +
    $ kubectl create namespace monitoring
    +

    The output will look similar to the following:

    +
    namespace/monitoring created
    +
  2. +
+

Add Prometheus and Grafana Helm repositories

+
    +
  1. +

    Add the Prometheus and Grafana Helm repositories by issuing the following command:

    +
    $ helm repo add prometheus https://prometheus-community.github.io/helm-charts
    +

    The output will look similar to the following:

    +
    "prometheus" has been added to your repositories
    +
  2. +
  3. +

    Run the following command to update the repositories:

    +
    $ helm repo update
    +

    The output will look similar to the following:

    +
    Hang tight while we grab the latest from your chart repositories...
    +...Successfully got an update from the "stable" chart repository
    +...Successfully got an update from the "prometheus" chart repository
    +...Successfully got an update from the "prometheus-community" chart repository
    +
    +Update Complete.  Happy Helming!
    +
  4. +
+

Install the Prometheus operator

+
    +
  1. +

    Install the Prometheus operator using the helm command:

    +
    $ helm install <release_name> prometheus/kube-prometheus-stack -n <namespace>
    +

    For example:

    +
    $ helm install monitoring prometheus/kube-prometheus-stack -n monitoring
    +

    The output should look similar to the following:

    +
    NAME: monitoring
    +LAST DEPLOYED: <DATE>
    +NAMESPACE: monitoring
    +STATUS: deployed
    +REVISION: 1
    +NOTES:
    +kube-prometheus-stack has been installed. Check its status by running:
    +  kubectl --namespace monitoring get pods -l "release=monitoring"
    +
    +Visit https://github.com/prometheus-operator/kube-prometheus for instructions on how to create & configure Alertmanager and Prometheus instances using the Operator.
    +

    Note: If your cluster does not have access to the internet to pull external images, such as Prometheus or Grafana, you must load the images into a local container registry. You must then install as follows:

    +
    helm install --set grafana.image.repository=container-registry.example.com/grafana --set grafana.image.tag=8.4.2 monitoring prometheus/kube-prometheus-stack -n monitoring
    +
  2. +
+

View Prometheus and Grafana objects created

+

View the objects created for Prometheus and Grafana by issuing the following command:

+
$ kubectl get all,service,pod -o wide -n <namespace>
+

For example:

+
$ kubectl get all,service,pod -o wide -n monitoring
+

The output will look similar to the following:

+
NAME                                                         READY   STATUS    RESTARTS   AGE   IP               NODE                 NOMINATED NODE   READINESS GATES
+pod/alertmanager-monitoring-kube-prometheus-alertmanager-0   2/2     Running   0          36s   10.244.1.78      <worker-node>   <none>           <none>
+pod/monitoring-grafana-578f79599c-qc9gd                      3/3     Running   0          47s   10.244.2.200     <worker-node>   <none>           <none>
+pod/monitoring-kube-prometheus-operator-65cdf7995-kndgg      1/1     Running   0          47s   10.244.2.199     <worker-node>   <none>           <none>
+pod/monitoring-kube-state-metrics-56bfd4f44f-85l4p           1/1     Running   0          47s   10.244.1.76      <worker-node>   <none>           <none>
+pod/monitoring-prometheus-node-exporter-g2x9g                1/1     Running   0          47s   100.102.48.121   <master-node>   <none>           <none>
+pod/monitoring-prometheus-node-exporter-p9kkq                1/1     Running   0          47s   100.102.48.84    <worker-node>   <none>           <none>
+pod/monitoring-prometheus-node-exporter-rzhrd                1/1     Running   0          47s   100.102.48.28    <worker-node>   <none>           <none>
+pod/prometheus-monitoring-kube-prometheus-prometheus-0       2/2     Running   0          35s   10.244.1.79      <worker-node>   <none>           <none>
+
+NAME                                              TYPE        CLUSTER-IP       EXTERNAL-IP   PORT(S)                      AGE   SELECTOR
+service/alertmanager-operated                     ClusterIP   None             <none>        9093/TCP,9094/TCP,9094/UDP   36s   app.kubernetes.io/name=alertmanager
+service/monitoring-grafana                        ClusterIP   10.110.193.30    <none>        80/TCP                       47s   app.kubernetes.io/instance=monitoring,app.kubernetes.io/name=grafana
+service/monitoring-kube-prometheus-alertmanager   ClusterIP   10.104.2.37      <none>        9093/TCP                     47s   alertmanager=monitoring-kube-prometheus-alertmanager,app.kubernetes.io/name=alertmanager
+service/monitoring-kube-prometheus-operator       ClusterIP   10.99.162.229    <none>        443/TCP                      47s   app=kube-prometheus-stack-operator,release=monitoring
+service/monitoring-kube-prometheus-prometheus     ClusterIP   10.108.161.46    <none>        9090/TCP                     47s   app.kubernetes.io/name=prometheus,prometheus=monitoring-kube-prometheus-prometheus
+service/monitoring-kube-state-metrics             ClusterIP   10.111.162.185   <none>        8080/TCP                     47s   app.kubernetes.io/instance=monitoring,app.kubernetes.io/name=kube-state-metrics
+service/monitoring-prometheus-node-exporter       ClusterIP   10.109.21.136    <none>        9100/TCP                     47s   app=prometheus-node-exporter,release=monitoring
+service/prometheus-operated                       ClusterIP   None             <none>        9090/TCP                     35s   app.kubernetes.io/name=prometheus
+
+NAME                                                 DESIRED   CURRENT   READY   UP-TO-DATE   AVAILABLE   NODE SELECTOR   AGE   CONTAINERS      IMAGES                                    SELECTOR
+daemonset.apps/monitoring-prometheus-node-exporter   3         3         3       3            3           <none>          47s   node-exporter   quay.io/prometheus/node-exporter:v1.3.1   app=prometheus-node-exporter,release=monitoring
+
+NAME                                                  READY   UP-TO-DATE   AVAILABLE   AGE   CONTAINERS                                            IMAGES                                                                                          SELECTOR
+deployment.apps/monitoring-grafana                    1/1     1            1           47s   grafana-sc-dashboard,grafana-sc-datasources,grafana   quay.io/kiwigrid/k8s-sidecar:1.15.6,quay.io/kiwigrid/k8s-sidecar:1.15.6,grafana/grafana:8.4.2   app.kubernetes.io/instance=monitoring,app.kubernetes.io/name=grafana
+deployment.apps/monitoring-kube-prometheus-operator   1/1     1            1           47s   kube-prometheus-stack                                 quay.io/prometheus-operator/prometheus-operator:v0.55.0                                         app=kube-prometheus-stack-operator,release=monitoring
+deployment.apps/monitoring-kube-state-metrics         1/1     1            1           47s   kube-state-metrics                                    k8s.gcr.io/kube-state-metrics/kube-state-metrics:v2.4.1                                         app.kubernetes.io/instance=monitoring,app.kubernetes.io/name=kube-state-metrics
+
+NAME                                                            DESIRED   CURRENT   READY   AGE   CONTAINERS                                            IMAGES                                                                                          SELECTOR
+replicaset.apps/monitoring-grafana-578f79599c                   1         1         1       47s   grafana-sc-dashboard,grafana-sc-datasources,grafana   quay.io/kiwigrid/k8s-sidecar:1.15.6,quay.io/kiwigrid/k8s-sidecar:1.15.6,grafana/grafana:8.4.2   app.kubernetes.io/instance=monitoring,app.kubernetes.io/name=grafana,pod-template-hash=578f79599c
+replicaset.apps/monitoring-kube-prometheus-operator-65cdf7995   1         1         1       47s   kube-prometheus-stack                                 quay.io/prometheus-operator/prometheus-operator:v0.55.0                                         app=kube-prometheus-stack-operator,pod-template-hash=65cdf7995,release=monitoring
+replicaset.apps/monitoring-kube-state-metrics-56bfd4f44f        1         1         1       47s   kube-state-metrics                                    k8s.gcr.io/kube-state-metrics/kube-state-metrics:v2.4.1                                         app.kubernetes.io/instance=monitoring,app.kubernetes.io/name=kube-state-metrics,pod-template-hash=56bfd4f44f
+
+NAME                                                                    READY   AGE   CONTAINERS                     IMAGES
+statefulset.apps/alertmanager-monitoring-kube-prometheus-alertmanager   1/1     36s   alertmanager,config-reloader   quay.io/prometheus/alertmanager:v0.23.0,quay.io/prometheus-operator/prometheus-config-reloader:v0.55.0
+statefulset.apps/prometheus-monitoring-kube-prometheus-prometheus       1/1     35s   prometheus,config-reloader     quay.io/prometheus/prometheus:v2.33.5,quay.io/prometheus-operator/prometheus-config-reloader:v0.55.0
+

Add the NodePort

+
    +
  1. +

    Edit the grafana service to add the NodePort:

    +
    $ kubectl edit service/<deployment_name>-grafana -n <namespace>
    +

    For example:

    +
    $ kubectl edit service/monitoring-grafana -n monitoring
    +

    Note: This opens an edit session for the service where parameters can be changed using standard vi commands. Alternatively, you can patch the service non-interactively, as shown in the sketch after this list.

    +

    Change the ports entry and add nodePort: 30091 and type: NodePort:

    +
      ports:
    +  - name: http-web
    +    nodePort: 30091
    +    port: 80
    +    protocol: TCP
    +    targetPort: 3000
    +  selector:
    +    app.kubernetes.io/instance: monitoring
    +    app.kubernetes.io/name: grafana
    +  sessionAffinity: None
    +  type: NodePort
    +
  2. +
  3. +

    Save the file and exit (:wq).

    +
  4. +
+
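If you prefer not to edit the service interactively, the same change can be applied with kubectl patch. This is a sketch only, assuming the service is monitoring-grafana in the monitoring namespace and that nodePort 30091 is available on your cluster:

$ kubectl patch service monitoring-grafana -n monitoring \
  -p '{"spec":{"type":"NodePort","ports":[{"name":"http-web","port":80,"nodePort":30091}]}}'

You can then confirm the change with kubectl get service monitoring-grafana -n monitoring.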

Verify using Grafana GUI

+
    +
  1. +

    Access the Grafana GUI using http://<HostIP>:<nodeport> and log in with admin/prom-operator. Change the password when prompted.

    +
  2. +
  3. +

    Download the K8 Cluster Detail Dashboard json file from: https://grafana.com/grafana/dashboards/10856.

    +
  4. +
  5. +

    Import the Grafana dashboard by navigating in the left hand menu to Dashboards > Import. Click Upload JSON file and select the downloaded json file. In the Prometheus drop down box select Prometheus. Click Import. The dashboard should be displayed.

    +
  6. +
  7. +

    Verify your installation by viewing some of the customized dashboard views.

    +
  8. +
+ + +
+ +
+ + + +
+ + +
+ + + +
+ +
+
+
+ + + + + + + + + + + + + + + + + + + diff --git a/docs/23.4.1/idm-products/oud/manage-oud-containers/scaling-up-down/index.html b/docs/23.4.1/idm-products/oud/manage-oud-containers/scaling-up-down/index.html new file mode 100644 index 000000000..9d90ce0d6 --- /dev/null +++ b/docs/23.4.1/idm-products/oud/manage-oud-containers/scaling-up-down/index.html @@ -0,0 +1,4069 @@ + + + + + + + + + + + + a) Scaling Up/Down OUD Pods :: Oracle Fusion Middleware on Kubernetes + + + + + + + + + + + + + + + + + + + + + + + + + +
+
+
+ +
+
+ + + + + + + +
+
+ +
+ +
+ +
+ +

+ + a) Scaling Up/Down OUD Pods +

+ + + + + + +

Introduction

+

This section describes how to increase or decrease the number of OUD pods in the Kubernetes deployment.

+

Note: The instructions below are for scaling servers up or down manually. If you wish to use autoscaling, see Kubernetes Horizontal Pod Autoscaler. If you have enabled autoscaling, it is recommended to delete the autoscaler before running the commands below.

+
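For example, to check whether an HPA exists for the deployment and remove it before scaling manually, you could run the following. The namespace oudns is assumed and the HPA name in your deployment may differ:

$ kubectl get hpa -n oudns
$ kubectl delete hpa <hpa_name> -n oudns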

View existing OUD pods

+

By default the oud-ds-rs helm chart deployment starts three pods: oud-ds-rs-0 and two replica pods oud-ds-rs-1 and oud-ds-rs-2.

+

The number of pods started is determined by the replicaCount, which is set to 3 by default. A value of 3 starts the three pods above.

+

To scale up or down the number of OUD pods, set replicaCount accordingly.

+
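To see the replicaCount currently in effect for the release, you can inspect the values supplied to helm. This sketch assumes the release name oud-ds-rs and namespace oudns used throughout the examples:

$ helm get values oud-ds-rs -n oudns --all | grep replicaCount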

Run the following command to view the number of pods in the OUD deployment:

+
$ kubectl --namespace <namespace> get pods -o wide
+

For example:

+
$ kubectl --namespace oudns get pods -o wide
+

The output will look similar to the following:

+
NAME              READY   STATUS    RESTARTS   AGE     IP             NODE          NOMINATED NODE   READINESS GATES
+pod/oud-ds-rs-0   1/1     Running   0          22h   10.244.0.195   <Worker Node>   <none>           <none>
+pod/oud-ds-rs-1   1/1     Running   0          22h   10.244.0.194   <Worker Node>   <none>           <none>
+pod/oud-ds-rs-2   1/1     Running   0          22h   10.244.0.193   <Worker Node>   <none>           <none>
+

Scaling up OUD pods

+

In this example, replicaCount is increased to 4 which creates a new OUD pod oud-ds-rs-3 with associated services created.

+

You can scale up the number of OUD pods using one of the following methods:

+
    +
  1. Using a YAML file
  2. +
  3. Using --set argument
  4. +
+

Using a YAML file

+
    +
  1. +

    Navigate to the $WORKDIR/kubernetes/helm directory:

    +
    $ cd $WORKDIR/kubernetes/helm
    +
  2. +
  3. +

    Create a oud-scaleup-override.yaml file that contains:

    +
    replicaCount: 4
    +
  4. +
  5. +

    Run the following command to scale up the OUD pods:

    +
    $ helm upgrade --namespace <namespace> \
    +--values oud-scaleup-override.yaml \
    +<release_name> oud-ds-rs --reuse-values
    +

    For example:

    +
    $ helm upgrade --namespace oudns \
    +--values oud-scaleup-override.yaml \
    +oud-ds-rs oud-ds-rs --reuse-values
    +
  6. +
+

Using --set argument

+
    +
  1. +

    Run the following command to scale up the OUD pods:

    +
    $ helm upgrade --namespace <namespace> \
    +--set replicaCount=4 \
    +<release_name> oud-ds-rs --reuse-values
    +

    For example:

    +
    $ helm upgrade --namespace oudns \
    +--set replicaCount=4 \
    +oud-ds-rs oud-ds-rs --reuse-values
    +
  2. +
+

Verify the pods

+
    +
  1. +

    Verify the new OUD pod oud-ds-rs-3 has started:

    +
    $ kubectl get pod,service -o wide -n <namespace> 
    +

    For example:

    +
    $ kubectl get pods,service -n oudns
    +

    The output will look similar to the following:

    +
    NAME              READY   STATUS    RESTARTS   AGE     IP             NODE          NOMINATED NODE   READINESS GATES
    +pod/oud-ds-rs-0   1/1     Running   0          22h   10.244.0.195   <Worker Node>   <none>           <none>
    +pod/oud-ds-rs-1   1/1     Running   0          22h   10.244.0.194   <Worker Node>   <none>           <none>
    +pod/oud-ds-rs-2   1/1     Running   0          22h   10.244.0.193   <Worker Node>   <none>           <none>
    +pod/oud-ds-rs-3   1/1     Running   0          17m   10.244.0.193   <Worker Node>   <none>           <none>
    +     
    +NAME                          TYPE        CLUSTER-IP       EXTERNAL-IP   PORT(S)                      AGE     SELECTOR
    +service/oud-ds-rs             ClusterIP   None             <none>        1444/TCP,1888/TCP,1389/TCP,1636/TCP,1080/TCP,1081/TCP,1898/TCP   22h    app.kubernetes.io/instance=oud-ds-rs,app.kubernetes.io/name=oud-ds-rs
    +service/oud-ds-rs-0           ClusterIP   None             <none>        1444/TCP,1888/TCP,1898/TCP                                       22h    app.kubernetes.io/instance=oud-ds-rs,app.kubernetes.io/name=oud-ds-rs,statefulset.kubernetes.io/pod-name=oud-ds-rs-0
    +service/oud-ds-rs-1           ClusterIP   None             <none>        1444/TCP,1888/TCP,1898/TCP                                       22h    app.kubernetes.io/instance=oud-ds-rs,app.kubernetes.io/name=oud-ds-rs,statefulset.kubernetes.io/pod-name=oud-ds-rs-1
    +service/oud-ds-rs-2           ClusterIP   None             <none>        1444/TCP,1888/TCP,1898/TCP                                       22h    app.kubernetes.io/instance=oud-ds-rs,app.kubernetes.io/name=oud-ds-rs,statefulset.kubernetes.io/pod-name=oud-ds-rs-2
    +service/oud-ds-rs-3           ClusterIP   None             <none>        1444/TCP,1888/TCP,1898/TCP                                       9m9s   app.kubernetes.io/instance=oud-ds-rs,app.kubernetes.io/name=oud-ds-rs,statefulset.kubernetes.io/pod-name=oud-ds-rs-3
    +service/oud-ds-rs-http-0      ClusterIP   10.104.112.93    <none>        1080/TCP,1081/TCP                                                22h    app.kubernetes.io/instance=oud-ds-rs,app.kubernetes.io/name=oud-ds-rs,statefulset.kubernetes.io/pod-name=oud-ds-rs-0
    +service/oud-ds-rs-http-1      ClusterIP   10.103.105.70    <none>        1080/TCP,1081/TCP                                                22h    app.kubernetes.io/instance=oud-ds-rs,app.kubernetes.io/name=oud-ds-rs,statefulset.kubernetes.io/pod-name=oud-ds-rs-1
    +service/oud-ds-rs-http-2      ClusterIP   10.110.160.107   <none>        1080/TCP,1081/TCP                                                22h    app.kubernetes.io/instance=oud-ds-rs,app.kubernetes.io/name=oud-ds-rs,statefulset.kubernetes.io/pod-name=oud-ds-rs-2
    +service/oud-ds-rs-http-3      ClusterIP   10.102.93.179    <none>        1080/TCP,1081/TCP                                                9m9s   app.kubernetes.io/instance=oud-ds-rs,app.kubernetes.io/name=oud-ds-rs,statefulset.kubernetes.io/pod-name=oud-ds-rs-3
    +service/oud-ds-rs-lbr-admin   ClusterIP   10.99.238.222    <none>        1888/TCP,1444/TCP                                                22h    app.kubernetes.io/instance=oud-ds-rs,app.kubernetes.io/name=oud-ds-rs
    +service/oud-ds-rs-lbr-http    ClusterIP   10.101.250.196   <none>        1080/TCP,1081/TCP                                                22h    app.kubernetes.io/instance=oud-ds-rs,app.kubernetes.io/name=oud-ds-rs
    +service/oud-ds-rs-lbr-ldap    ClusterIP   10.104.149.90    <none>        1389/TCP,1636/TCP                                                22h    app.kubernetes.io/instance=oud-ds-rs,app.kubernetes.io/name=oud-ds-rs
    +service/oud-ds-rs-ldap-0      ClusterIP   10.109.255.221   <none>        1389/TCP,1636/TCP                                                22h    app.kubernetes.io/instance=oud-ds-rs,app.kubernetes.io/name=oud-ds-rs,statefulset.kubernetes.io/pod-name=oud-ds-rs-0
    +service/oud-ds-rs-ldap-1      ClusterIP   10.111.135.142   <none>        1389/TCP,1636/TCP                                                22h    app.kubernetes.io/instance=oud-ds-rs,app.kubernetes.io/name=oud-ds-rs,statefulset.kubernetes.io/pod-name=oud-ds-rs-1
    +service/oud-ds-rs-ldap-2      ClusterIP   10.100.8.145     <none>        1389/TCP,1636/TCP                                                22h    app.kubernetes.io/instance=oud-ds-rs,app.kubernetes.io/name=oud-ds-rs,statefulset.kubernetes.io/pod-name=oud-ds-rs-2
    +service/oud-ds-rs-ldap-3      ClusterIP   10.111.177.46    <none>        1389/TCP,1636/TCP                                                9m9s   app.kubernetes.io/instance=oud-ds-rs,app.kubernetes.io/name=oud-ds-rs,statefulset.kubernetes.io/pod-name=oud-ds-rs-3
    +

    Note: It will take several minutes before all the services listed above are shown. While the oud-ds-rs-3 pod has a READY status of 0/1, the pod is started but the OUD server associated with it is still starting. While the pod is starting you can check the startup status in the pod log by running the following command:

    +
    $ kubectl logs oud-ds-rs-3 -n oudns
    +
  2. +
+

Scaling down OUD pods

+

Scaling down OUD pods is performed in exactly the same way as in Scaling up OUD pods, except the replicaCount is reduced to the required number of pods.

+

Once the helm command is executed, the pod(s) will move to a Terminating state. In the example below replicaCount was reduced from 4 to 3 and hence oud-ds-rs-3 has moved to Terminating:

+
$ kubectl get pods -n oudns
+   
+NAME              READY   STATUS        RESTARTS   AGE     IP             NODE          NOMINATED NODE   READINESS GATES
+pod/oud-ds-rs-0   1/1     Running       0          22h   10.244.0.195   <Worker Node>   <none>           <none>
+pod/oud-ds-rs-1   1/1     Running       0          22h   10.244.0.194   <Worker Node>   <none>           <none>
+pod/oud-ds-rs-2   1/1     Running       0          22h   10.244.0.193   <Worker Node>   <none>           <none>
+pod/oud-ds-rs-3   1/1     Terminating   0          21m   10.244.0.193   <Worker Node>   <none>           <none>
+

The pod will take a minute or two to stop and then will disappear:

+
$ kubectl get pods -n oudns
+   
+NAME              READY   STATUS        RESTARTS   AGE     IP             NODE          NOMINATED NODE   READINESS GATES
+pod/oud-ds-rs-0   1/1     Running       0          22h   10.244.0.195   <Worker Node>   <none>           <none>
+pod/oud-ds-rs-1   1/1     Running       0          22h   10.244.0.194   <Worker Node>   <none>           <none>
+pod/oud-ds-rs-2   1/1     Running       0          22h   10.244.0.193   <Worker Node>   <none>           <none>
+
+ +
+ +
+ + + +
+ + +
+ + + +
+ +
+
+
+ + + + + + + + + + + + + + + + + + + diff --git a/docs/23.4.1/idm-products/oud/patch-and-upgrade/index.html b/docs/23.4.1/idm-products/oud/patch-and-upgrade/index.html new file mode 100644 index 000000000..5d1e25bd2 --- /dev/null +++ b/docs/23.4.1/idm-products/oud/patch-and-upgrade/index.html @@ -0,0 +1,4465 @@ + + + + + + + + + + + + Patch and Upgrade :: Oracle Fusion Middleware on Kubernetes + + + + + + + + + + + + + + + + + + + + + + + + + +
+
+
+ +
+ +
+ +
+ +
+ +
+ +

+ + Patch and Upgrade +

+ + + + + + +

In this section you learn how to upgrade OUD from a previous version. Follow the section relevant to the version you are upgrading from.

+
    +
  1. Upgrading to October 23 (23.4.1) from April 23 (23.2.1) or later
  2. +
  3. Upgrading to October 23 (23.4.1) from October 22 (22.4.1) or January 23 (23.1.1)
  4. +
  5. Upgrading to October 23 (23.4.1) from July 22 (22.3.1)
  6. +
  7. Upgrading to October 23 (23.4.1) from releases prior to July 22 (22.3.1)
  8. +
  9. Upgrading Elasticsearch and Kibana
  10. +
+

Note: If you are on July 22 (22.3.1) or later, and have Kubernetes Horizontal Pod Autoscaler (HPA) enabled, you must disable HPA before performing the steps in the relevant upgrade section. See Delete the HPA.

+

Upgrading to October 23 (23.4.1) from April 23 (23.2.1) or later

+

The instructions below are for upgrading from April 23 (23.2.1) or later to October 23 (23.4.1).

+

Note: If you are not using Oracle Container Registry or your own container registry, then you must first load the new container image on all nodes in your Kubernetes cluster.

+
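For example, if the new image has been supplied to you as a tar archive, a sketch of loading it on each node might look like the following. The archive name is illustrative only, and you should substitute docker for podman if that is your container runtime:

$ podman load -i oud_cpu-12.2.1.4-jdk8-ol7-October23.tar
$ podman images | grep oud_cpu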
    +
  1. +

    Navigate to the $WORKDIR/kubernetes/helm directory:

    +
    $ cd $WORKDIR/kubernetes/helm
    +
  2. +
  3. +

    Create a oud-patch-override.yaml file that contains:

    +
    image:
    +  repository: <image_location>
    +  tag: <image_tag>
    +imagePullSecrets:
    +  - name: orclcred
    +

    For example:

    +
    image:
    +  repository: container-registry.oracle.com/middleware/oud_cpu
    +  tag: 12.2.1.4-jdk8-ol7-<October'23>
    +imagePullSecrets:
    +  - name: orclcred
    +

    The following caveats exist:

    +
      +
    • +

      If you are not using Oracle Container Registry or your own container registry for your OUD container image, then you can remove the following:

      +
      imagePullSecrets:
      +  - name: orclcred
      +
    • +
    +
  4. +
  5. +

    Run the following command to upgrade the deployment:

    +
    $ helm upgrade --namespace <namespace> \
    +--values oud-patch-override.yaml \
    +<release_name> oud-ds-rs --reuse-values
    +

    For example:

    +
    $ helm upgrade --namespace oudns \
    +--values oud-patch-override.yaml \
    +oud-ds-rs oud-ds-rs --reuse-values
    +
  6. +
+

Upgrading to October 23 (23.4.1) from October 22 (22.4.1) or January 23 (23.1.1)

+

The instructions below are for upgrading from October 22 (22.4.1) or January 23 (23.1.1), to October 23 (23.4.1).

+

Note: If you are not using Oracle Container Registry or your own container registry, then you must first load the new container image on all nodes in your Kubernetes cluster.

+

Scale down OUD

+
    +
  1. +

    Make sure the base pod (oud-ds-rs-0) is running and healthy (READY 1/1) by running the following command:

    +
    $ kubectl get pods -n <namespace>
    +

    For example:

    +
    $ kubectl get pods -n oudns
    +

    The output will look similar to the following:

    +
    NAME                              READY   STATUS      RESTARTS   AGE
    +oud-ds-rs-0                       1/1     Running     0          21h
    +oud-ds-rs-1                       1/1     Running     0          20h
    +oud-ds-rs-2                       1/1     Running     0          20h
    +
  2. +
  3. +

    Ensure dsreplication is healthy by running the following command:

    +
    $ kubectl --namespace <namespace> exec -it -c <containername> <podname> -- \
    +/u01/oracle/user_projects/<OUD Instance/Pod Name>/OUD/bin/dsreplication status \
    +--trustAll --hostname <OUD Instance/Pod Name> --port 1444 --adminUID admin \
    +--dataToDisplay compat-view --dataToDisplay rs-connections
    +

    For example:

    +
    $ kubectl --namespace oudns exec -it -c oud-ds-rs oud-ds-rs-0 -- \
    +/u01/oracle/user_projects/oud-ds-rs-0/OUD/bin/dsreplication status \
    +--trustAll --hostname oud-ds-rs-0 --port 1444 --adminUID admin \
    +--dataToDisplay compat-view --dataToDisplay rs-connections
    +

    The output will look similar to the following:

    +
    NAME                              READY   STATUS      RESTARTS   AGE
    +   
    +>>>> Specify Oracle Unified Directory LDAP connection parameters
    +    
    +Password for user 'admin':
    +    
    +Establishing connections and reading configuration ..... Done.
    +    
    +dc=example,dc=com - Replication Enabled
    +=======================================
    +    
    +Server               : Entries : M.C. [1] : A.O.M.C. [2] : Port [3] : Encryption [4] : Trust [5] : U.C. [6] : Status [7] : ChangeLog [8] : Group ID [9] : Connected To [10]
    +---------------------:---------:----------:--------------:----------:----------------:-----------:----------:------------:---------------:--------------:-------------------------------
    +oud-ds-rs-0:1444     : 202     : 0        : 0            : 1898     : Disabled       : Trusted   : --       : Normal     : Enabled       : 1            : oud-ds-rs-0:1898
    +                     :         :          :              :          :                :           :          :            :               :              : (GID=1)
    +oud-ds-rs-1:1444     : 202     : 0        : 0            : 1898     : Disabled       : Trusted   : --       : Normal     : Enabled       : 1            : oud-ds-rs-1:1898
    +                     :         :          :              :          :                :           :          :            :               :              : (GID=1)
    +oud-ds-rs-2:1444     : 202     : 0        : 0            : 1898     : Disabled       : Trusted   : --       : Normal     : Enabled       : 1            : oud-ds-rs-2:1898
    +                     :         :          :              :          :                :           :          :            :               :              : (GID=1)
    +    
    +Replication Server [11]        : RS #1 : RS #2 : RS #3
    +-------------------------------:-------:-------:------
    +oud-ds-rs-0:1898               : --    : Yes   : Yes
    +(#1)                           :       :       :
    +oud-ds-rs-1:1898               : Yes   : --    : Yes
    +(#2)                           :       :       :
    +oud-ds-rs-2:1898               : Yes   : Yes   : --
    +(#3)                           :       :       :
    +    
    +etc...
    +
  4. +
  5. +

    Scale down OUD by reducing the replicas to 1:

    +
    $ cd $WORKDIR/kubernetes/helm
    +$ helm upgrade -n oudns --set replicaCount=1 oud-ds-rs oud-ds-rs --reuse-values
    +

    Note: The $WORKDIR is the directory for your existing release, not October 23.

    +

    The output will be similar to the following:

    +
    Release "oud-ds-rs" has been upgraded. Happy Helming!
    +NAME: oud-ds-rs
    +LAST DEPLOYED: <DATE>
    +NAMESPACE: oudns
    +STATUS: deployed
    +REVISION: 2
    +NOTES:
    +   
    +etc..
    +

    Make sure the replica pods are shut down before proceeding:

    +
    $ kubectl get pods -n oudns
    +   
    +   
    +NAME                              READY   STATUS      RESTARTS   AGE
    +oud-ds-rs-0                       1/1     Running     0          21h
    +

    Note: It will take several minutes before the replica pods disappear.

    +
  6. +
+

Backup OUD data

+
    +
  1. +

    Take a backup of the OUD data for every pod in the NFS shared volume:

    +
    $ kubectl exec -it -n oudns oud-ds-rs-0 -- bash
    +[oracle@oud-ds-rs-0 oracle]$ cd user_projects
    +[oracle@oud-ds-rs-0 user_projects]$ mkdir OUD_backup_<DATE>
    +[oracle@oud-ds-rs-0 user_projects]$ cp -r oud-ds-rs-* OUD_backup_<DATE>/
    +
  2. +
  3. +

    Make sure the backup was created successfully:

    +
    [oracle@oud-ds-rs-0 user_projects]$ ls -l OUD_backup_<date>
    +total 2
    +drwxr-x---. 5 oracle root 3 <DATE> oud-ds-rs-0
    +drwxr-x---. 5 oracle root 3 <DATE> oud-ds-rs-1
    +drwxr-x---. 5 oracle root 3 <DATE> oud-ds-rs-2
    +
  4. +
  5. +

    Remove the non-zero pod directories oud-ds-rs-1 and oud-ds-rs-2:

    +
    [oracle@oud-ds-rs-0 user_projects]$ rm -rf oud-ds-rs-1 oud-ds-rs-2
    +
  6. +
  7. +

    Exit the oud-ds-rs-0 bash session:

    +
    [oracle@oud-ds-rs-0 user_projects]$ exit
    +
  8. +
+

Setup the October 23 code repository to deploy OUD

+
    +
  1. +

    Create a working directory on the persistent volume to setup the latest source code:

    +
    $ mkdir <persistent_volume>/<workdir>
    +

    For example:

    +
    $ mkdir /scratch/shared/OUDK8SJuly23
    +
  2. +
  3. +

    Download the latest OUD deployment scripts from the OUD repository:

    +
    $ cd <persistent_volume>/<workdir>
    +$ git clone https://github.com/oracle/fmw-kubernetes.git --branch release/23.4.1
    +

    For example:

    +
    $ cd /scratch/shared/OUDK8SJuly23
    +$ git clone https://github.com/oracle/fmw-kubernetes.git --branch release/23.4.1
    +
  4. +
  5. +

    Set the $WORKDIR environment variable as follows:

    +
    $ export WORKDIR=<workdir>/fmw-kubernetes/OracleUnifiedDirectory
    +

    For example:

    +
    $ export WORKDIR=/scratch/shared/OUDK8SJuly23/fmw-kubernetes/OracleUnifiedDirectory
    +
  6. +
+

Update the OUD container image

+
    +
  1. +

    Navigate to the $WORKDIR/kubernetes/helm directory:

    +
    $ cd $WORKDIR/kubernetes/helm
    +
  2. +
  3. +

    Create a oud-patch-override.yaml file that contains:

    +
    image:
    +  repository: <image_location>
    +  tag:  <image_tag>
    +  pullPolicy: IfNotPresent
    +imagePullSecrets:
    +  - name: orclcred
    +oudConfig:
    +  cleanupbeforeStart: false
    +  disablereplicationbeforeStop: false
    +replicaCount: 3
    +

    For example:

    +
    image:
    +  repository: container-registry.oracle.com/middleware/oud_cpu
    +  tag:  12.2.1.4-jdk8-ol7-<October'23>
    +  pullPolicy: IfNotPresent
    +imagePullSecrets:
    +  - name: orclcred
    +oudConfig:
    +  cleanupbeforeStart: false
    +  disablereplicationbeforeStop: false
    +replicaCount: 3
    +

    The following caveats exist:

    +
      +
    • +

      If you are not using Oracle Container Registry or your own container registry for your OUD container image, then you can remove the following:

      +
      imagePullSecrets:
      +  - name: orclcred
      +
    • +
    +
  4. +
  5. +

    Run the following command to upgrade the deployment:

    +
    $ cd $WORKDIR/kubernetes/helm
    +$ helm upgrade --namespace <namespace> \
    +--values oud-patch-override.yaml \
    +<release_name> oud-ds-rs --reuse-values
    +

    For example:

    +
    $ cd $WORKDIR/kubernetes/helm
    +$ helm upgrade --namespace oudns \
    +--values oud-patch-override.yaml \
    +oud-ds-rs oud-ds-rs --reuse-values
    +

    The output should look similar to the following:

    +
    Release "oud-ds-rs" has been upgraded. Happy Helming!
    +NAME: oud-ds-rs
    +LAST DEPLOYED: <DATE>
    +NAMESPACE: oudns
    +STATUS: deployed
    +REVISION: 3
    +NOTES:
    +etc..
    +
  6. +
+

Verify the pods

+
    +
  1. +

    After updating with the new image the pods will restart. Verify the pods are running:

    +
    $ kubectl --namespace <namespace> get pods
    +

    For example:

    +
    $ kubectl --namespace oudns get pods
    +

    The output will look similar to the following:

    +
    NAME                              READY   STATUS      RESTARTS   AGE
    +oud-ds-rs-0                       1/1     Running     0          11m
    +oud-ds-rs-1                       1/1     Running     0          28m
    +oud-ds-rs-2                       1/1     Running     0          22m
    +...
    +

    Note: It will take several minutes before the pods oud-ds-rs-1 and oud-ds-rs-2 start, and oud-ds-rs-0 restarts. While the OUD pods have a READY status of 0/1, the pods are started but the OUD servers associated with them are still starting.

    +
  2. +
  3. +

    Verify the pods are using the new image by running the following command:

    +
    $ kubectl describe pod <pod> -n <namespace>
    +

    For example:

    +
    $ kubectl describe pod oud-ds-rs-0 -n oudns | grep Image
    +

    The output will look similar to the following:

    +
    ...
    +Image:          container-registry.oracle.com/middleware/oud_cpu:12.2.1.4-jdk8-ol7-<October'23>
    +Image ID:       container-registry.oracle.com/middleware/oud_cpu@sha256:<sha256>
    +
  4. +
  5. +

    Ensure dsreplication is healthy by running the following command:

    +
    $ kubectl --namespace <namespace> exec -it -c <containername> <podname> -- \
    +/u01/oracle/user_projects/<OUD Instance/Pod Name>/OUD/bin/dsreplication status \
    +--trustAll --hostname <OUD Instance/Pod Name> --port 1444 --adminUID admin \
    +--dataToDisplay compat-view --dataToDisplay rs-connections
    +

    For example:

    +
    $ kubectl --namespace oudns exec -it -c oud-ds-rs oud-ds-rs-0 -- \
    +/u01/oracle/user_projects/oud-ds-rs-0/OUD/bin/dsreplication status \
    +--trustAll --hostname oud-ds-rs-0 --port 1444 --adminUID admin \
    +--dataToDisplay compat-view --dataToDisplay rs-connections
    +

    The output will look similar to the following:

    +
    NAME                              READY   STATUS      RESTARTS   AGE
    +   
    +>>>> Specify Oracle Unified Directory LDAP connection parameters
    +    
    +Password for user 'admin':
    +    
    +Establishing connections and reading configuration ..... Done.
    +    
    +dc=example,dc=com - Replication Enabled
    +=======================================
    +    
    +Server               : Entries : M.C. [1] : A.O.M.C. [2] : Port [3] : Encryption [4] : Trust [5] : U.C. [6] : Status [7] : ChangeLog [8] : Group ID [9] : Connected To [10]
    +---------------------:---------:----------:--------------:----------:----------------:-----------:----------:------------:---------------:--------------:-------------------------------
    +oud-ds-rs-0:1444     : 202     : 0        : 0            : 1898     : Disabled       : Trusted   : --       : Normal     : Enabled       : 1            : oud-ds-rs-0:1898
    +                     :         :          :              :          :                :           :          :            :               :              : (GID=1)
    +oud-ds-rs-1:1444     : 202     : 0        : 0            : 1898     : Disabled       : Trusted   : --       : Normal     : Enabled       : 1            : oud-ds-rs-1:1898
    +                     :         :          :              :          :                :           :          :            :               :              : (GID=1)
    +oud-ds-rs-2:1444     : 202     : 0        : 0            : 1898     : Disabled       : Trusted   : --       : Normal     : Enabled       : 1            : oud-ds-rs-2:1898
    +                     :         :          :              :          :                :           :          :            :               :              : (GID=1)
    +    
    +Replication Server [11]        : RS #1 : RS #2 : RS #3
    +-------------------------------:-------:-------:------
    +oud-ds-rs-0:1898               : --    : Yes   : Yes
    +(#1)                           :       :       :
    +oud-ds-rs-1:1898               : Yes   : --    : Yes
    +(#2)                           :       :       :
    +oud-ds-rs-2:1898               : Yes   : Yes   : --
    +(#3)                           :       :       :
    +    
    +etc...
    +
  6. +
  7. +

    Once the validation steps are performed and you are confident OUD is working correctly, you can optionally delete the OUD backup data in the NFS shared volume:

    +
    $ kubectl exec -it -n oudns oud-ds-rs-0 -- bash
    +[oracle@oud-ds-rs-0 oracle]$ cd user_projects/OUD_backup_<DATE>/
    +[oracle@oud-ds-rs-0 OUD_backup_<DATE>]$ rm -rf oud-ds-rs-0  oud-ds-rs-1  oud-ds-rs-2
    +
  8. +
+

Upgrading to October 23 (23.4.1) from July 22 (22.3.1)

+

The instructions below are for upgrading from July 22 (22.3.1) to October 23 (23.4.1).

+
    +
  1. Follow Upgrading to October 23 (23.4.1) from October 22 (22.4.1) or January 23 (23.1.1) to upgrade the image.
  2. +
  3. Once the image is upgraded, follow Upgrading Elasticsearch and Kibana.
  4. +
+

Upgrading to October 23 (23.4.1) from releases prior to July 22 (22.3.1)

+

In releases prior to July 22 (22.3.1), OUD used a pod-based deployment. From July 22 (22.3.1) onwards, OUD is deployed using StatefulSets.

+

If you are upgrading from a release prior to July 22 (22.3.1) you must follow the steps below to deploy a new OUD instance to use your existing OUD data in <persistent_volume>/oud_user_projects.

+

Note: The steps below will incur a small outage.

+

Delete the existing deployment

+
    +
  1. +

    Find the deployment release name as follows:

    +
    $ helm --namespace <namespace> list
    +

    For example:

    +
    $ helm --namespace oudns list
    +

    The output will look similar to the following:

    +
    NAME            NAMESPACE       REVISION        UPDATED                                   STATUS          CHART           APP VERSION
    +oud-ds-rs       oudns           1               <DATE>    deployed        oud-ds-rs-0.2   12.2.1.4.0
    +
  2. +
  3. +

    Delete the deployment using the following command:

    +
    $ helm uninstall --namespace <namespace> <release>
    +

    For example:

    +
    $ helm uninstall --namespace oudns oud-ds-rs
    +release "oud-ds-rs" uninstalled
    +
  4. +
  5. +

    Run the following command to view the status:

    +
    $ kubectl --namespace oudns get pod,service,secret,pv,pvc,ingress -o wide
    +

    Initially the pods, persistent volume (PV), and persistent volume claim (PVC) will move to a Terminating status:

    +
    NAME              READY   STATUS        RESTARTS   AGE   IP             NODE            NOMINATED NODE   READINESS GATES
    +
    +pod/oud-ds-rs-0   1/1     Terminating   0          24m   10.244.1.180   <Worker Node>   <none>           <none>
    +pod/oud-ds-rs-1   1/1     Terminating   0          18m   10.244.1.181   <Worker Node>   <none>           <none>
    +pod/oud-ds-rs-2   1/1     Terminating   0          12m   10.244.1.182   <Worker Node>   <none>           <none>
    +
    +NAME                         TYPE                                  DATA   AGE
    +secret/default-token-msmmd   kubernetes.io/service-account-token   3      3d20h
    +secret/dockercred            kubernetes.io/dockerconfigjson        1      3d20h
    +secret/orclcred              kubernetes.io/dockerconfigjson        1      3d20h
    +
    +NAME                                 CAPACITY   ACCESS MODES   RECLAIM POLICY   STATUS        CLAIM                       STORAGECLASS        REASON   AGE    VOLUMEMODE
    +persistentvolume/oud-ds-rs-pv        20Gi       RWX            Delete           Terminating   oudns/oud-ds-rs-pvc         manual                       24m    Filesystem
    +
    +NAME                                  STATUS        VOLUME         CAPACITY   ACCESS MODES   STORAGECLASS   AGE   VOLUMEMODE
    +persistentvolumeclaim/oud-ds-rs-pvc   Terminating   oud-ds-rs-pv   20Gi       RWX            manual         24m   Filesystem
    +

    Run the command again until the pods, PV and PVC disappear. Alternatively, you can watch for this as shown in the sketch after this list.

    +
  6. +
+
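Rather than re-running the command manually, you can optionally watch the resources until they are removed. This sketch assumes the oudns namespace used in the examples above; press Ctrl+C to stop watching once the resources have disappeared:

$ kubectl --namespace oudns get pod,pvc -w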

Setup the code repository to deploy OUD

+
    +
  1. +

    Create a working directory on the persistent volume to setup the latest source code:

    +
    $ mkdir <persistent_volume>/<workdir>
    +

    For example:

    +
    $ mkdir /scratch/shared/OUDK8SJuly23
    +
  2. +
  3. +

    Download the latest OUD deployment scripts from the OUD repository:

    +
    $ cd <persistent_volume>/<workdir>
    +$ git clone https://github.com/oracle/fmw-kubernetes.git --branch release/23.4.1
    +

    For example:

    +
    $ cd /scratch/shared/OUDK8SJuly23
    +$ git clone https://github.com/oracle/fmw-kubernetes.git --branch release/23.4.1
    +
  4. +
  5. +

    Set the $WORKDIR environment variable as follows:

    +
    $ export WORKDIR=<workdir>/fmw-kubernetes/OracleUnifiedDirectory
    +

    For example:

    +
    $ export WORKDIR=/scratch/shared/OUDK8SJuly23/fmw-kubernetes/OracleUnifiedDirectory
    +
  6. +
+

Create a new instance against your existing persistent volume

+
    +
  1. +

    Navigate to the $WORKDIR/kubernetes/helm directory

    +
    $ cd $WORKDIR/kubernetes/helm
    +
  2. +
  3. +

    Create an oud-ds-rs-values-override.yaml as follows:

    +
    image:
    +  repository: <image_location>
    +  tag: <image_tag>
    +  pullPolicy: IfNotPresent
    +imagePullSecrets:
    +  - name: orclcred
    +oudConfig:
    +  rootUserPassword: <password>
    +  sampleData: "200"
    +persistence:
    +  type: filesystem
    +  filesystem:
    +    hostPath:
    +      path: <persistent_volume>/oud_user_projects
    +cronJob:
    +  kubectlImage:
    +    repository: bitnami/kubectl
    +    tag: <version>
    +    pullPolicy: IfNotPresent
    + 
    +  imagePullSecrets:
    +    - name: dockercred
    +

    For example:

    +
    image:
    +  repository: container-registry.oracle.com/middleware/oud_cpu
    +  tag: 12.2.1.4-jdk8-ol7-<October'23>
    +  pullPolicy: IfNotPresent
    +imagePullSecrets:
    +  - name: orclcred
    +oudConfig:
    +  rootUserPassword: <password>
    +  sampleData: "200"
    +persistence:
    +  type: filesystem
    +  filesystem:
    +    hostPath:
    +      path: /scratch/shared/oud_user_projects
    +cronJob:
    +  kubectlImage:
    +    repository: bitnami/kubectl
    +    tag: 1.24.5
    +    pullPolicy: IfNotPresent
    + 
    +  imagePullSecrets:
    +    - name: dockercred
    +

    The following caveats exist:

    +
      +
    • +

      The <persistent_volume>/oud_user_projects must point to the directory used in your previous deployment, otherwise your existing OUD data will not be used. Make sure you take a backup of the <persistent_volume>/oud_user_projects directory before proceeding further.

      +
    • +
    • +

      Replace <password> with the password used in your previous deployment.

      +
    • +
    • +

      The <version> in the kubectlImage tag: should be set to the same version as your Kubernetes version (kubectl version). For example, if your Kubernetes version is 1.24.5, set it to 1.24.5.

      +
    • +
    • +

      If you are not using Oracle Container Registry or your own container registry for your OUD container image, then you can remove the following:

      +
      imagePullSecrets:
      +  - name: orclcred
      +
    • +
    • +

      If using NFS for your persistent volume then change the persistence section as follows:

      +
      persistence:
      +  type: networkstorage
      +  networkstorage:
      +    nfs: 
      +      path: <persistent_volume>/oud_user_projects
      +      server: <NFS IP address>
      +
    • +
    +
  4. +
  5. +

    Run the following command to deploy OUD:

    +
    $ helm install --namespace <namespace> \
    +--values oud-ds-rs-values-override.yaml \
    +<release_name> oud-ds-rs
    +

    For example:

    +
    $ helm install --namespace oudns \
    +--values oud-ds-rs-values-override.yaml \
    +oud-ds-rs oud-ds-rs
    +
  6. +
  7. +

    Check the OUD deployment as per Verify the OUD deployment and Verify the OUD replication.

    +
  8. +
  9. +

    Upgrade Elasticsearch and Kibana by following Upgrading Elasticsearch and Kibana.

    +
  10. +
+

Upgrading Elasticsearch and Kibana

+

This section shows how to upgrade Elasticsearch and Kibana. From October 22 (22.4.1) onwards, OUD logs should be stored on a centralized Elasticsearch and Kibana stack.

+

Note: This section should only be followed if upgrading from July 22 (22.3.1) or earlier to October 23 (23.4.1). If you are upgrading from October 22 or later to October 23, do not follow this section.

+

Undeploy Elasticsearch and Kibana

+

From October 22 (22.4.1) onwards, OUD logs should be stored on a centralized Elasticsearch and Kibana (ELK) stack.

+

Deployments prior to October 22 (22.4.1) used local deployments of Elasticsearch and Kibana.

+

If you are upgrading from July 22 (22.3.1) or earlier to October 23 (23.4.1), you must first undeploy Elasticsearch and Kibana using the steps below:

+
    +
  1. +

    Navigate to the $WORKDIR/kubernetes/helm directory and create a logging-override-values-uninstall.yaml with the following:

    +
    elk:
    +  enabled: false
    +
  2. +
  3. +

    Run the following command to remove the existing ELK deployment:

    +
    $ helm upgrade --namespace <domain_namespace> --values <valuesfile.yaml> <releasename> oud-ds-rs --reuse-values
    +

    For example:

    +
    $ helm upgrade --namespace oudns --values logging-override-values-uninstall.yaml oud-ds-rs oud-ds-rs --reuse-values
    +
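
    To double-check that the override took effect, you can optionally view the user-supplied values for the release; elk.enabled should show as false. This assumes the oudns namespace and oud-ds-rs release name from the example above:

    $ helm get values oud-ds-rs --namespace oudns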
  4. +
+

Deploy Elasticsearch and Kibana in a centralized stack

+
    +
  1. Follow Install Elasticsearch stack and Kibana to deploy Elasticsearch and Kibana in a centralized stack.
  2. +
+ + + + + + + + + + + + + + + + + + + diff --git a/docs/23.4.1/idm-products/oud/prepare-your-environment/index.html b/docs/23.4.1/idm-products/oud/prepare-your-environment/index.html new file mode 100644 index 000000000..8b9797773 --- /dev/null +++ b/docs/23.4.1/idm-products/oud/prepare-your-environment/index.html @@ -0,0 +1,4067 @@ + + + + + + + + + + + + Prepare Your Environment :: Oracle Fusion Middleware on Kubernetes + + + + + + + + + + + + + + + + + + + + + + + + + +
Prepare Your Environment
    +
  1. Check the Kubernetes cluster is ready
  2. +
  3. Obtain the OUD container image
  4. +
  5. Create a persistent volume directory
  6. +
  7. Setup the code repository to deploy OUD
  8. +
+

Check the Kubernetes cluster is ready

+

As per the Prerequisites a Kubernetes cluster should have already been configured.

+
    +
  1. +

    Run the following command on the master node to check that the cluster and worker nodes are running:

    +
    $ kubectl get nodes,pods -n kube-system
    +

    The output will look similar to the following:

    +
    NAME                  STATUS   ROLES                  AGE   VERSION
    +node/worker-node1     Ready    <none>                 17h   v1.26.6+1.el8
    +node/worker-node2     Ready    <none>                 17h   v1.26.6+1.el8
    +node/master-node      Ready    control-plane,master   23h   v1.26.6+1.el8
    +
    +NAME                                     READY   STATUS    RESTARTS   AGE
    +pod/coredns-66bff467f8-fnhbq             1/1     Running   0          23h
    +pod/coredns-66bff467f8-xtc8k             1/1     Running   0          23h
    +pod/etcd-master                          1/1     Running   0          21h
    +pod/kube-apiserver-master-node           1/1     Running   0          21h
    +pod/kube-controller-manager-master-node  1/1     Running   0          21h
    +pod/kube-flannel-ds-amd64-lxsfw          1/1     Running   0          17h
    +pod/kube-flannel-ds-amd64-pqrqr          1/1     Running   0          17h
    +pod/kube-flannel-ds-amd64-wj5nh          1/1     Running   0          17h
    +pod/kube-proxy-2kxv2                     1/1     Running   0          17h
    +pod/kube-proxy-82vvj                     1/1     Running   0          17h
    +pod/kube-proxy-nrgw9                     1/1     Running   0          23h
    +pod/kube-scheduler-master                1/1     Running   0          21h
    +
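
    If you prefer a more compact check that every node reports Ready, a jsonpath query such as the following can be used (the output formatting is approximate):

    $ kubectl get nodes -o jsonpath='{range .items[*]}{.metadata.name}{"\t"}{.status.conditions[?(@.type=="Ready")].status}{"\n"}{end}'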
  2. +
+

Obtain the OUD container image

+

The OUD Kubernetes deployment requires access to an OUD container image. The image can be obtained in the following ways:

+
    +
  • Prebuilt OUD container image
  • +
  • Build your own OUD container image using WebLogic Image Tool
  • +
+

Prebuilt OUD container image

+

The prebuilt OUD October 2023 container image can be downloaded from Oracle Container Registry. This image is prebuilt by Oracle and includes Oracle Unified Directory 12.2.1.4.0, the October Patch Set Update (PSU), and other fixes released with the Critical Patch Update (CPU) program.

+

Note: Before using this image you must log in to Oracle Container Registry, navigate to Middleware > oud_cpu and accept the license agreement.

+

You can use this image in the following ways (example pull and push commands are shown after the list):

+
    +
  • Pull the container image from the Oracle Container Registry automatically during the OUD Kubernetes deployment.
  • +
  • Manually pull the container image from the Oracle Container Registry and then upload it to your own container registry.
  • +
  • Manually pull the container image from the Oracle Container Registry and manually stage it on the master node and each worker node.
  • +
+
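
For example, a minimal sketch of manually pulling the image and then pushing it to your own container registry might look like the following. The image tag shown matches the October 2023 example used elsewhere in this document, <your_registry> is a placeholder for your own registry, and podman can be substituted for docker if that is your container engine:

$ docker login container-registry.oracle.com
$ docker pull container-registry.oracle.com/middleware/oud_cpu:12.2.1.4-jdk8-ol7-<October'23>
$ docker tag container-registry.oracle.com/middleware/oud_cpu:12.2.1.4-jdk8-ol7-<October'23> <your_registry>/oud_cpu:12.2.1.4-jdk8-ol7-<October'23>
$ docker push <your_registry>/oud_cpu:12.2.1.4-jdk8-ol7-<October'23>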

Build your own OUD container image using WebLogic Image Tool

+

You can build your own OUD container image using the WebLogic Image Tool. This is recommended if you need to apply one-off patches to a prebuilt OUD container image. For more information about building your own container image with the WebLogic Image Tool, see Create or update image.

+

You can use an image built with WebLogic Image Tool in the following ways:

+
    +
  • Manually upload them to your own container registry.
  • +
  • Manually stage them on the master node and each worker node.
  • +
+

Note: This documentation does not tell you how to pull or push the above images into a private container registry, or stage them on the master and worker nodes. Details of this can be found in the Enterprise Deployment Guide.

+

Create a persistent volume directory

+

Note: This section should not be followed if using block storage.

+

As referenced in Prerequisites, the nodes in the Kubernetes cluster must have access to a persistent volume such as a Network File System (NFS) mount or a shared file system.

+

In this example /scratch/shared/ is a shared directory accessible from all nodes.

+
    +
  1. +

    On the master node run the following commands to create the oud_user_projects directory:

    +
    $ cd <persistent_volume>
    +$ mkdir oud_user_projects   
    +$ sudo chown -R 1000:0 oud_user_projects
    +

    For example:

    +
    $ cd /scratch/shared
    +$ mkdir oud_user_projects   
    +$ sudo chown -R 1000:0 oud_user_projects
    +
  2. +
  3. +

    On the master node run the following to ensure it is possible to read and write to the persistent volume:

    +
    $ cd <persistent_volume>/oud_user_projects
    +$ touch filemaster.txt
    +$ ls filemaster.txt
    +

    For example:

    +
    $ cd /scratch/shared/oud_user_projects
    +$ touch filemaster.txt
    +$ ls filemaster.txt
    +

    On the first worker node run the following to ensure it is possible to read and write to the persistent volume:

    +
    $ cd /scratch/shared/oud_user_projects
    +$ ls filemaster.txt
    +$ touch fileworker1.txt
    +$ ls fileworker1.txt
    +

    Repeat the above for any other worker nodes, for example fileworker2.txt, and so on. Once it is proven that each node can read and write to the persistent volume, delete the files created.

    +
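
    For example, to delete the test files from the master node once all nodes have been verified (adjust the list to match the files you created):

    $ cd /scratch/shared/oud_user_projects
    $ rm filemaster.txt fileworker1.txt fileworker2.txt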
  4. +
+

Setup the code repository to deploy OUD

+

Oracle Unified Directory deployment on Kubernetes leverages deployment scripts and Helm charts provided by Oracle for creating Oracle Unified Directory containers. To deploy Oracle Unified Directory on Kubernetes, set up the deployment scripts as follows:

+
    +
  1. +

    Create a working directory to set up the source code.

    +
    $ mkdir <workdir>
    +

    For example:

    +
    $ mkdir /scratch/shared/OUDContainer
    +
  2. +
  3. +

    Download the latest OUD deployment scripts from the OUD repository:

    +
    $ cd <workdir>
    +$ git clone https://github.com/oracle/fmw-kubernetes.git --branch release/23.4.1
    +

    For example:

    +
    $ cd /scratch/shared/OUDContainer
    +$ git clone https://github.com/oracle/fmw-kubernetes.git --branch release/23.4.1
    +
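
    Optionally, you can confirm the correct branch was checked out (this assumes git 2.22 or later, which provides --show-current):

    $ git -C <workdir>/fmw-kubernetes branch --show-current
    release/23.4.1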
  4. +
  5. +

    Set the $WORKDIR environment variable as follows:

    +
    $ export WORKDIR=<workdir>/fmw-kubernetes/OracleUnifiedDirectory
    +

    For example:

    +
    $ export WORKDIR=/scratch/shared/OUDContainer/fmw-kubernetes/OracleUnifiedDirectory
    +

    You are now ready to create the OUD deployment as per Create OUD instances.

    +
  6. +
+ + + + + + + + + + + + + + + + + + diff --git a/docs/23.4.1/idm-products/oud/prepare-your-environment/index.xml b/docs/23.4.1/idm-products/oud/prepare-your-environment/index.xml new file mode 100644 index 000000000..5de03f67d --- /dev/null +++ b/docs/23.4.1/idm-products/oud/prepare-your-environment/index.xml @@ -0,0 +1,14 @@ + + + + Prepare Your Environment on Oracle Fusion Middleware on Kubernetes + /fmw-kubernetes/23.4.1/idm-products/oud/prepare-your-environment/ + Recent content in Prepare Your Environment on Oracle Fusion Middleware on Kubernetes + Hugo -- gohugo.io + en-us + + + + + + \ No newline at end of file diff --git a/docs/23.4.1/idm-products/oud/prerequisites/index.html b/docs/23.4.1/idm-products/oud/prerequisites/index.html new file mode 100644 index 000000000..c08596257 --- /dev/null +++ b/docs/23.4.1/idm-products/oud/prerequisites/index.html @@ -0,0 +1,3968 @@ + + + + + + + + + + + + Prerequisites :: Oracle Fusion Middleware on Kubernetes + + + + + + + + + + + + + + + + + + + + + + + + + +
Prerequisites

Introduction

+

This document provides information about the system requirements for deploying and running Oracle Unified Directory 12c PS4 (12.2.1.4.0) in a Kubernetes environment.

+

System Requirements for Oracle Unified Directory on Kubernetes

+
    +
  • A running Kubernetes cluster that meets the following requirements: +
      +
    • The Kubernetes cluster must have sufficient nodes and resources.
    • +
    • An installation of Helm is required on the Kubernetes cluster. Helm is used to create and deploy the necessary resources on the Kubernetes cluster.
    • +
    • A supported container engine must be installed and running on the Kubernetes cluster.
    • +
    • The Kubernetes cluster and container engine must meet the minimum version requirements outlined in document ID 2723908.1 on My Oracle Support (see the example version checks after this list).
    • +
    • The nodes in the Kubernetes cluster must have access to a persistent volume such as a Network File System (NFS) mount, a shared file system, or block storage. If you intend to use assured replication in OUD, you must have a persistent volume available that uses a Network File System (NFS) mount, or a shared file system for the config volume. See Enabling Assured Replication.
    • +
    +
  • +
+
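
For example, the following commands can be used to capture the versions in your environment for comparison against document ID 2723908.1 (the exact output format varies by version):

$ kubectl version
$ helm version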

Note: This documentation does not tell you how to install a Kubernetes cluster, Helm, the container engine, or how to push container images to a container registry. +Please refer to your vendor-specific documentation for this information. Also see Getting Started.

+ + + + + + + + + + + + + + + + + + diff --git a/docs/23.4.1/idm-products/oud/prerequisites/index.xml b/docs/23.4.1/idm-products/oud/prerequisites/index.xml new file mode 100644 index 000000000..7f9a9afc0 --- /dev/null +++ b/docs/23.4.1/idm-products/oud/prerequisites/index.xml @@ -0,0 +1,14 @@ + + + + Prerequisites on Oracle Fusion Middleware on Kubernetes + /fmw-kubernetes/23.4.1/idm-products/oud/prerequisites/ + Recent content in Prerequisites on Oracle Fusion Middleware on Kubernetes + Hugo -- gohugo.io + en-us + + + + + + \ No newline at end of file diff --git a/docs/23.4.1/idm-products/oud/release-notes/index.html b/docs/23.4.1/idm-products/oud/release-notes/index.html new file mode 100644 index 000000000..188e7326e --- /dev/null +++ b/docs/23.4.1/idm-products/oud/release-notes/index.html @@ -0,0 +1,4064 @@ + + + + + + + + + + + + Release Notes :: Oracle Fusion Middleware on Kubernetes + + + + + + + + + + + + + + + + + + + + + + + + + +
Release Notes

Review the latest changes and known issues for Oracle Unified Directory on Kubernetes.

+

Recent changes

Date | Version | Change
October, 2023 | 23.4.1 | Supports Oracle Unified Directory 12.2.1.4 domain deployment using the October 2023 container image which contains the October Patch Set Update (PSU) and other fixes released with the Critical Patch Update (CPU) program.
+ Support for Block Device Storage. See Create OUD Instances.
+ Ability to set resource requests and limits for CPU and memory on an OUD instance. See Create OUD Instances.
+ Support for Assured Replication. See Create OUD Instances.
+ Support for the Kubernetes Horizontal Pod Autoscaler (HPA). See Kubernetes Horizontal Pod Autoscaler.
+ Supports integration options such as Enterprise User Security (EUS), E-Business Suite (EBS), and Directory Integration Platform (DIP).
To upgrade to October 23 (23.4.1) you must follow the instructions in Patch and Upgrade.
July, 2023 | 23.3.1 | Supports Oracle Unified Directory 12.2.1.4 domain deployment using the July 2023 container image which contains the July Patch Set Update (PSU) and other fixes released with the Critical Patch Update (CPU) program.
To upgrade to July 23 (23.3.1) you must follow the instructions in Patch and Upgrade.
April, 2023 | 23.2.1 | Supports Oracle Unified Directory 12.2.1.4 domain deployment using the April 2023 container image which contains the April Patch Set Update (PSU) and other fixes released with the Critical Patch Update (CPU) program.
To upgrade to April 23 (23.2.1) you must follow the instructions in Patch and Upgrade.
January, 2023 | 23.1.1 | Supports Oracle Unified Directory 12.2.1.4 domain deployment using the January 2023 container image which contains the January Patch Set Update (PSU) and other fixes released with the Critical Patch Update (CPU) program.
October, 2022 | 22.4.1 | Supports Oracle Unified Directory 12.2.1.4 domain deployment using the October 2022 container image which contains the October Patch Set Update (PSU) and other fixes released with the Critical Patch Update (CPU) program.
Changes to deployment of Logging and Visualization with Elasticsearch and Kibana.
OUD container images are now only available from container-registry.oracle.com and are no longer available from My Oracle Support.
July, 2022 | 22.3.1 | Supports Oracle Unified Directory 12.2.1.4 domain deployment using the July 2022 container image which contains the July Patch Set Update (PSU) and other fixes released with the Critical Patch Update (CPU) program. From July 2022 onwards OUD deployment is performed using StatefulSets.
April, 2022 | 22.2.1 | Updated for CRI-O support.
November, 2021 | 21.4.2 | Voyager ingress removed as no longer supported.
October, 2021 | 21.4.1 | A) References to supported Kubernetes, Helm and Docker versions removed and replaced with Support note reference. B) Namespace and domain names changed to be consistent with Enterprise Deployment Guide for Oracle Identity and Access Management in a Kubernetes Cluster. C) Upgrading a Kubernetes Cluster and Security Hardening removed as vendor specific.
November, 2020 | 20.4.1 | Initial release of Oracle Unified Directory on Kubernetes.
+ + + + + + + + + + + + + + + + + + diff --git a/docs/23.4.1/idm-products/oud/release-notes/index.xml b/docs/23.4.1/idm-products/oud/release-notes/index.xml new file mode 100644 index 000000000..eef25f69e --- /dev/null +++ b/docs/23.4.1/idm-products/oud/release-notes/index.xml @@ -0,0 +1,14 @@ + + + + Release Notes on Oracle Fusion Middleware on Kubernetes + /fmw-kubernetes/23.4.1/idm-products/oud/release-notes/ + Recent content in Release Notes on Oracle Fusion Middleware on Kubernetes + Hugo -- gohugo.io + en-us + + + + + + \ No newline at end of file diff --git a/docs/23.4.1/idm-products/oud/troubleshooting/index.html b/docs/23.4.1/idm-products/oud/troubleshooting/index.html new file mode 100644 index 000000000..3150466f7 --- /dev/null +++ b/docs/23.4.1/idm-products/oud/troubleshooting/index.html @@ -0,0 +1,4203 @@ + + + + + + + + + + + + Troubleshooting :: Oracle Fusion Middleware on Kubernetes + + + + + + + + + + + + + + + + + + + + + + + + + +
Troubleshooting
    +
  1. Check the status of a namespace
  2. +
  3. View pod logs
  4. +
  5. View pod description
  6. +
  7. Known issues
  8. +
+

Check the status of a namespace

+

To check the status of objects in a namespace use the following command:

+
$ kubectl --namespace <namespace> get nodes,pod,service,secret,pv,pvc,ingress -o wide
+

For example:

+
$ kubectl --namespace oudns get pod,service,secret,pv,pvc,ingress -o wide
+

The output will look similar to the following:

+
NAME                                  READY   STATUS      RESTARTS   AGE     IP             NODE            NOMINATED NODE   READINESS GATES
+pod/oud-ds-rs-0                       1/1     Running     0          14m     10.244.1.180   <Worker Node>   <none>           <none>
+pod/oud-ds-rs-1                       1/1     Running     0          8m26s   10.244.1.181   <Worker Node>   <none>           <none>
+pod/oud-ds-rs-2                       0/1     Running     0          2m24s   10.244.1.182   <Worker Node>   <none>           <none>
+pod/oud-pod-cron-job-27586680-p5d8q   0/1     Completed   0          50s     10.244.1.183   <Worker Node>   <none>           <none>
+
+NAME                          TYPE        CLUSTER-IP       EXTERNAL-IP   PORT(S)                                                          AGE   SELECTOR
+service/oud-ds-rs             ClusterIP   None             <none>        1444/TCP,1888/TCP,1389/TCP,1636/TCP,1080/TCP,1081/TCP,1898/TCP   14m   app.kubernetes.io/instance=oud-ds-rs,app.kubernetes.io/name=oud-ds-rs
+service/oud-ds-rs-0           ClusterIP   None             <none>        1444/TCP,1888/TCP,1898/TCP                                       14m app.kubernetes.io/instance=oud-ds-rs,app.kubernetes.io/name=oud-ds-rs,statefulset.kubernetes.io/pod-name=oud-ds-rs-0
+service/oud-ds-rs-1           ClusterIP   None             <none>        1444/TCP,1888/TCP,1898/TCP                                       14m   app.kubernetes.io/instance=oud-ds-rs,app.kubernetes.io/name=oud-ds-rs,statefulset.kubernetes.io/pod-name=oud-ds-rs-1
+service/oud-ds-rs-2           ClusterIP   None             <none>        1444/TCP,1888/TCP,1898/TCP                                       14m   app.kubernetes.io/instance=oud-ds-rs,app.kubernetes.io/name=oud-ds-rs,statefulset.kubernetes.io/pod-name=oud-ds-rs-2
+service/oud-ds-rs-http-0      ClusterIP   10.104.112.93    <none>        1080/TCP,1081/TCP                                                14m   app.kubernetes.io/instance=oud-ds-rs,app.kubernetes.io/name=oud-ds-rs,statefulset.kubernetes.io/pod-name=oud-ds-rs-0
+service/oud-ds-rs-http-1      ClusterIP   10.103.105.70    <none>        1080/TCP,1081/TCP                                                14m   app.kubernetes.io/instance=oud-ds-rs,app.kubernetes.io/name=oud-ds-rs,statefulset.kubernetes.io/pod-name=oud-ds-rs-1
+service/oud-ds-rs-http-2      ClusterIP   10.110.160.107   <none>        1080/TCP,1081/TCP                                                14m   app.kubernetes.io/instance=oud-ds-rs,app.kubernetes.io/name=oud-ds-rs,statefulset.kubernetes.io/pod-name=oud-ds-rs-2
+service/oud-ds-rs-lbr-admin   ClusterIP   10.99.238.222    <none>        1888/TCP,1444/TCP                                                14m   app.kubernetes.io/instance=oud-ds-rs,app.kubernetes.io/name=oud-ds-rs
+service/oud-ds-rs-lbr-http    ClusterIP   10.101.250.196   <none>        1080/TCP,1081/TCP                                                14m   app.kubernetes.io/instance=oud-ds-rs,app.kubernetes.io/name=oud-ds-rs
+service/oud-ds-rs-lbr-ldap    ClusterIP   10.104.149.90    <none>        1389/TCP,1636/TCP                                                14m   app.kubernetes.io/instance=oud-ds-rs,app.kubernetes.io/name=oud-ds-rs
+service/oud-ds-rs-ldap-0      ClusterIP   10.109.255.221   <none>        1389/TCP,1636/TCP                                                14m   app.kubernetes.io/instance=oud-ds-rs,app.kubernetes.io/name=oud-ds-rs,statefulset.kubernetes.io/pod-name=oud-ds-rs-0
+service/oud-ds-rs-ldap-1      ClusterIP   10.111.135.142   <none>        1389/TCP,1636/TCP                                                14m   app.kubernetes.io/instance=oud-ds-rs,app.kubernetes.io/name=oud-ds-rs,statefulset.kubernetes.io/pod-name=oud-ds-rs-1
+service/oud-ds-rs-ldap-2      ClusterIP   10.100.8.145     <none>        1389/TCP,1636/TCP                                                14m   app.kubernetes.io/instance=oud-ds-rs,app.kubernetes.io/name=oud-ds-rs,statefulset.kubernetes.io/pod-name=oud-ds-rs-2
+
+NAME                                     TYPE                             DATA   AGE
+secret/dockercred                        kubernetes.io/dockerconfigjson   1      4h24m
+secret/orclcred                          kubernetes.io/dockerconfigjson   1      14m
+secret/oud-ds-rs-creds                   opaque                           8      14m
+secret/oud-ds-rs-tls-cert                kubernetes.io/tls                2      14m
+secret/sh.helm.release.v1.oud-ds-rs.v1   helm.sh/release.v1               1      14m
+
+
+NAME                                 CAPACITY   ACCESS MODES   RECLAIM POLICY   STATUS   CLAIM                       STORAGECLASS        REASON   AGE    VOLUMEMODE
+persistentvolume/oud-ds-rs-pv        20Gi       RWX            Delete           Bound    oudns/oud-ds-rs-pvc         manual                       14m    Filesystem
+
+NAME                                  STATUS   VOLUME         CAPACITY   ACCESS MODES   STORAGECLASS   AGE   VOLUMEMODE
+persistentvolumeclaim/oud-ds-rs-pvc   Bound    oud-ds-rs-pv   20Gi       RWX            manual         14m   Filesystem
+
+NAME                                                      CLASS    HOSTS                                                               ADDRESS   PORTS     AGE
+ingress.networking.k8s.io/oud-ds-rs-admin-ingress-nginx   <none>   oud-ds-rs-admin-0,oud-ds-rs-admin-0,oud-ds-rs-admin-1 + 3 more...             80, 443   14m
+ingress.networking.k8s.io/oud-ds-rs-http-ingress-nginx    <none>   oud-ds-rs-http-0,oud-ds-rs-http-1,oud-ds-rs-http-2 + 3 more...                80, 443   14m
+

Include/exclude elements (nodes,pod,service,secret,pv,pvc,ingress) as required.

+
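
For example, to look at just the pods in wide format:

$ kubectl --namespace oudns get pods -o wide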

View pod logs

+

To view logs for a pod use the following command:

+
$ kubectl logs <pod> -n <namespace>
+

For example:

+
$ kubectl logs oud-ds-rs-0 -n oudns
+
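
Some optional variations that are often useful: follow the log as it is written, limit the output to recent lines, or view the logs of a previously restarted container:

$ kubectl logs -f oud-ds-rs-0 -n oudns
$ kubectl logs --tail=100 oud-ds-rs-0 -n oudns
$ kubectl logs oud-ds-rs-0 -n oudns --previous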

View pod description

+

Details about a pod can be viewed using the kubectl describe command:

+
$ kubectl describe pod <pod> -n <namespace>
+

For example:

+
$ kubectl describe pod oud-ds-rs-0 -n oudns
+

The output will look similar to the following:

+
Name:         oud-ds-rs-0
+Namespace:    oudns
+Priority:     0
+Node:         <Worker Node>/100.105.18.114
+Start Time:   <DATE>
+Labels:       app.kubernetes.io/instance=oud-ds-rs
+              app.kubernetes.io/name=oud-ds-rs
+              controller-revision-hash=oud-ds-rs-5c8b8f67c9
+              statefulset.kubernetes.io/pod-name=oud-ds-rs-0
+Annotations:  <none>
+Status:       Running
+IP:           10.244.2.48
+IPs:
+  IP:           10.244.2.48
+Controlled By:  StatefulSet/oud-ds-rs
+Init Containers:
+  mount-pv:
+    Container ID:  cri-o://905af11c6f032f2dfa18b1e3956d7936cb7dd04d9d0df0cfcf8ed061e6930b52
+    Image:         <location>/busybox
+    Image ID:      <location>@sha256:2c8ed5408179ff4f53242a4bdd2706110ce000be239fe37a61be9c52f704c437
+    Port:          <none>
+    Host Port:     <none>
+    Command:
+      /bin/sh
+      -c
+    Args:
+      ordinal=${OUD_INSTANCE_NAME##*-}; if [[ ${CLEANUP_BEFORE_START} == "true" ]]; then if [[ "$ordinal" != "0" ]]; then cd /u01/oracle; rm -fr /u01/oracle/user_projects/$(OUD_INSTANCE_NAME)/OUD; fi; fi
+      if [[ ${CONFIGVOLUME_ENABLED} == "true" ]]; then if [[ "$ordinal" == "0" ]]; then cp "/mnt/baseOUD.props" "${CONFIGVOLUME_MOUNTPATH}/config-baseOUD.props"; else cp "/mnt/replOUD.props" "${CONFIGVOLUME_MOUNTPATH}/config-replOUD.props"; fi; fi;
+    State:          Terminated
+      Reason:       Completed
+      Exit Code:    0
+      Started:      <DATE>
+      Finished:     <DATE>
+    Ready:          True
+    Restart Count:  0
+    Environment:
+      OUD_INSTANCE_NAME:       oud-ds-rs-0 (v1:metadata.name)
+      CONFIGVOLUME_ENABLED:    false
+      CONFIGVOLUME_MOUNTPATH:  /u01/oracle/config-input
+      CLEANUP_BEFORE_START:    false
+    Mounts:
+      /u01/oracle/user_projects from oud-ds-rs-pv (rw)
+      /var/run/secrets/kubernetes.io/serviceaccount from kube-api-access-65skp (ro)
+Containers:
+  oud-ds-rs:
+    Container ID:   cri-o://d691b090dfbb1ee1b8606952497d80642424a82a2290071b325ea720098817c3
+    Image:          container-registry.oracle.com/middleware/oud_cpu:12.2.1.4-jdk8-ol7-<April'23>
+    Image ID:       container-registry.oracle.com/middleware/oud_cpu@sha256:faca16dbbcda1985ff567eefe3f2ca7bae6cbbb7ebcd296fffb040ce61e9396a
+    Ports:          1444/TCP, 1888/TCP, 1389/TCP, 1636/TCP, 1080/TCP, 1081/TCP, 1898/TCP
+    Host Ports:     0/TCP, 0/TCP, 0/TCP, 0/TCP, 0/TCP, 0/TCP, 0/TCP
+    State:          Running
+      Started:      <DATE>
+    Ready:          True
+    Restart Count:  0
+    Limits:
+      cpu:     1
+      memory:  4Gi
+    Requests:
+      cpu:      500m
+      memory:   4Gi
+    Liveness:   tcp-socket :ldap delay=300s timeout=30s period=60s #success=1 #failure=5
+    Readiness:  exec [/u01/oracle/container-scripts/checkOUDInstance.sh] delay=300s timeout=30s period=60s #success=1 #failure=10
+    Environment:
+      instanceType:                   DS2RS_STS
+      OUD_INSTANCE_NAME:              oud-ds-rs-0 (v1:metadata.name)
+      MY_NODE_NAME:                    (v1:spec.nodeName)
+      MY_POD_NAME:                    oud-ds-rs-0 (v1:metadata.name)
+      sleepBeforeConfig:              3
+      sourceHost:                     oud-ds-rs-0
+      baseDN:                         dc=example,dc=com
+      rootUserDN:                     <set to the key 'rootUserDN' in secret 'oud-ds-rs-creds'>        Optional: false
+      rootUserPassword:               <set to the key 'rootUserPassword' in secret 'oud-ds-rs-creds'>  Optional: false
+      adminUID:                       <set to the key 'adminUID' in secret 'oud-ds-rs-creds'>          Optional: false
+      adminPassword:                  <set to the key 'adminPassword' in secret 'oud-ds-rs-creds'>     Optional: false
+      bindDN1:                        <set to the key 'bindDN1' in secret 'oud-ds-rs-creds'>           Optional: false
+      bindPassword1:                  <set to the key 'bindPassword1' in secret 'oud-ds-rs-creds'>     Optional: false
+      bindDN2:                        <set to the key 'bindDN2' in secret 'oud-ds-rs-creds'>           Optional: false
+      bindPassword2:                  <set to the key 'bindPassword2' in secret 'oud-ds-rs-creds'>     Optional: false
+      sourceServerPorts:              oud-ds-rs-0:1444
+      sourceAdminConnectorPort:       1444
+      sourceReplicationPort:          1898
+      sampleData:                     200
+      adminConnectorPort:             1444
+      httpAdminConnectorPort:         1888
+      ldapPort:                       1389
+      ldapsPort:                      1636
+      httpPort:                       1080
+      httpsPort:                      1081
+      replicationPort:                1898
+      dsreplication_1:                verify --hostname ${sourceHost} --port ${sourceAdminConnectorPort} --baseDN ${baseDN} --serverToRemove $(OUD_INSTANCE_NAME):${adminConnectorPort} --connectTimeout 600000 --readTimeout 600000
+      dsreplication_2:                enable --host1 ${sourceHost} --port1 ${sourceAdminConnectorPort} --replicationPort1 ${sourceReplicationPort} --host2 $(OUD_INSTANCE_NAME) --port2 ${adminConnectorPort} --replicationPort2 ${replicationPort} --baseDN ${baseDN} --connectTimeout 600000 --readTimeout 600000
+      dsreplication_3:                initialize --hostSource ${initializeFromHost} --portSource ${sourceAdminConnectorPort} --hostDestination $(OUD_INSTANCE_NAME) --portDestination ${adminConnectorPort} --baseDN ${baseDN} --connectTimeout 600000 --readTimeout 600000
+      dsreplication_4:                verify --hostname $(OUD_INSTANCE_NAME) --port ${adminConnectorPort} --baseDN ${baseDN} --connectTimeout 600000 --readTimeout 600000
+      post_dsreplication_dsconfig_1:  set-replication-domain-prop --domain-name ${baseDN} --set group-id:1
+      post_dsreplication_dsconfig_2:  set-replication-server-prop --set group-id:1
+    Mounts:
+      /u01/oracle/user_projects from oud-ds-rs-pv (rw)
+      /var/run/secrets/kubernetes.io/serviceaccount from kube-api-access-65skp (ro)
+Conditions:
+  Type              Status
+  Initialized       True
+  Ready             True
+  ContainersReady   True
+  PodScheduled      True
+Volumes:
+  oud-ds-rs-pv:
+    Type:       PersistentVolumeClaim (a reference to a PersistentVolumeClaim in the same namespace)
+    ClaimName:  oud-ds-rs-pvc
+    ReadOnly:   false
+  kube-api-access-65skp:
+    Type:                    Projected (a volume that contains injected data from multiple sources)
+    TokenExpirationSeconds:  3607
+    ConfigMapName:           kube-root-ca.crt
+    ConfigMapOptional:       <nil>
+    DownwardAPI:             true
+QoS Class:                   Burstable
+Node-Selectors:              <none>
+Tolerations:                 node.kubernetes.io/not-ready:NoExecute op=Exists for 300s
+                             node.kubernetes.io/unreachable:NoExecute op=Exists for 300s
+Events:                      <none>
+
+

Known issues

+

dsreplication output after scale up/down shows pod in unknown state

+

Sometimes when scaling up or down, it is possible to get incorrect data in the dsreplication output. In the example below the replicaCount was changed from 4 to 3. The oud-ds-rs-3 server appears as <Unknown> when it should have disappeared:

+
dc=example,dc=com - Replication Enabled
+=======================================
+ 
+Server                         : Entries : M.C. [1] : A.O.M.C. [2] : Port [3] : Encryption [4] : Trust [5] : U.C. [6] : Status [7] : ChangeLog [8] : Group ID [9] : Connected To [10]
+-------------------------------:---------:----------:--------------:----------:----------------:-----------:----------:------------:---------------:--------------:-------------------------------
+oud-ds-rs-3:<Unknown>          : --      : N/A      : --           : 1898     : Disabled       : --        : --       : Unknown    : --            : N/A          : --
+[11]                           :         :          :              :          :                :           :          :            :               :              :
+oud-ds-rs-0:1444               : 39135   : 0        : 0            : 1898     : Disabled       : Trusted   : --       : Normal     : Enabled       : 1            : oud-ds-rs-2:1898
+                               :         :          :              :          :                :           :          :            :               :              : (GID=1)
+oud-ds-rs-1:1444               : 39135   : 0        : 0            : 1898     : Disabled       : Trusted   : --       : Normal     : Enabled       : 1            : oud-ds-rs-1:1898
+                               :         :          :              :          :                :           :          :            :               :              : (GID=1)
+oud-ds-rs-2:1444               : 39135   : 0        : 0            : 1898     : Disabled       : Trusted   : --       : Normal     : Enabled       : 1            : oud-ds-rs-2:1898
+                               :         :          :              :          :                :           :          :            :               :              : (GID=1)
+ 
+Replication Server [12]       : RS #1 : RS #2 : RS #3 : RS #4
+------------------------------:-------:-------:-------:------
+oud-ds-rs-0:1898 (#1)  : --    : Yes   : Yes   : N/A
+oud-ds-rs-1:1898 (#2)  : Yes   : --    : Yes   : N/A
+oud-ds-rs-2:1898 (#3)  : Yes   : Yes   : --    : N/A
+oud-ds-rs-3:1898 (#4)  : No    : No    : No    : --
+
+

In this situation perform the following steps to remove the server:

+
    +
  1. +

    Run the following command to enter the OUD Kubernetes pod:

    +
    $ kubectl --namespace <namespace> exec -it -c <containername> <podname> -- bash
    +

    For example:

    +
    kubectl --namespace oudns exec -it -c oud-ds-rs oud-ds-rs-0 -- bash
    +

    This will take you into the pod:

    +
    [oracle@oud-ds-rs-0 oracle]$
    +
  2. +
  3. +

    Once inside the pod run the following command to create a password file:

    +
    echo <ADMIN_PASSWORD> > /tmp/adminpassword.txt
    +
  4. +
  5. +

    Run the following command to remove the replicationPort:

    +
    /u01/oracle/oud/bin/dsreplication disable --hostname localhost --port $adminConnectorPort --adminUID admin --trustAll --adminPasswordFile /tmp/adminpassword.txt --no-prompt --unreachableServer oud-ds-rs-3:$replicationPort
    +

    The output will look similar to the following:

    +
    Establishing connections and reading configuration ........ Done.
    + 
    +The following errors were encountered reading the configuration of the
    +existing servers:
    +Could not connect to the server oud-ds-rs-3:1444.  Check that the
    +server is running and that is accessible from the local machine.  Details:
    +oud-ds-rs-3:1444
    +The tool will try to update the configuration in a best effort mode.
    + 
    +Removing references to replication server oud-ds-rs-3:1898 ..... Done.
    +
  6. +
  7. +

    Run the following command to remove the adminConnectorPort:

    +
    /u01/oracle/oud/bin/dsreplication disable --hostname localhost --port $adminConnectorPort --adminUID admin --trustAll --adminPasswordFile /tmp/adminpassword.txt --no-prompt --unreachableServer oud-ds-rs-3:$adminConnectorPort
    +

    The output will look similar to the following:

    +
    Establishing connections and reading configuration ...... Done.
    + 
    +Removing server oud-ds-rs-3:1444 from the registration information ..... Done.
    +
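
    Optionally, before deleting the password file in the next step, you can confirm that oud-ds-rs-3 no longer appears by running a status check similar to the following (a sketch only; the options mirror the disable commands above):

    /u01/oracle/oud/bin/dsreplication status --hostname localhost --port $adminConnectorPort --adminUID admin --adminPasswordFile /tmp/adminpassword.txt --trustAll --no-prompt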
  8. +
  9. +

    Delete the password file:

    +
    rm /tmp/adminpassword.txt
    +
  10. +
+ + + + + + + + + + + + + + + + + + diff --git a/docs/23.4.1/idm-products/oud/troubleshooting/index.xml b/docs/23.4.1/idm-products/oud/troubleshooting/index.xml new file mode 100644 index 000000000..34ca1fa84 --- /dev/null +++ b/docs/23.4.1/idm-products/oud/troubleshooting/index.xml @@ -0,0 +1,14 @@ + + + + Troubleshooting on Oracle Fusion Middleware on Kubernetes + /fmw-kubernetes/23.4.1/idm-products/oud/troubleshooting/ + Recent content in Troubleshooting on Oracle Fusion Middleware on Kubernetes + Hugo -- gohugo.io + en-us + + + + + + \ No newline at end of file diff --git a/docs/23.4.1/idm-products/oudsm/configure-ingress/index.html b/docs/23.4.1/idm-products/oudsm/configure-ingress/index.html new file mode 100644 index 000000000..cfc21f07f --- /dev/null +++ b/docs/23.4.1/idm-products/oudsm/configure-ingress/index.html @@ -0,0 +1,4157 @@ + + + + + + + + + + + + Configure an Ingress for OUDSM :: Oracle Fusion Middleware on Kubernetes + + + + + + + + + + + + + + + + + + + + + + + + + +
Configure an Ingress for OUDSM
    +
  1. +

    Introduction

    +
  2. +
  3. +

    Install NGINX

    +

    a. Configure the repository

    +

    b. Create a namespace

    +

    c. Install NGINX using helm

    +
  4. +
  5. +

    Access to interfaces through ingress

    +
  6. +
+

Introduction

+

The instructions below explain how to set up NGINX as an ingress for OUDSM.

+

Install NGINX

+

Use Helm to install NGINX.

+

Configure the repository

+
    +
  1. +

    Add the Helm chart repository for installing NGINX using the following command:

    +
    $ helm repo add stable https://kubernetes.github.io/ingress-nginx
    +

    The output will look similar to the following:

    +
    "stable" has been added to your repositories
    +
  2. +
  3. +

    Update the repository using the following command:

    +
    $ helm repo update
    +

    The output will look similar to the following:

    +
    Hang tight while we grab the latest from your chart repositories...
    +...Successfully got an update from the "stable" chart repository
    +Update Complete. Happy Helming!
    +
  4. +
+

Create a namespace

+
    +
  1. +

    Create a Kubernetes namespace for NGINX:

    +
    $ kubectl create namespace <namespace>
    +

    For example:

    +
    $ kubectl create namespace mynginx
    +

    The output will look similar to the following:

    +
    namespace/mynginx created
    +
  2. +
+

Install NGINX using helm

+
    +
  1. +

    Create a $WORKDIR/kubernetes/helm/nginx-ingress-values-override.yaml that contains the following:

    +

    Note: The configuration below deploys an ingress using LoadBalancer. If you prefer to use NodePort, change the configuration accordingly. For more details about NGINX configuration see: NGINX Ingress Controller.

    +
    controller:
    +  admissionWebhooks:
    +    enabled: false
    +  extraArgs:
    +    # The secret referred to by this flag contains the default certificate to be used when accessing the catch-all server.
    +    # If this flag is not provided NGINX will use a self-signed certificate.
    +    # If the TLS Secret is in different namespace, name can be mentioned as <namespace>/<tlsSecretName>
    +    default-ssl-certificate: oudsmns/oudsm-tls-cert
    +  service:
    +    # controller service external IP addresses
    +    # externalIPs:
    +    #  - < External IP Address >
    +    # To configure Ingress Controller Service as LoadBalancer type of Service
    +    # Based on the Kubernetes configuration, External LoadBalancer would be linked to the Ingress Controller Service
    +    type: LoadBalancer
    +    # Configuration for NodePort to be used for Ports exposed through Ingress
    +    # If NodePorts are not defined/configured, Node Port would be assigned automatically by Kubernetes
    +    # These NodePorts are helpful while accessing services directly through Ingress and without having External Load Balancer.
    +    nodePorts:
    +      # For HTTP Interface exposed through LoadBalancer/Ingress
    +      http: 30080
    +      # For HTTPS Interface exposed through LoadBalancer/Ingress
    +      https: 30443
    +
  2. +
  3. +

    To install and configure NGINX ingress issue the following command:

    +
    $ helm install --namespace <namespace> \
    +--values nginx-ingress-values-override.yaml \
    +lbr-nginx stable/ingress-nginx
    +

    Where:

    +
      +
    • lbr-nginx is your deployment name
    • +
    • stable/ingress-nginx is the chart reference
    • +
    +

    For example:

    +
    $ helm install --namespace mynginx \
    +--values nginx-ingress-values-override.yaml \
    +lbr-nginx stable/ingress-nginx
    +

    The output will be similar to the following:

    +
    NAME: lbr-nginx
    +LAST DEPLOYED: <DATE>
    +NAMESPACE: mynginx
    +STATUS: deployed
    +REVISION: 1
    +TEST SUITE: None
    +NOTES:
    +The ingress-nginx controller has been installed.
    +It may take a few minutes for the LoadBalancer IP to be available.
    +You can watch the status by running 'kubectl --namespace mynginx get services -o wide -w lbr-nginx-ingress-nginx-controller'
    +  
    +An example Ingress that makes use of the controller:
    +  apiVersion: networking.k8s.io/v1
    +  kind: Ingress
    +  metadata:
    +    name: example
    +    namespace: foo
    +  spec:
    +    ingressClassName: nginx
    +    rules:
    +      - host: www.example.com
    +        http:
    +          paths:
    +            - pathType: Prefix
    +              backend:
    +                service:
    +                  name: exampleService
    +                  port:
    +                    number: 80
    +              path: /
    +    # This section is only required if TLS is to be enabled for the Ingress
    +    tls:
    +      - hosts:
    +        - www.example.com
    +        secretName: example-tls
    +
    +If TLS is enabled for the Ingress, a Secret containing the certificate and key must also be provided:
    +
    +  apiVersion: v1
    +  kind: Secret
    +  metadata:
    +    name: example-tls
    +    namespace: foo
    +  data:
    +    tls.crt: <base64 encoded cert>
    +    tls.key: <base64 encoded key>
    +  type: kubernetes.io/tls
    +
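
    After a short while you can check whether an external IP has been assigned to the controller service (this uses the mynginx namespace and lbr-nginx release name from the example above):

    $ kubectl --namespace mynginx get service lbr-nginx-ingress-nginx-controller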
  4. +
+

Access to interfaces through ingress

+

Using the Helm chart, ingress objects are created according to configuration. The following table details the rules configured in ingress object(s) for access to Oracle Unified Directory Services Manager Interfaces through ingress.

Port | NodePort | Host | Example Hostname | Path | Backend Service:Port | Example Service Name:Port
http/https | 30080/30443 | <deployment/release name>-N | oudsm-N | * | <deployment/release name>-N:http | oudsm-1:http
http/https | 30080/30443 | * | * | /oudsm /console | <deployment/release name>-lbr:http | oudsm-lbr:http
+
    +
  • In the table above, the Example Name for each Object is based on the value ‘oudsm’ as the deployment/release name for the Helm chart installation.
  • +
  • The NodePorts mentioned in the table are according to ingress configuration described in previous section.
  • +
  • When an External LoadBalancer is not available/configured, interfaces can be accessed through NodePort on the Kubernetes node.
  • +
+

Changes in /etc/hosts to validate hostname based ingress rules

+

If the LoadBalancer configuration cannot be updated to add host names for the Oracle Unified Directory Services Manager interfaces, then the following entries can be added to the /etc/hosts file on the host from which the Oracle Unified Directory Services Manager interfaces will be accessed.

+
<IP Address of External LBR or Kubernetes Node>	oudsm oudsm-1 oudsm-2 oudsm-N
+
    +
  • In the table above, host names are based on the value ‘oudsm’ as the deployment/release name for the Helm chart installation.
  • +
  • When an External LoadBalancer is not available/configured, Interfaces can be accessed through NodePort on the Kubernetes Node.
  • +
+

Validate OUDSM URLs

+
    +
  1. +

    Launch a browser and access the OUDSM console.

    +
      +
    • If using an External LoadBalancer: https://<External LBR Host>/oudsm.
    • +
    • If not using an External LoadBalancer use https://<Kubernetes Node>:30443/oudsm (see the curl example after this list).
    • +
    +
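
    If you want a quick command line check before using a browser, a curl request against the URL should return an HTTP response rather than a connection error (the -k flag skips certificate verification because the ingress may be using a self-signed certificate):

    $ curl -k -I https://<Kubernetes Node>:30443/oudsm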
  2. +
  3. +

    Access the WebLogic Administration Console using the following URL and log in with weblogic/<password>, where weblogic/<password> is the adminUser and adminPass set when creating the OUDSM instance.

    +
      +
    • If using an External LoadBalancer: https://<External LBR Host>/console.
    • +
    • If not using an External LoadBalancer use https://<Kubernetes Node>:30443/console.
    • +
    +
  4. +
+ + + + + + + + + + + + + + + + + + diff --git a/docs/23.4.1/idm-products/oudsm/configure-ingress/index.xml b/docs/23.4.1/idm-products/oudsm/configure-ingress/index.xml new file mode 100644 index 000000000..6f8041eec --- /dev/null +++ b/docs/23.4.1/idm-products/oudsm/configure-ingress/index.xml @@ -0,0 +1,14 @@ + + + + Configure an Ingress for OUDSM on Oracle Fusion Middleware on Kubernetes + /fmw-kubernetes/23.4.1/idm-products/oudsm/configure-ingress/ + Recent content in Configure an Ingress for OUDSM on Oracle Fusion Middleware on Kubernetes + Hugo -- gohugo.io + en-us + + + + + + \ No newline at end of file diff --git a/docs/23.4.1/idm-products/oudsm/create-or-update-image/index.html b/docs/23.4.1/idm-products/oudsm/create-or-update-image/index.html new file mode 100644 index 000000000..8acd06765 --- /dev/null +++ b/docs/23.4.1/idm-products/oudsm/create-or-update-image/index.html @@ -0,0 +1,4238 @@ + + + + + + + + + + + + Create or update an image :: Oracle Fusion Middleware on Kubernetes + + + + + + + + + + + + + + + + + + + + + + + + + +
Create or update an image

As described in Prepare Your Environment you can create your own OUDSM container image. If you have access to My Oracle Support (MOS), and there is a need to build a new image with an interim or one-off patch, it is recommended to use the WebLogic Image Tool to build an Oracle Unified Directory Services Manager image for production deployments.

+

Create or update an Oracle Unified Directory Services Manager image using the WebLogic Image Tool

+

Using the WebLogic Image Tool, you can create a new Oracle Unified Directory Services Manager image with PSUs and interim patches, or update an existing image with one or more interim patches.

+
+

Recommendations:

+
    +
  • Use create for creating a new Oracle Unified Directory Services Manager image containing the Oracle Unified Directory Services Manager binaries, bundle patch and interim patches. This is the recommended approach if you have access to the OUDSM patches because it optimizes the size of the image.
  • +
  • Use update for patching an existing Oracle Unified Directory Services Manager image with a single interim patch. Note that the patched image size may increase considerably due to additional image layers introduced by the patch application tool.
  • +
+
+

Create an image

+

Set up the WebLogic Image Tool

+ +
Prerequisites
+

Verify that your environment meets the following prerequisites:

+
    +
  • Docker client and daemon on the build machine, with minimum Docker version 18.03.1.ce.
  • +
  • Bash version 4.0 or later, to enable the command complete feature.
  • +
  • JAVA_HOME environment variable set to the appropriate JDK location e.g: /scratch/export/oracle/product/jdk
  • +
+
Set up the WebLogic Image Tool
+

To set up the WebLogic Image Tool:

+
    +
  1. +

    Create a working directory and change to it:

    +
    $ mkdir <workdir>
    +$ cd <workdir>
    +

    For example:

    +
    $ mkdir /scratch/imagetool-setup
    +$ cd /scratch/imagetool-setup
    +
  2. +
  3. +

    Download the latest version of the WebLogic Image Tool from the releases page.

    +
    $ wget https://github.com/oracle/weblogic-image-tool/releases/download/release-X.X.X/imagetool.zip
    +

    where X.X.X is the latest release referenced on the releases page.

    +
  4. +
  5. +

    Unzip the release ZIP file in the imagetool-setup directory.

    +
    $ unzip imagetool.zip
    +
  6. +
  7. +

    Execute the following commands to set up the WebLogic Image Tool:

    +
    $ cd <workdir>/imagetool-setup/imagetool/bin
    +$ source setup.sh
    +

    For example:

    +
    $ cd /scratch/imagetool-setup/imagetool/bin
    +$ source setup.sh
    +
  8. +
+
Validate setup
+

To validate the setup of the WebLogic Image Tool:

+
    +
  1. +

    Enter the following command to retrieve the version of the WebLogic Image Tool:

    +
    $ imagetool --version
    +
  2. +
  3. +

    Enter imagetool then press the Tab key to display the available imagetool commands:

    +
    $ imagetool <TAB>
    +cache   create  help    rebase  update
    +
  4. +
+
WebLogic Image Tool build directory
+

The WebLogic Image Tool creates a temporary Docker context directory, prefixed by wlsimgbuilder_temp, every time the tool runs. Under normal circumstances, this context directory will be deleted. However, if the process is aborted or the tool is unable to remove the directory, it is safe for you to delete it manually. By default, the WebLogic Image Tool creates the Docker context directory under the user’s home directory. If you prefer to use a different directory for the temporary context, set the environment variable WLSIMG_BLDDIR:

+
$ export WLSIMG_BLDDIR="/path/to/build/dir"
+
WebLogic Image Tool cache
+

The WebLogic Image Tool maintains a local file cache store. This store is used to look up where the Java, WebLogic Server installers, and WebLogic Server patches reside in the local file system. By default, the cache store is located in the user’s $HOME/cache directory. Under this directory, the lookup information is stored in the .metadata file. All automatically downloaded patches also reside in this directory. You can change the default cache store location by setting the environment variable WLSIMG_CACHEDIR:

+
$ export WLSIMG_CACHEDIR="/path/to/cachedir"
+
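
Once installers and patches have been added, you can list what the tool currently has cached using the listItems subcommand of the WebLogic Image Tool cache command (this assumes imagetool is on your PATH after sourcing setup.sh):

$ imagetool cache listItems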
Set up additional build scripts
+

Creating an Oracle Unified Directory Services Manager container image using the WebLogic Image Tool requires additional container scripts for Oracle Unified Directory Services Manager domains.

+
    +
  1. +

    Clone the docker-images repository to set up those scripts. In these steps, this directory is DOCKER_REPO:

    +
    $ cd <workdir>/imagetool-setup
    +$ git clone https://github.com/oracle/docker-images.git
    +

    For example:

    +
    $ cd /scratch/imagetool-setup
    +$ git clone https://github.com/oracle/docker-images.git
    +
  2. +
+
+

Note: If you want to create the image, continue with the following steps. Otherwise, to update the image see Update an image.

+
+

Create an image

+

After setting up the WebLogic Image Tool, follow these steps to use the WebLogic Image Tool to create a new Oracle Unified Directory Services Manager image.

+
Download the Oracle Unified Directory Services Manager installation binaries and patches
+

You must download the required Oracle Unified Directory Services Manager installation binaries and patches as listed below from the Oracle Software Delivery Cloud and save them in a directory of your choice.

+

The installation binaries and patches required are:

+
    +
  • +

    Oracle Unified Directory 12.2.1.4.0

    +
      +
    • fmw_12.2.1.4.0_oud.jar
    • +
    +
  • +
  • +

    Oracle Fusion Middleware 12c Infrastructure 12.2.1.4.0

    +
      +
    • fmw_12.2.1.4.0_infrastructure.jar
    • +
    +
  • +
  • +

    OUDSM and FMW Infrastructure Patches:

    +
      +
    • View document ID 2723908.1 on My Oracle Support. In the Container Image Download/Patch Details section, locate the Oracle Unified Directory Services Manager (OUDSM) table. For the latest PSU click the README link in the Documentation column. In the README, locate the “Installed Software” section. All the patch numbers to be downloaded are listed here. Download all these individual patches from My Oracle Support.
    • +
    +
  • +
  • +

    Oracle JDK v8

    +
      +
    • jdk-8uXXX-linux-x64.tar.gz where XXX is the JDK version referenced in the README above.
    • +
    +
  • +
+
Update required build files
+

The following files in the code repository location <imagetool-setup-location>/docker-images/OracleUnifiedDirectorySM/imagetool/12.2.1.4.0 are used for creating the image:

+
    +
  • additionalBuildCmds.txt
  • +
  • buildArgs
  • +
+
    +
  1. +

    Edit the <workdir>/imagetool-setup/docker-images/OracleUnifiedDirectorySM/imagetool/12.2.1.4.0/buildArgs file and change %DOCKER_REPO%, %JDK_VERSION% and %BUILDTAG% appropriately.

    +

    For example:

    +
    create
    +--jdkVersion=8u321
    +--type oud_wls
    +--version=12.2.1.4.0
    +--tag=oudsm-latestpsu:12.2.1.4.0
    +--pull
    +--installerResponseFile /scratch/imagetool-setup/docker-images/OracleFMWInfrastructure/dockerfiles/12.2.1.4/install.file,/scratch/imagetool-setup/docker-images/OracleUnifiedDirectorySM/dockerfiles/12.2.1.4.0/install/oud.response
    +--additionalBuildCommands /scratch/imagetool-setup/docker-images/OracleUnifiedDirectorySM/imagetool/12.2.1.4.0/additionalBuildCmds.txt
    +--additionalBuildFiles /scratch/imagetool-setup/docker-images/OracleUnifiedDirectorySM/dockerfiles/12.2.1.4.0/container-scripts
    +
  2. +
  3. +

    The <workdir>/imagetool-setup/docker-images/OracleUnifiedDirectorySM/imagetool/12.2.1.4.0/additionalBuildCmds.txt file contains additional build commands. You may edit this file if you want to customize the image further.

    +
  4. +
  5. +

    Edit the <workdir>/imagetool-setup/docker-images/OracleFMWInfrastructure/dockerfiles/12.2.1.4/install.file and under the GENERIC section add the line INSTALL_TYPE="Fusion Middleware Infrastructure". For example:

    +
    [GENERIC]
    +INSTALL_TYPE="Fusion Middleware Infrastructure"
    +DECLINE_SECURITY_UPDATES=true
    +SECURITY_UPDATES_VIA_MYORACLESUPPORT=false
    +
  6. +
+
Create the image
+
    +
  1. +

    Add a JDK package to the WebLogic Image Tool cache. For example:

    +
    $ imagetool cache addInstaller --type jdk --version 8uXXX --path <download location>/jdk-8uXXX-linux-x64.tar.gz
    +

    where XXX is the JDK version downloaded

    +
  2. +
  3. +

    Add the downloaded installation binaries to the WebLogic Image Tool cache. For example:

    +
    $ imagetool cache addInstaller --type OUD --version 12.2.1.4.0 --path <download location>/fmw_12.2.1.4.0_oud.jar
    +   
    +$ imagetool cache addInstaller --type fmw --version 12.2.1.4.0 --path <download location>/fmw_12.2.1.4.0_infrastructure.jar
    +
  4. +
  5. +

    Add the downloaded OPatch patch to the WebLogic Image Tool cache. For example:

    +
    $ imagetool cache addEntry --key 28186730_13.9.4.2.8 --value <download location>/p28186730_139428_Generic.zip
    +
  6. +
  7. +

    Add the rest of the downloaded product patches to the WebLogic Image Tool cache:

    +
    $ imagetool cache addEntry --key <patch>_12.2.1.4.0 --value <download location>/p<patch>_122140_Generic.zip
    +

    For example:

    +
    $ imagetool cache addEntry --key 33727616_12.2.1.4.0 --value <download location>/p33727616_122140_Generic.zip
    +$ imagetool cache addEntry --key 33093748_12.2.1.4.0 --value <download location>/p33093748_122140_Generic.zip
    +$ imagetool cache addEntry --key 32720458_12.2.1.4.0 --value <download location>/p32720458_122140_Generic.zip
    +$ imagetool cache addEntry --key 33791665_12.2.1.4.220105 --value <download location>/p33791665_12214220105_Generic.zip
    +$ imagetool cache addEntry --key 33723124_12.2.1.4.0 --value <download location>/p33723124_122140_Generic.zip
    +$ imagetool cache addEntry --key 32647448_12.2.1.4.0 --value <download location>/p32647448_122140_Linux-x86-64.zip
    +$ imagetool cache addEntry --key 33591019_12.2.1.4.0 --value <download location>/p33591019_122140_Generic.zip
    +$ imagetool cache addEntry --key 32999272_12.2.1.4.0 --value <download location>/p32999272_122140_Generic.zip
    +$ imagetool cache addEntry --key 33448950_12.2.1.4.0 --value <download location>/p33448950_122140_Generic.zip
    +$ imagetool cache addEntry --key 33697227_12.2.1.4.0 --value <download location>/p33697227_122140_Generic.zip
    +$ imagetool cache addEntry --key 33678607_12.2.1.4.0 --value <download location>/p33678607_122140_Generic.zip
    +$ imagetool cache addEntry --key 33735326_12.2.1.4.220105 --value <download location>/p33735326_12214220105_Generic.zip
    +
  8. +
  9. +

    Edit the <workdir>/imagetool-setup/docker-images/OracleUnifiedDirectorySM/imagetool/12.2.1.4.0/buildArgs file and append the product patches and opatch patch as follows:

    +
    --patches 33727616_12.2.1.4.0,33093748_12.2.1.4.0,32720458_12.2.1.4.0,33791665_12.2.1.4.220105,33723124_12.2.1.4.0,32647448_12.2.1.4.0,33591019_12.2.1.4.0,32999272_12.2.1.4.0,33448950_12.2.1.4.0,33697227_12.2.1.4.0,33678607_12.2.1.4.0,33735326_12.2.1.4.220105
    +--opatchBugNumber=28186730_13.9.4.2.8
    +

    An example buildArgs file is now as follows:

    +
    create
    +--jdkVersion=8u321
    +--type oud_wls
    +--version=12.2.1.4.0
    +--tag=oudsm-latestpsu:12.2.1.4.0
    +--pull
    +--installerResponseFile /scratch/imagetool-setup/docker-images/OracleFMWInfrastructure/dockerfiles/12.2.1.4/install.file,/scratch/imagetool-setup/docker-images/OracleUnifiedDirectorySM/dockerfiles/12.2.1.4.0/install/oud.response
    +--additionalBuildCommands /scratch/imagetool-setup/docker-images/OracleUnifiedDirectorySM/imagetool/12.2.1.4.0/additionalBuildCmds.txt
    +--additionalBuildFiles /scratch/imagetool-setup/docker-images/OracleUnifiedDirectorySM/dockerfiles/12.2.1.4.0/container-scripts
    +--patches 33727616_12.2.1.4.0,33093748_12.2.1.4.0,32720458_12.2.1.4.0,33791665_12.2.1.4.220105,33723124_12.2.1.4.0,32647448_12.2.1.4.0,33591019_12.2.1.4.0,32999272_12.2.1.4.0,33448950_12.2.1.4.0,33697227_12.2.1.4.0,33678607_12.2.1.4.0,33735326_12.2.1.4.220105
    +--opatchBugNumber=28186730_13.9.4.2.8
    +
    +

    Note: In the buildArgs file:

    +
      +
    • --jdkVersion value must match the --version value used in the imagetool cache addInstaller command for --type jdk.
    • +
    • --version value must match the --version value used in the imagetool cache addInstaller command for --type OUDSM.
    • +
    +
    +
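
    For reference, the corresponding imagetool cache addInstaller commands might look like the following. This is a sketch only: the JDK and OUDSM installer file names are placeholders for whatever you actually downloaded.

    +
    $ imagetool cache addInstaller --type jdk --version 8u321 --path <download location>/jdk-8u321-linux-x64.tar.gz
    +$ imagetool cache addInstaller --type OUDSM --version 12.2.1.4.0 --path <download location>/<oudsm installer>.jar
    +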

    Refer to this page for the complete list of options available with the WebLogic Image Tool create command.

    +
  10. +
  11. +

    Create the Oracle Unified Directory Services Manager image:

    +
    $ imagetool @<absolute path to buildargs file> --fromImage ghcr.io/oracle/oraclelinux:7-slim
    +
    +

    Note: Make sure that the absolute path to the buildargs file is prepended with a @ character, as shown in the example above.

    +
    +

    For example:

    +
    $ imagetool @<imagetool-setup-location>/docker-images/OracleUnifiedDirectorySM/imagetool/12.2.1.4.0/buildArgs --fromImage ghcr.io/oracle/oraclelinux:7-slim
    +
  12. +
  13. +

    Check the created image using the docker images command:

    +
    $ docker images | grep oudsm
    +

    The output will look similar to the following:

    +
    oudsm-latestpsu                          12.2.1.4.0            f6dd9d2ca0e6        4 minutes ago       3.72GB
    +
  14. +
  15. +

    Run the following command to save the container image to a tar file:

    +
    $ docker save -o <path>/<file>.tar <image>
    +

    For example:

    +
    $ docker save -o $WORKDIR/oudsm-latestpsu.tar oudsm-latestpsu:12.2.1.4.0
    +
  16. +
+

Update an image

+

The steps below show how to update an existing Oracle Unified Directory Services Manager image with an interim patch.

+

The container image to be patched must be loaded in the local docker images repository before attempting these steps.

+
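
If the image was previously saved to a tar file (for example, with docker save as described in the previous section), one way to load it into the local repository is with docker load. The file name below is illustrative only:

+
$ docker load -i <path>/<file>.tar
+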

In the examples below the image oracle/oudsm:12.2.1.4.0 is updated with an interim patch.

+
$ docker images
+
+REPOSITORY     TAG          IMAGE ID          CREATED             SIZE
+oracle/oudsm   12.2.1.4.0   b051804ba15f      3 months ago        3.72GB
+
    +
  1. +

    Set up the WebLogic Image Tool.

    +
  2. +
  3. +

    Download the required interim patch and the latest OPatch (28186730) from My Oracle Support and save them in a directory of your choice.

    +
  4. +
  5. +

    Add the OPatch patch to the WebLogic Image Tool cache, for example:

    +
    $ imagetool cache addEntry --key 28186730_13.9.4.2.8 --value <downloaded-patches-location>/p28186730_139428_Generic.zip
    +
  6. +
  7. +

    Execute the imagetool cache addEntry command for each patch to add the required patch(es) to the WebLogic Image Tool cache. For example, to add patch p33521773_12214211008_Generic.zip:

    +
    $ imagetool cache addEntry --key=33521773_12.2.1.4.211008 --value <downloaded-patches-location>/p33521773_12214211008_Generic.zip
    +
  8. +
  9. +

    Provide the following arguments to the WebLogic Image Tool update command:

    +
      +
    • --fromImage - Identify the image that needs to be updated. In the example below, the image to be updated is oracle/oudsm:12.2.1.4.0.
    • +
    • --patches - Multiple patches can be specified as a comma-separated list.
    • +
    • --tag - Specify the new tag to be applied for the image being built.
    • +
    +

    Refer here for the complete list of options available with the WebLogic Image Tool update command.

    +
    +

    Note: The WebLogic Image Tool cache should have the latest OPatch zip. The WebLogic Image Tool will update the OPatch if it is not already updated in the image.

    +
    +

    For example:

    +
    $ imagetool update --fromImage oracle/oudsm:12.2.1.4.0 --tag=oracle/oudsm-new:12.2.1.4.0 --patches=33521773_12.2.1.4.211008 --opatchBugNumber=28186730_13.9.4.2.8
    +
    +

    Note: If the command fails because the files in the image being upgraded are not owned by oracle:oracle, then add the parameter --chown <userid>:<groupid> to correspond with the values returned in the error.

    +
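
    For illustration only, the earlier update command with the --chown parameter added might look like this (the oracle:root value is an assumption; use the user and group reported in the error):

    +
    $ imagetool update --fromImage oracle/oudsm:12.2.1.4.0 --tag=oracle/oudsm-new:12.2.1.4.0 --patches=33521773_12.2.1.4.211008 --opatchBugNumber=28186730_13.9.4.2.8 --chown oracle:root
    +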
    +
  10. +
  11. +

    Check the built image using the docker images command:

    +
    $ docker images | grep oudsm
    +

    The output will look similar to the following:

    +
    REPOSITORY         TAG          IMAGE ID        CREATED             SIZE
    +oracle/oudsm-new     12.2.1.4.0   78ccd1ad67eb    5 minutes ago       1.11GB
    +oracle/oudsm         12.2.1.4.0   b051804ba15f    3 months ago        1.04GB
    +
  12. +
  13. +

    Run the following command to save the patched container image to a tar file:

    +
    $ docker save -o <path>/<file>.tar <image>
    +

    For example:

    +
    $ docker save -o $WORKDIR/oudsm-new.tar oracle/oudsm-new:12.2.1.4.0
    +
  14. +
+ + + + + +
+ +
+ + +
+ + +
+ + + +
+ +
+
+
+ + + + + + + + + + + + + + + + + + diff --git a/docs/23.4.1/idm-products/oudsm/create-or-update-image/index.xml b/docs/23.4.1/idm-products/oudsm/create-or-update-image/index.xml new file mode 100644 index 000000000..62f85b5f0 --- /dev/null +++ b/docs/23.4.1/idm-products/oudsm/create-or-update-image/index.xml @@ -0,0 +1,14 @@ + + + + Create or update an image on Oracle Fusion Middleware on Kubernetes + /fmw-kubernetes/23.4.1/idm-products/oudsm/create-or-update-image/ + Recent content in Create or update an image on Oracle Fusion Middleware on Kubernetes + Hugo -- gohugo.io + en-us + + + + + + \ No newline at end of file diff --git a/docs/23.4.1/idm-products/oudsm/create-oudsm-instances/index.html b/docs/23.4.1/idm-products/oudsm/create-oudsm-instances/index.html new file mode 100644 index 000000000..269cb8856 --- /dev/null +++ b/docs/23.4.1/idm-products/oudsm/create-oudsm-instances/index.html @@ -0,0 +1,4569 @@ + + + + + + + + + + + + Create Oracle Unified Directory Services Manager Instances :: Oracle Fusion Middleware on Kubernetes + + + + + + + + + + + + + + + + + + + + + + + + + +
+
+
+ +
+ +
+ +
+ +
+ +
+ +

+ + Create Oracle Unified Directory Services Manager Instances +

+ + + + + + + +
    +
  1. Introduction
  2. +
  3. Create a Kubernetes namespace
  4. +
  5. Create a Kubernetes secret for the container registry
  6. +
  7. Create a persistent volume directory
  8. +
  9. The oudsm Helm chart
  10. +
  11. Create OUDSM instances
  12. +
  13. Helm command output
  14. +
  15. Verify the OUDSM deployment
  16. +
  17. Undeploy an OUDSM deployment
  18. +
  19. Appendix: Configuration parameters
  20. +
+

Introduction

+

This chapter demonstrates how to deploy Oracle Unified Directory Services Manager (OUDSM) 12c instance(s) using the Helm package manager for Kubernetes.

+

Based on the configuration, this chart deploys the following objects in the specified namespace of a Kubernetes cluster.

+
    +
  • Service Account
  • +
  • Secret
  • +
  • Persistent Volume and Persistent Volume Claim
  • +
  • Pod(s)/Container(s) for Oracle Unified Directory Services Manager Instances
  • +
  • Services for interfaces exposed through Oracle Unified Directory Services Manager Instances
  • +
  • Ingress configuration
  • +
+

Create a Kubernetes namespace

+

Create a Kubernetes namespace for the OUDSM deployment by running the following command:

+
$ kubectl create namespace <namespace>
+

For example:

+
$ kubectl create namespace oudsmns
+

The output will look similar to the following:

+
namespace/oudsmns created
+

Create a Kubernetes secret for the container registry

+

Create a Kubernetes secret that stores the credentials for the container registry where the OUDSM image is stored. This step must be followed if using Oracle Container Registry or your own private container registry. If you are not using a container registry and have loaded the images on each of the master and worker nodes, you can skip this step.

+
    +
  1. +

    Run the following command to create the secret:

    +
    kubectl create secret docker-registry "orclcred" --docker-server=<CONTAINER_REGISTRY> \
    +--docker-username="<USER_NAME>" \
    +--docker-password=<PASSWORD> --docker-email=<EMAIL_ID> \
    +--namespace=<domain_namespace>
    +

    For example, if using Oracle Container Registry:

    +
    kubectl create secret docker-registry "orclcred" --docker-server=container-registry.oracle.com \
    +--docker-username="user@example.com" \
    +--docker-password=password --docker-email=user@example.com \
    +--namespace=oudsmns
    +

    Replace <USER_NAME> and <PASSWORD> with the credentials for the registry with the following caveats:

    +
      +
    • +

      If using Oracle Container Registry to pull the OUDSM container image, this is the username and password used to login to Oracle Container Registry. Before you can use this image you must login to Oracle Container Registry, navigate to Middleware > oudsm_cpu and accept the license agreement.

      +
    • +
    • +

      If using your own container registry to store the OUDSM container image, this is the username and password (or token) for your container registry.

      +
    • +
    +

    The output will look similar to the following:

    +
    secret/orclcred created
    +
  2. +
+

Create a persistent volume directory

+

As referenced in Prerequisites the nodes in the Kubernetes cluster must have access to a persistent volume such as a Network File System (NFS) mount or a shared file system.

+

In this example /scratch/shared/ is a shared directory accessible from all nodes.

+
    +
  1. +

    On the master node run the following command to create the oudsm_user_projects directory:

    +
    $ cd <persistent_volume>
    +$ mkdir oudsm_user_projects   
    +$ sudo chown -R 1000:0 oudsm_user_projects
    +

    For example:

    +
    $ cd /scratch/shared
    +$ mkdir oudsm_user_projects   
    +$ sudo chown -R 1000:0 oudsm_user_projects
    +
  2. +
  3. +

    On the master node run the following to ensure it is possible to read and write to the persistent volume:

    +
    $ cd <persistent_volume>/oudsm_user_projects
    +$ touch filemaster.txt
    +$ ls filemaster.txt
    +

    For example:

    +
    $ cd /scratch/shared/oudsm_user_projects
    +$ touch filemaster.txt
    +$ ls filemaster.txt
    +

    On the first worker node run the following to ensure it is possible to read and write to the persistent volume:

    +
    $ cd /scratch/shared/oudsm_user_projects
    +$ ls filemaster.txt
    +$ touch fileworker1.txt
    +$ ls fileworker1.txt
    +

    Repeat the above for any other worker nodes, e.g. fileworker2.txt, etc. Once you have proven that it is possible to read and write from each node to the persistent volume, delete the files created (see the example after this list).

    +
  4. +
+
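
For example, to remove the test files created above from the persistent volume (include one file per worker node you tested):

+
$ cd /scratch/shared/oudsm_user_projects
+$ rm filemaster.txt fileworker1.txt fileworker2.txt
+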

The oudsm Helm chart

+

The oudsm Helm chart allows you to create or deploy Oracle Unified Directory Services Manager instances along with Kubernetes objects in a specified namespace.

+

The deployment can be initiated by running the following Helm command with reference to the oudsm Helm chart, along with configuration parameters according to your environment.

+
cd $WORKDIR/kubernetes/helm
+$ helm install --namespace <namespace> \
+<Configuration Parameters> \
+<deployment/release name> \
+<Helm Chart Path/Name>
+

Configuration Parameters (override values in chart) can be passed on with --set arguments on the command line and/or with -f / --values arguments when referring to files.
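
As an illustration only (the release name, override value, and values file name here are arbitrary), the same parameter can be supplied either inline or from a file:

+
$ helm install --namespace oudsmns --set replicaCount=1 oudsm oudsm
+$ helm install --namespace oudsmns --values my-overrides.yaml oudsm oudsm
+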

+

Note: The examples in Create OUDSM instances below provide values which allow the user to override the default values provided by the Helm chart. A full list of configuration parameters and their default values is shown in Appendix: Configuration parameters.

+

For more details about the helm command and parameters, please execute helm --help and helm install --help.

+

Create OUDSM instances

+

You can create OUDSM instances using one of the following methods:

+
    +
  1. Using a YAML file
  2. +
  3. Using --set argument
  4. +
+

Using a YAML file

+
    +
  1. +

    Navigate to the $WORKDIR/kubernetes/helm directory:

    +
    $ cd $WORKDIR/kubernetes/helm
    +
  2. +
  3. +

    Create an oudsm-values-override.yaml as follows:

    +
    image:
    +  repository: <image_location>
    +  tag: <image_tag>
    +  pullPolicy: IfNotPresent
    +imagePullSecrets:
    +  - name: orclcred
    +oudsm:
    +  adminUser: weblogic
    +  adminPass: <password>
    +persistence:
    +  type: filesystem
    +  filesystem:
    +    hostPath: 
    +      path: <persistent_volume>/oudsm_user_projects
    +

    For example:

    +
    image:
    +  repository: container-registry.oracle.com/middleware/oudsm_cpu
    +  tag: 12.2.1.4-jdk8-ol7-<October'23>
    +  pullPolicy: IfNotPresent
    +imagePullSecrets:
    +  - name: orclcred
    +oudsm:
    +  adminUser: weblogic
    +  adminPass: <password>
    +persistence:
    +  type: filesystem
    +  filesystem:
    +    hostPath: 
    +      path: /scratch/shared/oudsm_user_projects
    +

    The following caveats exist:

    +
      +
    • +

      Replace <password> with the relevant password.

      +
    • +
    • +

      If you are not using Oracle Container Registry or your own container registry for your OUDSM container image, then you can remove the following:

      +
      imagePullSecrets:
      +  - name: orclcred
      +
    • +
    • +

      If using NFS for your persistent volume, then change the persistence section as follows:

      +
    • +
    +
    persistence:
    +  type: networkstorage
    +  networkstorage:
    +    nfs: 
    +      path: <persistent_volume>/oudsm_user_projects
    +      server: <NFS IP address>
    +
  4. +
  5. +

    Run the following command to deploy OUDSM:

    +
    $ helm install --namespace <namespace> \
    +--values oudsm-values-override.yaml \
    +<release_name> oudsm
    +

    For example:

    +
    $ helm install --namespace oudsmns \
    +--values oudsm-values-override.yaml \
    +oudsm oudsm
    +
  6. +
  7. +

    Check the OUDSM deployment as per Verify the OUDSM deployment

    +
  8. +
+

Using --set argument

+
    +
  1. +

    Navigate to the $WORKDIR/kubernetes/helm directory:

    +
    $ cd $WORKDIR/kubernetes/helm
    +
  2. +
  3. +

    Run the following command to create OUDSM instance:

    +
    $ helm install --namespace oudsmns \
    +--set oudsm.adminUser=weblogic,oudsm.adminPass=<password>,persistence.filesystem.hostPath.path=<persistent_volume>/oudsm_user_projects,image.repository=<image_location>,image.tag=<image_tag> \
    +--set imagePullSecrets[0].name="orclcred" \
    +<release_name> oudsm
    +

    For example:

    +
    $ helm install --namespace oudsmns \
    +--set oudsm.adminUser=weblogic,oudsm.adminPass=<password>,persistence.filesystem.hostPath.path=/scratch/shared/oudsm_user_projects,image.repository=container-registry.oracle.com/middleware/oudsm_cpu,image.tag=12.2.1.4-jdk8-ol7-<October'23> \
    +--set imagePullSecrets[0].name="orclcred" \
    +oudsm oudsm
    +

    The following caveats exist:

    +
      +
    • Replace <password> with the relevant password.
    • +
    • If you are not using Oracle Container Registry or your own container registry for your OUDSM container image, then you can remove the following: --set imagePullSecrets[0].name="orclcred"
    • +
    • If using NFS for your persistent volume then use persistence.networkstorage.nfs.path=<persistent_volume>/oudsm_user_projects,persistence.networkstorage.nfs.server=<NFS IP address>, as shown in the example after this list.
    • +
    +
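
    For illustration, a full command using NFS-backed persistence might look like the following. This is a sketch only; the password, registry, tag, and NFS server values are placeholders:

    +
    $ helm install --namespace oudsmns \
    +--set oudsm.adminUser=weblogic,oudsm.adminPass=<password>,persistence.type=networkstorage,persistence.networkstorage.nfs.path=<persistent_volume>/oudsm_user_projects,persistence.networkstorage.nfs.server=<NFS IP address>,image.repository=<image_location>,image.tag=<image_tag> \
    +--set imagePullSecrets[0].name="orclcred" \
    +oudsm oudsm
    +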
  4. +
  5. +

    Check the OUDSM deployment as per Verify the OUDSM deployment

    +
  6. +
+

Helm command output

+

In all the examples above, the following output is shown following a successful execution of the helm install command.

+
NAME: oudsm
+LAST DEPLOYED: <DATE>
+NAMESPACE: oudsmns
+STATUS: deployed
+REVISION: 1
+TEST SUITE: None
+

Verify the OUDSM deployment

+

Run the following command to verify the OUDSM deployment:

+
$ kubectl --namespace <namespace> get pod,service,secret,pv,pvc,ingress -o wide
+

For example:

+
$ kubectl --namespace oudsmns get pod,service,secret,pv,pvc,ingress -o wide
+

The output will look similar to the following:

+
NAME          READY   STATUS    RESTARTS   AGE   IP            NODE             NOMINATED NODE   READINESS GATES
+pod/oudsm-1   1/1     Running   0          73m   10.244.0.19   <worker-node>   <none>           <none>
+	
+NAME                TYPE        CLUSTER-IP      EXTERNAL-IP   PORT(S)             AGE   SELECTOR
+service/oudsm-1     ClusterIP   10.96.108.200   <none>        7001/TCP,7002/TCP   73m   app.kubernetes.io/instance=oudsm,app.kubernetes.io/name=oudsm,oudsm/instance=oudsm-1
+service/oudsm-lbr   ClusterIP   10.96.41.201    <none>        7001/TCP,7002/TCP   73m   app.kubernetes.io/instance=oudsm,app.kubernetes.io/name=oudsm
+	
+NAME                                 TYPE                                  DATA   AGE
+secret/orclcred                      kubernetes.io/dockerconfigjson        1      3h13m
+secret/oudsm-creds                   opaque                                2      73m
+secret/oudsm-token-ksr4g             kubernetes.io/service-account-token   3      73m
+secret/sh.helm.release.v1.oudsm.v1   helm.sh/release.v1                    1      73m
+	
+NAME                            CAPACITY   ACCESS MODES   RECLAIM POLICY   STATUS   CLAIM                   STORAGECLASS   REASON   AGE   VOLUMEMODE
+persistentvolume/oudsm-pv       30Gi       RWX            Retain           Bound    myoudsmns/oudsm-pvc     manual                  73m   Filesystem
+
+NAME                              STATUS   VOLUME     CAPACITY   ACCESS MODES   STORAGECLASS   AGE   VOLUMEMODE
+persistentvolumeclaim/oudsm-pvc   Bound    oudsm-pv   30Gi       RWX            manual         73m   Filesystem
+
+NAME                                     HOSTS                               ADDRESS          PORTS   AGE
+ingress.extensions/oudsm-ingress-nginx   oudsm-1,oudsm-2,oudsm + 1 more...   100.102.51.230   80      73m
+

Note: It will take several minutes before all the services listed above show. While the oudsm pods show 0/1 in the READY column, the pod has started but the OUDSM server associated with it is still starting. While the pod is starting you can check the startup status in the pod logs, by running the following command:

+
$ kubectl logs oudsm-1 -n oudsmns
+

Note: If the OUDSM deployment fails, additionally refer to Troubleshooting for instructions on how to describe the failing pod(s). +Once the problem is identified, follow Undeploy an OUDSM deployment to clean down the deployment before deploying again.

+
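
For example, to describe a failing pod (the pod name shown is illustrative):

+
$ kubectl describe pod oudsm-1 -n oudsmns
+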

Kubernetes Objects

+

Kubernetes objects created by the Helm chart are detailed in the table below:

+ + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + +
Type | Name | Example Name | Purpose
Service Account | <deployment/release name> | oudsm | Kubernetes Service Account for the Helm Chart deployment
Secret | <deployment/release name>-creds | oudsm-creds | Secret object for Oracle Unified Directory Services Manager related critical values like passwords
Persistent Volume | <deployment/release name>-pv | oudsm-pv | Persistent Volume for user_projects mount.
Persistent Volume Claim | <deployment/release name>-pvc | oudsm-pvc | Persistent Volume Claim for user_projects mount.
Pod | <deployment/release name>-N | oudsm-1, oudsm-2, … | Pod(s)/Container(s) for Oracle Unified Directory Services Manager Instances
Service | <deployment/release name>-N | oudsm-1, oudsm-2, … | Service(s) for HTTP and HTTPS interfaces from Oracle Unified Directory Services Manager instance <deployment/release name>-N
Ingress | <deployment/release name>-ingress-nginx | oudsm-ingress-nginx | Ingress Rules for HTTP and HTTPS interfaces.
+
    +
  • In the table above, the Example Name for each Object is based on the value ‘oudsm’ as the deployment/release name for the Helm chart installation.
  • +
+

Ingress Configuration

+

With an OUDSM instance now deployed, you are ready to configure an ingress controller to direct traffic to OUDSM, as per Configure an ingress for OUDSM.

+

Undeploy an OUDSM deployment

+

Delete the OUDSM deployment

+
    +
  1. +

    Find the deployment release name:

    +
    $ helm --namespace <namespace> list
    +

    For example:

    +
    $ helm --namespace oudsmns list
    +

    The output will look similar to the following:

    +
    NAME    NAMESPACE       REVISION        UPDATED    STATUS          CHART           APP VERSION
    +oudsm   oudsmns         2               <DATE>     deployed        oudsm-0.1       12.2.1.4.0
    +
  2. +
  3. +

    Delete the deployment using the following command:

    +
    $ helm uninstall --namespace <namespace> <release>
    +

    For example:

    +
    $ helm uninstall --namespace oudsmns oudsm
    +release "oudsm" uninstalled
    +
  4. +
+

Delete the persistent volume contents

+
    +
  1. +

    Delete the contents of the oudsm_user_projects directory in the persistent volume:

    +
    $ cd <persistent_volume>/oudsm_user_projects
    +$ rm -rf *
    +

    For example:

    +
    $ cd /scratch/shared/oudsm_user_projects
    +$ rm -rf *
    +
  2. +
+

Appendix: Configuration Parameters

+

The following table lists the configurable parameters of the ‘oudsm’ chart and their default values.

+ + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + +
Parameter | Description | Default Value
replicaCount | Number of Oracle Unified Directory Services Manager instances/pods/services to be created | 1
restartPolicyName | restartPolicy to be configured for each POD containing Oracle Unified Directory Services Manager instance | OnFailure
image.repository | Oracle Unified Directory Services Manager Image Registry/Repository and name. Based on this, image parameter would be configured for Oracle Unified Directory Services Manager pods/containers | oracle/oudsm
image.tag | Oracle Unified Directory Services Manager Image Tag. Based on this, image parameter would be configured for Oracle Unified Directory Services Manager pods/containers | 12.2.1.4.0
image.pullPolicy | policy to pull the image | IfNotPresent
imagePullSecrets.name | name of Secret resource containing private registry credentials | regcred
nameOverride | override the fullname with this name |
fullnameOverride | Overrides the fullname with the provided string |
serviceAccount.create | Specifies whether a service account should be created | true
serviceAccount.name | If not set and create is true, a name is generated using the fullname template | oudsm-< fullname >-token-< randomalphanum >
podSecurityContext | Security context policies to add to the controller pod |
securityContext | Security context policies to add by default |
service.type | type of controller service to create | ClusterIP
nodeSelector | node labels for pod assignment |
tolerations | node taints to tolerate |
affinity | node/pod affinities |
ingress.enabled |  | true
ingress.type | Supported value: nginx | nginx
ingress.host | Hostname to be used with Ingress Rules. If not set, hostname would be configured according to fullname. Hosts would be configured as < fullname >-http.< domain >, < fullname >-http-0.< domain >, < fullname >-http-1.< domain >, etc. |
ingress.domain | Domain name to be used with Ingress Rules. In ingress rules, hosts would be configured as < host >.< domain >, < host >-0.< domain >, < host >-1.< domain >, etc. |
ingress.backendPort |  | http
ingress.nginxAnnotations |  | { kubernetes.io/ingress.class: "nginx" nginx.ingress.kubernetes.io/affinity-mode: "persistent" nginx.ingress.kubernetes.io/affinity: "cookie" }
ingress.ingress.tlsSecret | Secret name to use an already created TLS Secret. If such secret is not provided, one would be created with name < fullname >-tls-cert. If the TLS Secret is in different namespace, name can be mentioned as < namespace >/< tlsSecretName > |
ingress.certCN | Subject's common name (cn) for SelfSigned Cert. | < fullname >
ingress.certValidityDays | Validity of Self-Signed Cert in days | 365
secret.enabled | If enabled it will use the secret created with base64 encoding. If value is false, secret would not be used and input values (through --set, --values, etc.) would be used while creation of pods. | true
secret.name | secret name to use an already created Secret | oudsm-< fullname >-creds
secret.type | Specifies the type of the secret | Opaque
persistence.enabled | If enabled, it will use the persistent volume. If value is false, PV and PVC would not be used and pods would be using the default emptyDir mount volume. | true
persistence.pvname | pvname to use an already created Persistent Volume. If blank will use the default name | oudsm-< fullname >-pv
persistence.pvcname | pvcname to use an already created Persistent Volume Claim. If blank will use default name | oudsm-< fullname >-pvc
persistence.type | supported values: either filesystem or networkstorage or custom | filesystem
persistence.filesystem.hostPath.path | The path location mentioned should be created and accessible from the local host provided with necessary privileges for the user. | /scratch/shared/oudsm_user_projects
persistence.networkstorage.nfs.path | Path of NFS Share location | /scratch/shared/oudsm_user_projects
persistence.networkstorage.nfs.server | IP or hostname of NFS Server | 0.0.0.0
persistence.custom.* | Based on values/data, YAML content would be included in PersistenceVolume Object |
persistence.accessMode | Specifies the access mode of the location provided | ReadWriteMany
persistence.size | Specifies the size of the storage | 10Gi
persistence.storageClass | Specifies the storageclass of the persistence volume. | empty
persistence.annotations | specifies any annotations that will be used | { }
oudsm.adminUser | Weblogic Administration User | weblogic
oudsm.adminPass | Password for Weblogic Administration User |
oudsm.startupTime | Expected startup time. After specified seconds readinessProbe would start | 900
oudsm.livenessProbeInitialDelay | Parameter to decide livenessProbe initialDelaySeconds | 1200
elk.logStashImage | The version of logstash you want to install | logstash:8.3.1
elk.sslenabled | If SSL is enabled for ELK set the value to true, or if NON-SSL set to false. This value must be lowercase | true
elk.eshosts | The URL for sending logs to Elasticsearch. HTTP if NON-SSL is used | https://elasticsearch.example.com:9200
elk.esuser | The name of the user for logstash to access Elasticsearch | logstash_internal
elk.espassword | The password for ELK_USER | password
elk.esapikey | The API key details | apikey
elk.esindex | The log name | oudsmlogs-00001
elk.imagePullSecrets | secret to be used for pulling logstash image | dockercred
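
Any of the parameters above can also be changed after installation with helm upgrade. For illustration only (the parameter choices here are arbitrary and the release and namespace names match the earlier examples):

+
$ helm upgrade --namespace oudsmns --set replicaCount=2,ingress.certValidityDays=730 oudsm oudsm --reuse-values
+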
+ + + + + +
+ +
+ + +
+ + +
+ + + +
+ +
+
+
+ + + + + + + + + + + + + + + + + + diff --git a/docs/23.4.1/idm-products/oudsm/create-oudsm-instances/index.xml b/docs/23.4.1/idm-products/oudsm/create-oudsm-instances/index.xml new file mode 100644 index 000000000..b7395da67 --- /dev/null +++ b/docs/23.4.1/idm-products/oudsm/create-oudsm-instances/index.xml @@ -0,0 +1,14 @@ + + + + Create Oracle Unified Directory Services Manager Instances on Oracle Fusion Middleware on Kubernetes + /fmw-kubernetes/23.4.1/idm-products/oudsm/create-oudsm-instances/ + Recent content in Create Oracle Unified Directory Services Manager Instances on Oracle Fusion Middleware on Kubernetes + Hugo -- gohugo.io + en-us + + + + + + \ No newline at end of file diff --git a/docs/23.4.1/idm-products/oudsm/index.html b/docs/23.4.1/idm-products/oudsm/index.html new file mode 100644 index 000000000..a383fc88a --- /dev/null +++ b/docs/23.4.1/idm-products/oudsm/index.html @@ -0,0 +1,4141 @@ + + + + + + + + + + + + Oracle Unified Directory Services Manager :: Oracle Fusion Middleware on Kubernetes + + + + + + + + + + + + + + + + + + + + + + + + + +
+
+
+ +
+
+ + + + + + + +
+
+ +
+ +
+ +
+ +

+ + Oracle Unified Directory Services Manager +

+ + + + + + + +

Oracle Unified Directory Services Manager on Kubernetes

+

Oracle supports the deployment of Oracle Unified Directory Services Manager on Kubernetes. See the following sections:

+ + + + + + + + + + + + + + + + + +
+ +
+ + +
+ + +
+ + + +
+ +
+
+
+ + + + + + + + + + + + + + + + + + diff --git a/docs/23.4.1/idm-products/oudsm/index.xml b/docs/23.4.1/idm-products/oudsm/index.xml new file mode 100644 index 000000000..7f0c32fc6 --- /dev/null +++ b/docs/23.4.1/idm-products/oudsm/index.xml @@ -0,0 +1,14 @@ + + + + Oracle Unified Directory Services Manager on Oracle Fusion Middleware on Kubernetes + /fmw-kubernetes/23.4.1/idm-products/oudsm/ + Recent content in Oracle Unified Directory Services Manager on Oracle Fusion Middleware on Kubernetes + Hugo -- gohugo.io + en-us + + + + + + \ No newline at end of file diff --git a/docs/23.4.1/idm-products/oudsm/introduction/index.html b/docs/23.4.1/idm-products/oudsm/introduction/index.html new file mode 100644 index 000000000..ec6957cb0 --- /dev/null +++ b/docs/23.4.1/idm-products/oudsm/introduction/index.html @@ -0,0 +1,3979 @@ + + + + + + + + + + + + Introduction :: Oracle Fusion Middleware on Kubernetes + + + + + + + + + + + + + + + + + + + + + + + + + +
+
+
+ +
+ +
+ +
+ +
+ +
+ +

+ + Introduction +

+ + + + + + + +

Oracle Unified Directory Services Manager (OUDSM) is an interface for managing instances of Oracle Unified Directory. Oracle Unified Directory Services Manager enables you to configure the structure of the directory, define objects in the directory, add and configure users, groups, and other entries. Oracle Unified Directory Services Manager is also the interface you use to manage entries, schema, security, and other directory features.

+

This project supports deployment of Oracle Unified Directory Services Manager images based on the 12cPS4 (12.2.1.4.0) release within a Kubernetes environment. The Oracle Unified Directory Services Manager Image refers to binaries for Oracle Unified Directory Services Manager Release 12.2.1.4.0.

+

Follow the instructions in this guide to set up Oracle Unified Directory Services Manager on Kubernetes.

+

Current production release

+

The current production release for the Oracle Unified Directory Services Manager 12c PS4 (12.2.1.4.0) deployment on Kubernetes is 23.4.1.

+

Recent changes and known issues

+

See the Release Notes for recent changes and known issues for Oracle Unified Directory Services Manager deployment on Kubernetes.

+

Getting started

+

This documentation explains how to configure OUDSM on a Kubernetes cluster where no other Oracle Identity Management products will be deployed. For detailed information about this type of deployment, start at Prerequisites and follow this documentation sequentially. Please note that this documentation does not explain how to configure a Kubernetes cluster given the product can be deployed on any compliant Kubernetes vendor.

+

If you are deploying multiple Oracle Identity Management products on the same Kubernetes cluster, then you must follow the Enterprise Deployment Guide outlined in Enterprise Deployments. +Please note, you also have the option to follow the Enterprise Deployment Guide even if you are only installing OUDSM and no other Oracle Identity Management products.

+

Note: If you need to understand how to configure a Kubernetes cluster ready for an Oracle Unified Directory Services Manager deployment, you should follow the Enterprise Deployment Guide referenced in Enterprise Deployments. The Enterprise Deployment Automation section also contains details on automation scripts that can:

+
    +
  • Automate the creation of a Kubernetes cluster on Oracle Cloud Infrastructure (OCI), ready for the deployment of Oracle Identity Management products.
  • +
  • Automate the deployment of Oracle Identity Management products on any compliant Kubernetes cluster.
  • +
+

Documentation for earlier releases

+

To view documentation for an earlier release, see:

+ + + + + + +
+ +
+ + +
+ + +
+ + + +
+ +
+
+
+ + + + + + + + + + + + + + + + + + diff --git a/docs/23.4.1/idm-products/oudsm/introduction/index.xml b/docs/23.4.1/idm-products/oudsm/introduction/index.xml new file mode 100644 index 000000000..6b685aa36 --- /dev/null +++ b/docs/23.4.1/idm-products/oudsm/introduction/index.xml @@ -0,0 +1,14 @@ + + + + Introduction on Oracle Fusion Middleware on Kubernetes + /fmw-kubernetes/23.4.1/idm-products/oudsm/introduction/ + Recent content in Introduction on Oracle Fusion Middleware on Kubernetes + Hugo -- gohugo.io + en-us + + + + + + \ No newline at end of file diff --git a/docs/23.4.1/idm-products/oudsm/manage-oudsm-containers/index.html b/docs/23.4.1/idm-products/oudsm/manage-oudsm-containers/index.html new file mode 100644 index 000000000..0e3b917da --- /dev/null +++ b/docs/23.4.1/idm-products/oudsm/manage-oudsm-containers/index.html @@ -0,0 +1,4033 @@ + + + + + + + + + + + + Manage Oracle Unified Directory Services Manager Containers :: Oracle Fusion Middleware on Kubernetes + + + + + + + + + + + + + + + + + + + + + + + + + +
+
+
+ +
+
+ + + + +
+
+ +
+
+ + +
+
+ +
+ +
+ +
+ +

+ + Manage Oracle Unified Directory Services Manager Containers +

+ + + + + + + +

Important considerations for Oracle Unified Directory Services Manager instances in Kubernetes.

+ + + + + + + + + + + + + + + + + +
+ +
+ + +
+ + +
+ + + +
+ +
+
+
+ + + + + + + + + + + + + + + + + + diff --git a/docs/23.4.1/idm-products/oudsm/manage-oudsm-containers/index.xml b/docs/23.4.1/idm-products/oudsm/manage-oudsm-containers/index.xml new file mode 100644 index 000000000..91aeeef37 --- /dev/null +++ b/docs/23.4.1/idm-products/oudsm/manage-oudsm-containers/index.xml @@ -0,0 +1,47 @@ + + + + Manage Oracle Unified Directory Services Manager Containers on Oracle Fusion Middleware on Kubernetes + /fmw-kubernetes/23.4.1/idm-products/oudsm/manage-oudsm-containers/ + Recent content in Manage Oracle Unified Directory Services Manager Containers on Oracle Fusion Middleware on Kubernetes + Hugo -- gohugo.io + en-us + + + + + + a) Scaling Up/Down OUDSM Pods + /fmw-kubernetes/23.4.1/idm-products/oudsm/manage-oudsm-containers/scaling-up-down/ + Mon, 01 Jan 0001 00:00:00 +0000 + + /fmw-kubernetes/23.4.1/idm-products/oudsm/manage-oudsm-containers/scaling-up-down/ + Introduction This section describes how to increase or decrease the number of OUDSM pods in the Kubernetes deployment. +View existing OUDSM pods By default the oudsm helm chart deployment starts one pod: oudsm-1. +The number of pods started is determined by the replicaCount, which is set to 1 by default. A value of 1 starts the pod above. +To scale up or down the number of OUDSM pods, set replicaCount accordingly. + + + + b) Logging and Visualization for Helm Chart oudsm Deployment + /fmw-kubernetes/23.4.1/idm-products/oudsm/manage-oudsm-containers/logging-and-visualization/ + Mon, 01 Jan 0001 00:00:00 +0000 + + /fmw-kubernetes/23.4.1/idm-products/oudsm/manage-oudsm-containers/logging-and-visualization/ + Introduction This section describes how to install and configure logging and visualization for the oudsm Helm chart deployment. +The ELK stack consists of Elasticsearch, Logstash, and Kibana. Using ELK you can gain insights in real-time from the log data from your applications. + Elasticsearch is a distributed, RESTful search and analytics engine capable of solving a growing number of use cases. As the heart of the Elastic Stack, it centrally stores your data so you can discover the expected and uncover the unexpected. + + + + c) Monitoring an Oracle Unified Directory Services Manager Instance + /fmw-kubernetes/23.4.1/idm-products/oudsm/manage-oudsm-containers/monitoring-oudsm-instance/ + Mon, 01 Jan 0001 00:00:00 +0000 + + /fmw-kubernetes/23.4.1/idm-products/oudsm/manage-oudsm-containers/monitoring-oudsm-instance/ + Introduction Install Prometheus and Grafana Create a Kubernetes namespace Add Prometheus and Grafana Helm repositories Install the Prometheus operator View Prometheus and Grafana Objects Created Add the NodePort Verify Using Grafana GUI Introduction After the Oracle Unified Directory Services Manager instance is set up you can monitor it using Prometheus and Grafana. +Install Prometheus and Grafana Create a Kubernetes namespace Create a Kubernetes namespace to provide a scope for Prometheus and Grafana objects such as pods and services that you create in the environment. 
+ + + + \ No newline at end of file diff --git a/docs/23.4.1/idm-products/oudsm/manage-oudsm-containers/logging-and-visualization/index.html b/docs/23.4.1/idm-products/oudsm/manage-oudsm-containers/logging-and-visualization/index.html new file mode 100644 index 000000000..314dae50c --- /dev/null +++ b/docs/23.4.1/idm-products/oudsm/manage-oudsm-containers/logging-and-visualization/index.html @@ -0,0 +1,4181 @@ + + + + + + + + + + + + b) Logging and Visualization for Helm Chart oudsm Deployment :: Oracle Fusion Middleware on Kubernetes + + + + + + + + + + + + + + + + + + + + + + + + + +
+
+
+ +
+
+ + + + + + + +
+
+ +
+ +
+ +
+ +

+ + b) Logging and Visualization for Helm Chart oudsm Deployment +

+ + + + + + +

Introduction

+

This section describes how to install and configure logging and visualization for the oudsm Helm chart deployment.

+

The ELK stack consists of Elasticsearch, Logstash, and Kibana. Using ELK you can gain insights in real-time from the log data from your applications.

+
    +
  • Elasticsearch is a distributed, RESTful search and analytics engine capable of solving a growing number of use cases. As the heart of the Elastic Stack, it centrally stores your data so you can discover the expected and uncover the unexpected.
  • +
  • Logstash is an open source, server-side data processing pipeline that ingests data from a multitude of sources simultaneously, transforms it, and then sends it to your favorite “stash.”
  • +
  • Kibana lets you visualize your Elasticsearch data and navigate the Elastic Stack. It gives you the freedom to select the way you give shape to your data. And you don’t always have to know what you’re looking for.
  • +
+

Install Elasticsearch and Kibana

+

If you do not already have a centralized Elasticsearch (ELK) stack then you must configure this first. For details on how to configure the ELK stack, follow +Installing Elasticsearch (ELK) Stack and Kibana

+

Create the logstash pod

+

Variables used in this chapter

+

In order to create the logstash pod, you must create a yaml file. This file contains variables which you must substitute with variables applicable to your ELK environment.

+

Most of the values for the variables will be based on your ELK deployment as per Installing Elasticsearch (ELK) Stack and Kibana.

+

The table below outlines the variables and values you must set:

+ + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + +
Variable | Sample Value | Description
<ELK_VER> | 8.3.1 | The version of logstash you want to install.
<ELK_SSL> | true | If SSL is enabled for ELK set the value to true, or if NON-SSL set to false. This value must be lowercase.
<ELK_HOSTS> | https://elasticsearch.example.com:9200 | The URL for sending logs to Elasticsearch. HTTP if NON-SSL is used.
<ELK_USER> | logstash_internal | The name of the user for logstash to access Elasticsearch.
<ELK_PASSWORD> | password | The password for ELK_USER.
<ELK_APIKEY> | apikey | The API key details.
+

You will also need the BASE64 version of the Certificate Authority (CA) certificate(s) that signed the certificate of the Elasticsearch server. If using a self-signed certificate, this is the self signed certificate of the Elasticsearch server. See Copying the Elasticsearch Certificate for details on how to get the correct certificate. In the example below the certificate is called elk.crt.

+
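
If you want to confirm that the certificate you have is the CA that signed the Elasticsearch server certificate, one way to inspect it (assuming openssl is available on your host) is:

+
$ openssl x509 -in elk.crt -noout -subject -issuer -dates
+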

Create Kubernetes secrets

+
    +
  1. +

    Create a Kubernetes secret for Elasticsearch using the API Key or Password.

    +

    a) If ELK uses an API Key for authentication:

    +
    $ kubectl create secret generic elasticsearch-pw-elastic -n <domain_namespace> --from-literal password=<ELK_APIKEY>
    +

    For example:

    +
    $ kubectl create secret generic elasticsearch-pw-elastic -n oudsmns --from-literal password=<ELK_APIKEY>
    +

    The output will look similar to the following:

    +
    secret/elasticsearch-pw-elastic created
    +

    b) If ELK uses a password for authentication:

    +
    $ kubectl create secret generic elasticsearch-pw-elastic -n <domain_namespace> --from-literal password=<ELK_PASSWORD>
    +

    For example:

    +
    $ kubectl create secret generic elasticsearch-pw-elastic -n oudsmns --from-literal password=<ELK_PASSWORD>
    +

    The output will look similar to the following:

    +
    secret/elasticsearch-pw-elastic created
    +

    Note: It is recommended that the ELK Stack is created with authentication enabled. If no authentication is enabled you may create a secret using the values above.

    +
  2. +
  3. +

    Create a Kubernetes secret to access the required images on hub.docker.com:

    +

    Note: You must first have a user account on hub.docker.com:

    +
    $ kubectl create secret docker-registry "dockercred" --docker-server="https://index.docker.io/v1/" --docker-username="<docker_username>" --docker-password=<password> --docker-email=<docker_email_credentials> --namespace=<domain_namespace>
    +

    For example:

    +
    $ kubectl create secret docker-registry "dockercred" --docker-server="https://index.docker.io/v1/" --docker-username="username" --docker-password=<password> --docker-email=user@example.com --namespace=oudsmns
    +

    The output will look similar to the following:

    +
    secret/dockercred created
    +
  4. +
+

Enable logstash

+
    +
  1. +

    Navigate to the $WORKDIR/kubernetes/helm directory and create a logging-override-values.yaml file as follows:

    +
    elk:
    +  imagePullSecrets:
    +    - name: dockercred
    +  IntegrationEnabled: true
    +  logStashImage: logstash:<ELK_VER>
    +  logstashConfigMap: false
    +  esindex: oudsmlogs-00001
    +  sslenabled: <ELK_SSL>
    +  eshosts: <ELK_HOSTS>
    +  # Note: We need to provide either esuser,espassword or esapikey
    +  esuser: <ELK_USER>
    +  espassword: elasticsearch-pw-elastic
    +  esapikey: elasticsearch-pw-elastic
    +
      +
      • Change the <ELK_VER>, <ELK_SSL>, <ELK_HOSTS>, and <ELK_USER> values to match your environment.
    • +
    • If using SSL, replace the elk.crt in $WORKDIR/kubernetes/helm/oudsm/certs/ with the elk.crt for your ElasticSearch server.
    • +
    • If using API KEY for your ELK authentication, leave both esuser: and espassword: with no value.
    • +
    • If using a password for ELK authentication, leave esapikey: with no value, that is, delete elasticsearch-pw-elastic from that line.
    • +
    • If no authentication is used for ELK, leave esuser, espassword, and esapikey with no value assigned.
    • +
    • The rest of the lines in the yaml file should not be changed.
    • +
    +

    For example:

    +
    elk:
    +  imagePullSecrets:
    +    - name: dockercred
    +  IntegrationEnabled: true
    +  logStashImage: logstash:8.3.1
    +  logstashConfigMap: false
    +  esindex: oudsmlogs-00001
    +  sslenabled: true   
    +  eshosts: https://elasticsearch.example.com:9200
    +  # Note: We need to provide either esuser,espassword or esapikey
    +  esuser: logstash_internal
    +  espassword: elasticsearch-pw-elastic
    +  esapikey:
    +
  2. +
+

Upgrade oudsm deployment with ELK configuration

+
    +
  1. +

    Run the following command to upgrade the oudsm deployment with the ELK configuration:

    +
    $ helm upgrade --namespace <namespace> --values <valuesfile.yaml> <releasename> oudsm --reuse-values
    +

    For example:

    +
    $ helm upgrade --namespace oudsmns --values logging-override-values.yaml oudsm oudsm --reuse-values
    +

    The output should look similar to the following:

    +
    Release "oudsm" has been upgraded. Happy Helming!
    +NAME: oudsm
    +LAST DEPLOYED: <DATE>
    +NAMESPACE: oudsmns
    +STATUS: deployed
    +REVISION: 2
    +TEST SUITE: None
    +
  2. +
+

Verify the pods

+
    +
  1. +

    Run the following command to check the logstash pod is created correctly:

    +
    $ kubectl get pods -n <namespace>
    +

    For example:

    +
    $ kubectl get pods -n oudsmns
    +

    The output should look similar to the following:

    +
    NAME                              READY   STATUS    RESTARTS   AGE
    +oudsm-1                           1/1     Running   0          51m
    +oudsm-logstash-56dbcc6d9f-mxsgj   1/1     Running   0          2m7s
    +

    Note: Wait a couple of minutes to make sure the pod has not had any failures or restarts. If the pod fails you can view the pod log using:

    +
    $ kubectl logs -f oudsm-logstash-<pod> -n oudsmns
    +

    Most errors occur due to misconfiguration of the logging-override-values.yaml. This is usually because of an incorrect value set, or the certificate was not pasted with the correct indentation.

    +

    If the pod has errors, view the helm history to find the last working revision, for example:

    +
    $ helm history oudsm -n oudsmns
    +

    The output will look similar to the following:

    +
    REVISION        UPDATED       STATUS          CHART           APP VERSION     DESCRIPTION
    +1               <DATE>        superseded      oudsm-0.1       12.2.1.4.0      Install complete
    +2               <DATE>        deployed        oudsm-0.1       12.2.1.4.0      Upgrade complete
    +

    Rollback to the previous working revision by running:

    +
    $ helm rollback <release> <revision> -n <domain_namespace>
    +

    For example:

    +
    helm rollback oudsm 1 -n oudsmns
    +

    Once you have resolved the issue in the yaml files, run the helm upgrade command outlined earlier to recreate the logstash pod.

    +
  2. +
+

Verify and access the Kibana console

+

To access the Kibana console you will need the Kibana URL as per Installing Elasticsearch (ELK) Stack and Kibana.

+

For Kibana 7.7.x and below:

+
    +
  1. +

    Access the Kibana console with http://<hostname>:<port>/app/kibana and login with your username and password.

    +
  2. +
  3. +

    From the Navigation menu, navigate to Management > Kibana > Index Patterns.

    +
  4. +
  5. +

    In the Create Index Pattern page enter oudsmlogs* for the Index pattern and click Next Step.

    +
  6. +
  7. +

    In the Configure settings page, from the Time Filter field name drop down menu select @timestamp and click Create index pattern.

    +
  8. +
  9. +

    Once the index pattern is created click on Discover in the navigation menu to view the OUDSM logs.

    +
  10. +
+

For Kibana version 7.8.X and above:

+
    +
  1. +

    Access the Kibana console with http://<hostname>:<port>/app/kibana and login with your username and password.

    +
  2. +
  3. +

    From the Navigation menu, navigate to Management > Stack Management.

    +
  4. +
  5. +

    Click Data Views in the Kibana section.

    +
  6. +
  7. +

    Click Create Data View and enter the following information:

    +
      +
    • Name: oudsmlogs*
    • +
    • Timestamp: @timestamp
    • +
    +
  8. +
  9. +

    Click Create Data View.

    +
  10. +
  11. +

    From the Navigation menu, click Discover to view the log file entries.

    +
  12. +
  13. +

    From the drop down menu, select oudsmlogs* to view the log file entries.

    +
  14. +
+ + +
+ +
+ + + +
+ + +
+ + + +
+ +
+
+
+ + + + + + + + + + + + + + + + + + + diff --git a/docs/23.4.1/idm-products/oudsm/manage-oudsm-containers/monitoring-oudsm-instance/index.html b/docs/23.4.1/idm-products/oudsm/manage-oudsm-containers/monitoring-oudsm-instance/index.html new file mode 100644 index 000000000..c3a950f9b --- /dev/null +++ b/docs/23.4.1/idm-products/oudsm/manage-oudsm-containers/monitoring-oudsm-instance/index.html @@ -0,0 +1,4112 @@ + + + + + + + + + + + + c) Monitoring an Oracle Unified Directory Services Manager Instance :: Oracle Fusion Middleware on Kubernetes + + + + + + + + + + + + + + + + + + + + + + + + + +
+
+
+ +
+
+ + + + + + + +
+
+ +
+ +
+ +
+ +

+ + c) Monitoring an Oracle Unified Directory Services Manager Instance +

+ + + + + + +
    +
  1. Introduction
  2. +
  3. Install Prometheus and Grafana +
      +
    1. Create a Kubernetes namespace
    2. +
    3. Add Prometheus and Grafana Helm repositories
    4. +
    5. Install the Prometheus operator
    6. +
    7. View Prometheus and Grafana Objects Created
    8. +
    9. Add the NodePort
    10. +
    +
  4. +
  5. Verify Using Grafana GUI
  6. +
+

Introduction

+

After the Oracle Unified Directory Services Manager instance is set up you can monitor it using Prometheus and Grafana.

+

Install Prometheus and Grafana

+

Create a Kubernetes namespace

+
    +
  1. +

    Create a Kubernetes namespace to provide a scope for Prometheus and Grafana objects such as pods and services that you create in the environment. To create your namespace issue the following command:

    +
    $ kubectl create namespace <namespace>
    +

    For example:

    +
    $ kubectl create namespace monitoring
    +

    The output will look similar to the following:

    +
    namespace/monitoring created
    +
  2. +
+

Add Prometheus and Grafana Helm repositories

+
    +
  1. +

    Add the Prometheus and Grafana Helm repositories by issuing the following command:

    +
    $ helm repo add prometheus https://prometheus-community.github.io/helm-charts
    +

    The output will look similar to the following:

    +
    "prometheus" has been added to your repositories
    +
  2. +
  3. +

    Run the following command to update the repositories:

    +
    $ helm repo update
    +

    The output will look similar to the following:

    +
    Hang tight while we grab the latest from your chart repositories...
    +...Successfully got an update from the "stable" chart repository
    +...Successfully got an update from the "prometheus" chart repository
    +...Successfully got an update from the "prometheus-community" chart repository
    +
    +Update Complete.  Happy Helming!
    +
  4. +
+

Install the Prometheus operator

+
    +
  1. +

    Install the Prometheus operator using the helm command:

    +
    $ helm install <release_name> prometheus/kube-prometheus-stack -n <namespace>
    +

    For example:

    +
    $ helm install monitoring prometheus/kube-prometheus-stack -n monitoring
    +

    The output should look similar to the following:

    +
    NAME: monitoring
    +LAST DEPLOYED: <DATE>
    +NAMESPACE: monitoring
    +STATUS: deployed
    +REVISION: 1
    +NOTES:
    +kube-prometheus-stack has been installed. Check its status by running:
    +  kubectl --namespace monitoring get pods -l "release=monitoring"
    +
    +Visit https://github.com/prometheus-operator/kube-prometheus for instructions on how to create & configure Alertmanager and Prometheus instances using the Operator.
    +

    Note: If your cluster does not have access to the internet to pull external images, such as prometheus or grafana, you must load the images in a local container registry. You must then run the install as follows:

    +
    helm install --set grafana.image.repository=container-registry.example.com/grafana --set grafana.image.tag=8.4.2 monitoring prometheus/kube-prometheus-stack -n monitoring
    +
  2. +
+

View Prometheus and Grafana Objects created

+

View the objects created for Prometheus and Grafana by issuing the following command:

+
$ kubectl get all,service,pod -o wide -n <namespace>
+

For example:

+
$ kubectl get all,service,pod -o wide -n monitoring
+

The output will look similar to the following:

+
NAME                                                         READY   STATUS    RESTARTS   AGE   IP               NODE            NOMINATED NODE   READINESS GATES
+pod/alertmanager-monitoring-kube-prometheus-alertmanager-0   2/2     Running   0          27s   10.244.2.141     <worker-node>   <none>           <none>
+pod/monitoring-grafana-578f79599c-qqdfb                      3/3     Running   0          34s   10.244.1.127     <worker-node>   <none>           <none>
+pod/monitoring-kube-prometheus-operator-65cdf7995-w6btr      1/1     Running   0          34s   10.244.1.126     <worker-node>   <none>           <none>
+pod/monitoring-kube-state-metrics-56bfd4f44f-5ls8t           1/1     Running   0          34s   10.244.2.139     <worker-node>   <none>           <none>
+pod/monitoring-prometheus-node-exporter-5b2f6                1/1     Running   0          34s   100.102.48.84    <worker-node>   <none>           <none>
+pod/monitoring-prometheus-node-exporter-fw9xh                1/1     Running   0          34s   100.102.48.28    <worker-node>   <none>           <none>
+pod/monitoring-prometheus-node-exporter-s5n9g                1/1     Running   0          34s   100.102.48.121   <master-node>   <none>           <none>
+pod/prometheus-monitoring-kube-prometheus-prometheus-0       2/2     Running   0          26s   10.244.1.128     <worker-node>   <none>           <none>
+
+NAME                                              TYPE        CLUSTER-IP       EXTERNAL-IP   PORT(S)                      AGE   SELECTOR
+service/alertmanager-operated                     ClusterIP   None             <none>        9093/TCP,9094/TCP,9094/UDP   27s   app.kubernetes.io/name=alertmanager
+service/monitoring-grafana                        ClusterIP   10.110.97.252    <none>        80/TCP                       34s   app.kubernetes.io/instance=monitoring,app.kubernetes.io/name=grafana
+service/monitoring-kube-prometheus-alertmanager   ClusterIP   10.110.82.176    <none>        9093/TCP                     34s   alertmanager=monitoring-kube-prometheus-alertmanager,app.kubernetes.io/name=alertmanager
+service/monitoring-kube-prometheus-operator       ClusterIP   10.104.147.173   <none>        443/TCP                      34s   app=kube-prometheus-stack-operator,release=monitoring
+service/monitoring-kube-prometheus-prometheus     ClusterIP   10.110.109.245   <none>        9090/TCP                     34s   app.kubernetes.io/name=prometheus,prometheus=monitoring-kube-prometheus-prometheus
+service/monitoring-kube-state-metrics             ClusterIP   10.107.111.214   <none>        8080/TCP                     34s   app.kubernetes.io/instance=monitoring,app.kubernetes.io/name=kube-state-metrics
+service/monitoring-prometheus-node-exporter       ClusterIP   10.108.97.196    <none>        9100/TCP                     34s   app=prometheus-node-exporter,release=monitoring
+service/prometheus-operated                       ClusterIP   None             <none>        9090/TCP                     26s   app.kubernetes.io/name=prometheus
+
+NAME                                                 DESIRED   CURRENT   READY   UP-TO-DATE   AVAILABLE   NODE SELECTOR   AGE   CONTAINERS      IMAGES                                    SELECTOR
+daemonset.apps/monitoring-prometheus-node-exporter   3         3         3       3            3           <none>          34s   node-exporter   quay.io/prometheus/node-exporter:v1.3.1   app=prometheus-node-exporter,release=monitoring
+
+NAME                                                  READY   UP-TO-DATE   AVAILABLE   AGE   CONTAINERS                                            IMAGES                                                                                          SELECTOR
+deployment.apps/monitoring-grafana                    0/1     1            0           34s   grafana-sc-dashboard,grafana-sc-datasources,grafana   quay.io/kiwigrid/k8s-sidecar:1.15.6,quay.io/kiwigrid/k8s-sidecar:1.15.6,grafana/grafana:8.4.2   app.kubernetes.io/instance=monitoring,app.kubernetes.io/name=grafana
+deployment.apps/monitoring-kube-prometheus-operator   1/1     1            1           34s   kube-prometheus-stack                                 quay.io/prometheus-operator/prometheus-operator:v0.55.0                                         app=kube-prometheus-stack-operator,release=monitoring
+deployment.apps/monitoring-kube-state-metrics         1/1     1            1           34s   kube-state-metrics                                    k8s.gcr.io/kube-state-metrics/kube-state-metrics:v2.4.1                                         app.kubernetes.io/instance=monitoring,app.kubernetes.io/name=kube-state-metrics
+
+NAME                                                            DESIRED   CURRENT   READY   AGE   CONTAINERS                                            IMAGES                                                                                          SELECTOR
+replicaset.apps/monitoring-grafana-578f79599c                   1         1         0       34s   grafana-sc-dashboard,grafana-sc-datasources,grafana   quay.io/kiwigrid/k8s-sidecar:1.15.6,quay.io/kiwigrid/k8s-sidecar:1.15.6,grafana/grafana:8.4.2   app.kubernetes.io/instance=monitoring,app.kubernetes.io/name=grafana,pod-template-hash=578f79599c
+replicaset.apps/monitoring-kube-prometheus-operator-65cdf7995   1         1         1       34s   kube-prometheus-stack                                 quay.io/prometheus-operator/prometheus-operator:v0.55.0                                         app=kube-prometheus-stack-operator,pod-template-hash=65cdf7995,release=monitoring
+replicaset.apps/monitoring-kube-state-metrics-56bfd4f44f        1         1         1       34s   kube-state-metrics                                    k8s.gcr.io/kube-state-metrics/kube-state-metrics:v2.4.1                                         app.kubernetes.io/instance=monitoring,app.kubernetes.io/name=kube-state-metrics,pod-template-hash=56bfd4f44f
+
+NAME                                                                    READY   AGE   CONTAINERS                     IMAGES
+statefulset.apps/alertmanager-monitoring-kube-prometheus-alertmanager   1/1     27s   alertmanager,config-reloader   quay.io/prometheus/alertmanager:v0.23.0,quay.io/prometheus-operator/prometheus-config-reloader:v0.55.0
+statefulset.apps/prometheus-monitoring-kube-prometheus-prometheus       1/1     26s   prometheus,config-reloader     quay.io/prometheus/prometheus:v2.33.5,quay.io/prometheus-operator/prometheus-config-reloader:v0.55.0
+

Add the NodePort

+
    +
  1. +

    Edit the grafana service to add the NodePort:

    +
    $ kubectl edit service/<deployment_name>-grafana -n <namespace>
    +

    For example:

    +
    $ kubectl edit service/monitoring-grafana -n monitoring
    +

    Note: This opens an edit session for the grafana service where parameters can be changed using standard vi commands. (A non-interactive alternative using kubectl patch is sketched after this list.)

    +

    Change the ports entry to add nodePort: 30091, and set type: NodePort:

    +
      ports:
    +  - name: http-web
    +    nodePort: 30091
    +    port: 80
    +    protocol: TCP
    +    targetPort: 3000
    +  selector:
    +    app.kubernetes.io/instance: monitoring
    +    app.kubernetes.io/name: grafana
    +  sessionAffinity: None
    +  type: NodePort
    +
  2. +
  3. +

    Save the file and exit (:wq).

    +
  4. +
+
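
As an alternative to editing the service interactively, the same change can be applied non-interactively with kubectl patch. This is a minimal sketch, assuming the service name and port values shown above; verify the result afterwards with kubectl get service monitoring-grafana -n monitoring -o yaml:

+
$ kubectl patch service monitoring-grafana -n monitoring \
+  -p '{"spec":{"type":"NodePort","ports":[{"name":"http-web","port":80,"protocol":"TCP","targetPort":3000,"nodePort":30091}]}}'
+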

Verify Using Grafana GUI

+
    +
  1. +

    Access the Grafana GUI using http://<HostIP>:<nodeport> and log in with admin/prom-operator. Change the password when prompted. (A command-line check of the NodePort is sketched after this list.)

    +
  2. +
  3. +

    Download the K8 Cluster Detail Dashboard json file from: https://grafana.com/grafana/dashboards/10856.

    +
  4. +
  5. +

    Import the Grafana dashboard by navigating to Dashboards > Import in the left-hand menu. Click Upload JSON file and select the downloaded JSON file. In the Prometheus drop-down box select Prometheus, then click Import. The dashboard should be displayed.

    +
  6. +
  7. +

    Verify your installation by viewing some of the customized dashboard views.

    +
  8. +
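
Optionally, before opening a browser you can confirm that the Grafana NodePort responds from the command line. A minimal check, assuming the nodePort 30091 configured earlier (an HTTP 200 response indicates the login page is being served):

+
$ curl -s -o /dev/null -w "%{http_code}\n" http://<HostIP>:30091/login
+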
+ + +
+ +
+ + + +
+ + +
+ + + +
+ +
+
+
+ + + + + + + + + + + + + + + + + + + diff --git a/docs/23.4.1/idm-products/oudsm/manage-oudsm-containers/scaling-up-down/index.html b/docs/23.4.1/idm-products/oudsm/manage-oudsm-containers/scaling-up-down/index.html new file mode 100644 index 000000000..1fb4bf173 --- /dev/null +++ b/docs/23.4.1/idm-products/oudsm/manage-oudsm-containers/scaling-up-down/index.html @@ -0,0 +1,4047 @@ + + + + + + + + + + + + a) Scaling Up/Down OUDSM Pods :: Oracle Fusion Middleware on Kubernetes + + + + + + + + + + + + + + + + + + + + + + + + + +
+
+
+ +
+ +
+ +
+ +
+ +
+ +

+ + a) Scaling Up/Down OUDSM Pods +

+ + + + + + +

Introduction

+

This section describes how to increase or decrease the number of OUDSM pods in the Kubernetes deployment.

+

View existing OUDSM pods

+

By default the oudsm helm chart deployment starts one pod: oudsm-1.

+

The number of pods started is determined by the replicaCount parameter, which is set to 1 by default. A value of 1 starts the single pod shown above.

+

To scale up or down the number of OUDSM pods, set replicaCount accordingly.

+
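
Before scaling, you can optionally confirm the replicaCount currently in effect for the Helm release. A minimal sketch, assuming the release is named oudsm in the oudsmns namespace:

+
$ helm get values oudsm --namespace oudsmns --all | grep replicaCount
+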

Run the following command to view the number of pods in the OUDSM deployment:

+
$ kubectl --namespace <namespace> get pods -o wide
+

For example:

+
$ kubectl --namespace oudsmns get pods -o wide
+

The output will look similar to the following:

+
NAME          READY   STATUS    RESTARTS   AGE   IP            NODE             NOMINATED NODE   READINESS GATES
+pod/oudsm-1   1/1     Running   0          73m   10.244.0.19   <worker-node>   <none>           <none>
+

Scaling up OUDSM pods

+

In this example, replicaCount is increased to 2, which creates a new OUDSM pod oudsm-2 along with its associated services.

+

You can scale up the number of OUDSM pods using one of the following methods:

+
    +
  1. Using a YAML file
  2. +
  3. Using --set argument
  4. +
+

Using a YAML file

+
    +
  1. +

    Navigate to the $WORKDIR/kubernetes/helm directory:

    +
    $ cd $WORKDIR/kubernetes/helm
    +
  2. +
  3. +

    Create an oudsm-scaleup-override.yaml file that contains:

    +
    replicaCount: 2
    +
  4. +
  5. +

    Run the following command to scale up the OUDSM pods:

    +
    $ helm upgrade --namespace <namespace> \
    +--values oudsm-scaleup-override.yaml \
    +<release_name> oudsm --reuse-values
    +

    For example:

    +
    $ helm upgrade --namespace oudsmns \
    +--values oudsm-scaleup-override.yaml \
    +oudsm oudsm --reuse-values
    +
  6. +
+

Using --set argument

+
    +
  1. +

    Run the following command to scale up the OUDSM pods:

    +
    $ helm upgrade --namespace <namespace> \
    +--set replicaCount=2 \
    +<release_name> oudsm --reuse-values
    +

    For example:

    +
    $ helm upgrade --namespace oudsmns \
    +--set replicaCount=2 \
    +oudsm oudsm --reuse-values
    +
  2. +
+

Verify the pods

+
    +
  1. +

    Verify the new OUDSM pod oudsm-2 has started:

    +
    $ kubectl get pod,service -o wide -n <namespace> 
    +

    For example:

    +
    $ kubectl get pods,service -o wide -n oudsmns
    +

    The output will look similar to the following:

    +
    NAME          READY   STATUS    RESTARTS   AGE   IP            NODE            NOMINATED NODE   READINESS GATES
    +pod/oudsm-1   1/1     Running   0          88m   10.244.0.19   <worker-node>   <none>           <none>
    +pod/oudsm-2   1/1     Running   0          15m   10.245.3.45   <worker-node>   <none>           <none>
    +	
    +NAME                TYPE        CLUSTER-IP      EXTERNAL-IP   PORT(S)             AGE   SELECTOR
    +service/oudsm-1     ClusterIP   10.96.108.200   <none>        7001/TCP,7002/TCP   88m   app.kubernetes.io/instance=oudsm,app.kubernetes.io/name=oudsm,oudsm/instance=oudsm-1
    +service/oudsm-2     ClusterIP   10.96.31.201    <none>        7001/TCP,7002/TCP   15m   app.kubernetes.io/instance=oudsm,app.kubernetes.io/name=oudsm,oudsm/instance=oudsm-2
    +service/oudsm-lbr   ClusterIP   10.96.41.201    <none>        7001/TCP,7002/TCP   73m   app.kubernetes.io/instance=oudsm,app.kubernetes.io/name=oudsm
    +

    Note: It will take several minutes before all the services listed above are shown. While the oudsm-2 pod has a READY state of 0/1, the pod has started but the OUDSM server associated with it is still starting. While the pod is starting you can check the startup status in the pod log by running the following command:

    +
    $ kubectl logs oudsm-2 -n oudsmns
    +
  2. +
+

Scaling down OUDSM pods

+

Scaling down OUDSM pods is performed in exactly the same way as in Scaling up OUDSM pods, except that the replicaCount is reduced to the required number of pods.

+
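
For example, a minimal scale-down sketch using the --set method, assuming the release and chart are both named oudsm and the number of pods is being reduced back to 1:

+
$ helm upgrade --namespace oudsmns \
+--set replicaCount=1 \
+oudsm oudsm --reuse-values
+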

Once the helm command is executed, the pod(s) will move to a Terminating state. In the example below, replicaCount was reduced from 2 to 1 and hence oudsm-2 has moved to Terminating:

+
$ kubectl get pods -n oudsmns
+   
+NAME          READY   STATUS        RESTARTS   AGE   IP            NODE            NOMINATED NODE   READINESS GATES
+pod/oudsm-1   1/1     Running       0          92m   10.244.0.19   <worker-node>   <none>           <none>
+pod/oudsm-2   1/1     Terminating   0          19m   10.245.3.45   <worker-node>   <none>           <none>
+

The pod will take a minute or two to stop and then will disappear:

+
$ kubectl get pods -n oudsmns
+   
+NAME          READY   STATUS    RESTARTS   AGE   IP            NODE            NOMINATED NODE   READINESS GATES
+pod/oudsm-1   1/1     Running   0          94m   10.244.0.19   <worker-node>   <none>           <none>
+
+ +
+ +
+ + + +
+ + +
+ + + +
+ +
+
+
+ + + + + + + + + + + + + + + + + + + diff --git a/docs/23.4.1/idm-products/oudsm/patch-and-upgrade/index.html b/docs/23.4.1/idm-products/oudsm/patch-and-upgrade/index.html new file mode 100644 index 000000000..d78c1b413 --- /dev/null +++ b/docs/23.4.1/idm-products/oudsm/patch-and-upgrade/index.html @@ -0,0 +1,4014 @@ + + + + + + + + + + + + Patch and upgrade :: Oracle Fusion Middleware on Kubernetes + + + + + + + + + + + + + + + + + + + + + + + + + +
+
+
+ +
+
+ + + + +
+
+ +
+
+ + +
+
+ +
+ +
+ +
+ +

+ + Patch and upgrade +

+ + + + + + + +

This section shows you how to patch the OUDSM image, and how to upgrade the Elasticsearch and Kibana stack.

+

The upgrade path taken depends on the version you are upgrading from.

+

Please refer to the Release Notes for information on which upgrade steps are necessary for the version you are upgrading to.

+ + + + + + + +
    + + + + + + + + + + + + + + + + + +

    +a. Patch an image +

    + + + + + +

    Instructions on how to update your OUDSM Kubernetes cluster with a new OUDSM container image.

    + + + + + + + + + + + + +

    +b. Upgrade Elasticsearch and Kibana +

    + + + + + +

    Instructions on how to upgrade Elasticsearch and Kibana.

    + + + + + + + + +
+ + + + + + + + + +
+ +
+ + +
+ + +
+ + + +
+ +
+
+
+ + + + + + + + + + + + + + + + + + diff --git a/docs/23.4.1/idm-products/oudsm/patch-and-upgrade/index.xml b/docs/23.4.1/idm-products/oudsm/patch-and-upgrade/index.xml new file mode 100644 index 000000000..7a42a9ce2 --- /dev/null +++ b/docs/23.4.1/idm-products/oudsm/patch-and-upgrade/index.xml @@ -0,0 +1,40 @@ + + + + Patch and upgrade on Oracle Fusion Middleware on Kubernetes + /fmw-kubernetes/23.4.1/idm-products/oudsm/patch-and-upgrade/ + Recent content in Patch and upgrade on Oracle Fusion Middleware on Kubernetes + Hugo -- gohugo.io + en-us + + + + + + a. Patch an image + /fmw-kubernetes/23.4.1/idm-products/oudsm/patch-and-upgrade/patch-an-oudsm-image/ + Mon, 01 Jan 0001 00:00:00 +0000 + + /fmw-kubernetes/23.4.1/idm-products/oudsm/patch-and-upgrade/patch-an-oudsm-image/ + Introduction In this section the Oracle Unified Directory Services Manager (OUDSM) deployment is updated with a new OUDSM container image. +Note: If you are not using Oracle Container Registry or your own container registry, then you must first load the new container image on all nodes in your Kubernetes cluster. +You can update the deployment with a new OUDSM container image using one of the following methods: + Using a YAML file Using --set argument Using a YAML file Navigate to the $WORKDIR/kubernetes/helm directory: + + + + b. Upgrade Elasticsearch and Kibana + /fmw-kubernetes/23.4.1/idm-products/oudsm/patch-and-upgrade/upgrade-elk/ + Mon, 01 Jan 0001 00:00:00 +0000 + + /fmw-kubernetes/23.4.1/idm-products/oudsm/patch-and-upgrade/upgrade-elk/ + This section shows how to upgrade Elasticsearch and Kibana. +To determine if this step is required for the version you are upgrading from, refer to the Release Notes. +Download the latest code repository Download the latest code repository as follows: + Create a working directory to setup the source code. +$ mkdir &lt;workdir&gt; For example: +$ mkdir /scratch/OUDSMK8SOctober23 Download the latest OUDSM deployment scripts from the OUDSM repository. + + + + \ No newline at end of file diff --git a/docs/23.4.1/idm-products/oudsm/patch-and-upgrade/patch-an-oudsm-image/index.html b/docs/23.4.1/idm-products/oudsm/patch-and-upgrade/patch-an-oudsm-image/index.html new file mode 100644 index 000000000..e2c269760 --- /dev/null +++ b/docs/23.4.1/idm-products/oudsm/patch-and-upgrade/patch-an-oudsm-image/index.html @@ -0,0 +1,4073 @@ + + + + + + + + + + + + a. Patch an image :: Oracle Fusion Middleware on Kubernetes + + + + + + + + + + + + + + + + + + + + + + + + + +
+
+
+ +
+
+ + + + +
+
+ +
+
+ + +
+
+ +
+ +
+ +
+ +

+ + a. Patch an image +

+ + + + + + +

Introduction

+

In this section the Oracle Unified Directory Services Manager (OUDSM) deployment is updated with a new OUDSM container image.

+

Note: If you are not using Oracle Container Registry or your own container registry, then you must first load the new container image on all nodes in your Kubernetes cluster.

+

You can update the deployment with a new OUDSM container image using one of the following methods:

+
    +
  1. Using a YAML file
  2. +
  3. Using --set argument
  4. +
+

Using a YAML file

+
    +
  1. +

    Navigate to the $WORKDIR/kubernetes/helm directory:

    +
    $ cd $WORKDIR/kubernetes/helm
    +
  2. +
  3. +

    Create an oudsm-patch-override.yaml file that contains:

    +
    image:
    +  repository: <image_location>
    +  tag: <image_tag>
    +imagePullSecrets:
    +  - name: orclcred
    +

    For example:

    +
    image:
    +  repository: container-registry.oracle.com/middleware/oudsm_cpu
    +  tag: 12.2.1.4-jdk8-ol7-<October'23>
    +imagePullSecrets:
    +  - name: orclcred
    +

    The following caveats exist:

    +
      +
    • +

      If you are not using Oracle Container Registry or your own container registry for your oudsm container image, then you can remove the following:

      +
      imagePullSecrets:
      +  - name: orclcred
      +
    • +
    +
  4. +
  5. +

    Run the following command to upgrade the deployment:

    +
    $ helm upgrade --namespace <namespace> \
    +--values oudsm-patch-override.yaml \
    +<release_name> oudsm --reuse-values
    +

    For example:

    +
    $ helm upgrade --namespace oudsmns \
    +--values oudsm-patch-override.yaml \
    +oudsm oudsm --reuse-values
    +
  6. +
+
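
If you want to preview the effect of the override file before applying it, the helm upgrade command above can also be run with the --dry-run flag, which renders the change without modifying the deployment; for example:

+
$ helm upgrade --namespace oudsmns \
+--values oudsm-patch-override.yaml \
+oudsm oudsm --reuse-values --dry-run
+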

Using --set argument

+
    +
  1. +

    Navigate to the $WORKDIR/kubernetes/helm directory:

    +
    $ cd $WORKDIR/kubernetes/helm
    +
  2. +
  3. +

    Run the following command to update the deployment with a new OUDSM container image:

    +
    $ helm upgrade --namespace <namespace> \
    +--set image.repository=<image_location>,image.tag=<image_tag> \
    +--set imagePullSecrets[0].name="orclcred" \
    +<release_name> oudsm --reuse-values
    +

    For example:

    +
    $ helm upgrade --namespace oudsmns \
    +--set image.repository=container-registry.oracle.com/middleware/oudsm_cpu,image.tag=12.2.1.4-jdk8-ol7-<October'23> \
    +--set imagePullSecrets[0].name="orclcred" \
    +oudsm oudsm --reuse-values
    +

    The following caveats exist:

    +
      +
    • If you are not using Oracle Container Registry or your own container registry for your OUDSM container image, then you can remove the following: --set imagePullSecrets[0].name="orclcred".
    • +
    +
  4. +
+

Verify the pods

+
    +
  1. +

    After updating with the new image the pod will restart. Verify the pod is running:

    +
    $ kubectl --namespace <namespace> get pods
    +

    For example:

    +
    $ kubectl --namespace oudsmns get pods
    +

    The output will look similar to the following:

    +
    NAME          READY   STATUS    RESTARTS   AGE   IP            NODE             NOMINATED NODE   READINESS GATES
    +pod/oudsm-1   1/1     Running   0          73m   10.244.0.19   <worker-node>   <none>           <none>
    +

    Note: It will take several minutes before the pod starts. While the oudsm pod has a READY state of 0/1, the pod has started but the OUDSM server associated with it is still starting. While the pod is starting you can check the startup status in the pod logs, for example with kubectl logs oudsm-1 -n oudsmns.

    +
  2. +
  3. +

    Verify the pod is using the new image by running the following command (a shorter jsonpath-based check is sketched after this list):

    +
    $ kubectl describe pod <pod> -n <namespace>
    +

    For example:

    +
    $ kubectl describe pod oudsm-1 -n oudsmns
    +

    The output will look similar to the following:

    +
    Name:         oudsm-1
    +Namespace:    oudsmns
    +Priority:     0
    +Node:         <worker-node>/100.102.48.28
    +Start Time:   <DATE>
    +Labels:       app.kubernetes.io/instance=oudsm
    +              app.kubernetes.io/managed-by=Helm
    +              app.kubernetes.io/name=oudsm
    +              app.kubernetes.io/version=12.2.1.4.0
    +              helm.sh/chart=oudsm-0.1
    +              oudsm/instance=oudsm-1
    +Annotations:  meta.helm.sh/release-name: oudsm
    +              meta.helm.sh/release-namespace: oudsmns
    +Status:       Running
    +IP:           10.244.1.90
    +
    +
    +etc...
    +
    +Events:
    +  Type     Reason     Age                From     Message
    +  ----     ------     ----               ----     -------
    +  Normal   Killing    22m                kubelet  Container oudsm definition changed, will be restarted
    +  Normal   Created    21m (x2 over 61m)  kubelet  Created container oudsm
    +  Normal   Pulling    21m                kubelet  Container image "container-registry.oracle.com/middleware/oudsm_cpu:12.2.1.4-jdk8-ol7-<October'23>"
    +  Normal   Started    21m (x2 over 61m)  kubelet  Started container oudsm
    +
  4. +
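
As a shorter alternative to inspecting the full kubectl describe output above, the image reference can be extracted directly using jsonpath; a minimal sketch, assuming the pod is oudsm-1 in the oudsmns namespace:

+
$ kubectl get pod oudsm-1 -n oudsmns -o jsonpath='{.spec.containers[*].image}'
+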
+ + +
+ +
+ + + +
+ + +
+ + + +
+ +
+
+
+ + + + + + + + + + + + + + + + + + + diff --git a/docs/23.4.1/idm-products/oudsm/patch-and-upgrade/upgrade-elk/index.html b/docs/23.4.1/idm-products/oudsm/patch-and-upgrade/upgrade-elk/index.html new file mode 100644 index 000000000..0623a23b3 --- /dev/null +++ b/docs/23.4.1/idm-products/oudsm/patch-and-upgrade/upgrade-elk/index.html @@ -0,0 +1,4008 @@ + + + + + + + + + + + + b. Upgrade Elasticsearch and Kibana :: Oracle Fusion Middleware on Kubernetes + + + + + + + + + + + + + + + + + + + + + + + + + +
+
+
+ +
+
+ + + + + + + +
+
+ +
+ +
+ +
+ +

+ + b. Upgrade Elasticsearch and Kibana +

+ + + + + + +

This section shows how to upgrade Elasticsearch and Kibana.

+

To determine if this step is required for the version you are upgrading from, refer to the Release Notes.

+

Download the latest code repository

+

Download the latest code repository as follows:

+
    +
  1. +

    Create a working directory to setup the source code.

    +
    $ mkdir <workdir>
    +

    For example:

    +
    $ mkdir /scratch/OUDSMK8SOctober23
    +
  2. +
  3. +

    Download the latest OUDSM deployment scripts from the OUDSM repository.

    +
    $ cd <workdir>
    +$ git clone https://github.com/oracle/fmw-kubernetes.git --branch release/23.4.1
    +

    For example:

    +
    $ cd /scratch/OUDSMK8SOctober23
    +$ git clone https://github.com/oracle/fmw-kubernetes.git --branch release/23.4.1
    +
  4. +
  5. +

    Set the $WORKDIR environment variable as follows:

    +
    $ export WORKDIR=<workdir>/fmw-kubernetes/OracleUnifiedDirectorySM
    +

    For example:

    +
    $ export WORKDIR=/scratch/OUDSMK8SOctober23/fmw-kubernetes/OracleUnifiedDirectorySM
    +
  6. +
+

Undeploy Elasticsearch and Kibana

+

From October 22 (22.4.1) onwards, OUDSM logs should be stored on a centralized Elasticsearch and Kibana (ELK) stack.

+

Deployments prior to October 22 (22.4.1) used local deployments of Elasticsearch and Kibana.

+

If you are upgrading from July 22 (22.3.1) or earlier, to October 23 (23.4.1), you must first undeploy Elasticsearch and Kibana using the steps below:

+
    +
  1. +

    Navigate to the $WORKDIR/kubernetes/helm directory and create a logging-override-values-uninstall.yaml with the following:

    +
    elk:
    +  enabled: false
    +
  2. +
  3. +

    Run the following command to remove the existing ELK deployment:

    +
    $ helm upgrade --namespace <domain_namespace> --values <valuesfile.yaml> <releasename> oudsm --reuse-values
    +

    For example:

    +
    $ helm upgrade --namespace oudsmns --values logging-override-values-uninstall.yaml oudsm oudsm --reuse-values
    +
  4. +
+
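
After the helm upgrade above completes, you can optionally confirm that the locally deployed ELK pods have terminated. A possible check, assuming the local Elasticsearch and Kibana pod names contain elasticsearch or kibana:

+
$ kubectl get pods -n oudsmns | grep -iE 'elasticsearch|kibana'
+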

Deploy Elasticsearch and Kibana in centralized stack

+
    +
  1. Follow Install Elasticsearch stack and Kibana to deploy Elasticsearch and Kibana in a centralized stack.
  2. +
+ + +
+ +
+ + + +
+ + +
+ + + +
+ +
+
+
+ + + + + + + + + + + + + + + + + + + diff --git a/docs/23.4.1/idm-products/oudsm/prepare-your-environment/index.html b/docs/23.4.1/idm-products/oudsm/prepare-your-environment/index.html new file mode 100644 index 000000000..bf3ca47d0 --- /dev/null +++ b/docs/23.4.1/idm-products/oudsm/prepare-your-environment/index.html @@ -0,0 +1,4033 @@ + + + + + + + + + + + + Prepare Your Environment :: Oracle Fusion Middleware on Kubernetes + + + + + + + + + + + + + + + + + + + + + + + + + +
+
+
+ +
+ +
+ +
+ +
+ +
+ +

+ + Prepare Your Environment +

+ + + + + + + +
    +
  1. Check the Kubernetes cluster is ready
  2. +
  3. Obtain the OUDSM container image
  4. +
  5. Setup the code repository to deploy OUDSM
  6. +
+

Check the Kubernetes cluster is ready

+

As per the Prerequisites, a Kubernetes cluster should already be configured.

+
    +
  1. +

    Run the following command on the master node to check the cluster and worker nodes are running:

    +
    $ kubectl get nodes,pods -n kube-system
    +

    The output will look similar to the following:

    +
    NAME                  STATUS   ROLES    AGE   VERSION
    +node/worker-node1     Ready    <none>   17h   v1.26.6+1.el8
    +node/worker-node2     Ready    <none>   17h   v1.26.6+1.el8
    +node/master-node      Ready    master   23h   v1.26.6+1.el8
    +
    +NAME                                     READY   STATUS    RESTARTS   AGE
    +pod/coredns-66bff467f8-fnhbq             1/1     Running   0          23h
    +pod/coredns-66bff467f8-xtc8k             1/1     Running   0          23h
    +pod/etcd-master                          1/1     Running   0          21h
    +pod/kube-apiserver-master-node           1/1     Running   0          21h
    +pod/kube-controller-manager-master-node  1/1     Running   0          21h
    +pod/kube-flannel-ds-amd64-lxsfw          1/1     Running   0          17h
    +pod/kube-flannel-ds-amd64-pqrqr          1/1     Running   0          17h
    +pod/kube-flannel-ds-amd64-wj5nh          1/1     Running   0          17h
    +pod/kube-proxy-2kxv2                     1/1     Running   0          17h
    +pod/kube-proxy-82vvj                     1/1     Running   0          17h
    +pod/kube-proxy-nrgw9                     1/1     Running   0          23h
    +pod/kube-scheduler-master                1/1     Running   0          21h
    +
  2. +
+

Obtain the OUDSM container image

+

The Oracle Unified Directory Services Manager (OUDSM) Kubernetes deployment requires access to an OUDSM container image. The image can be obtained in the following ways:

+
    +
  • Prebuilt OUDSM container image
  • +
  • Build your own OUDSM container image using WebLogic Image Tool
  • +
+

Prebuilt OUDSM container image

+

The prebuilt OUDSM October 2023 container image can be downloaded from Oracle Container Registry. This image is prebuilt by Oracle and includes Oracle Unified Directory Services Manager 12.2.1.4.0, the October Patch Set Update (PSU) and other fixes released with the Critical Patch Update (CPU) program.

+

Note: Before using this image you must login to Oracle Container Registry, navigate to Middleware > oudsm_cpu and accept the license agreement.

+

You can use this image in the following ways (an example manual pull is sketched after this list):

+
    +
  • Pull the container image from the Oracle Container Registry automatically during the OUDSM Kubernetes deployment.
  • +
  • Manually pull the container image from the Oracle Container Registry and then upload it to your own container registry.
  • +
  • Manually pull the container image from the Oracle Container Registry and manually stage it on the master node and each worker node.
  • +
+
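
For the manual options above, the image can be pulled from the Oracle Container Registry once the license has been accepted and you have logged in. A minimal sketch using docker (podman can be substituted); the tag shown is illustrative, so substitute the tag for the release you are deploying:

+
$ docker login container-registry.oracle.com
+$ docker pull container-registry.oracle.com/middleware/oudsm_cpu:12.2.1.4-jdk8-ol7-<October'23>
+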

Build your own OUDSM container image using WebLogic Image Tool

+

You can build your own OUDSM container image using the WebLogic Image Tool. This is recommended if you need to apply one-off patches to a prebuilt OUDSM container image. For more information about building your own container image with the WebLogic Image Tool, see Create or update image.

+

You can use an image built with WebLogic Image Tool in the following ways:

+
    +
  • Manually upload them to your own container registry.
  • +
  • Manually stage them on the master node and each worker node.
  • +
+

Note: This documentation does not tell you how to pull or push the above images into a private container registry, or stage them on the master and worker nodes. Details of this can be found in the Enterprise Deployment Guide.

+

Setup the code repository to deploy OUDSM

+

Oracle Unified Directory Services Manager deployment on Kubernetes leverages deployment scripts provided by Oracle for creating OUDSM containers using the supplied Helm charts. To deploy Oracle Unified Directory Services Manager on Kubernetes, set up the deployment scripts on the master node as described below:

+
    +
  1. +

    Create a working directory to setup the source code.

    +
    $ mkdir <workdir>
    +

    For example:

    +
    $ mkdir /scratch/OUDSMContainer
    +
  2. +
  3. +

    Download the latest OUDSM deployment scripts from the OUDSM repository:

    +
    $ cd <workdir>
    +$ git clone https://github.com/oracle/fmw-kubernetes.git --branch release/23.4.1
    +

    For example:

    +
    $ cd /scratch/OUDSMContainer
    +$ git clone https://github.com/oracle/fmw-kubernetes.git --branch release/23.4.1
    +
  4. +
  5. +

    Set the $WORKDIR environment variable as follows:

    +
    $ export WORKDIR=<workdir>/fmw-kubernetes/OracleUnifiedDirectorySM
    +

    For example:

    +
    $ export WORKDIR=/scratch/OUDSMContainer/fmw-kubernetes/OracleUnifiedDirectorySM
    +

    You are now ready to create the OUDSM deployment as per Create OUDSM instances.

    +
  6. +
+ + + + + +
+ +
+ + +
+ + +
+ + + +
+ +
+
+
+ + + + + + + + + + + + + + + + + + diff --git a/docs/23.4.1/idm-products/oudsm/prepare-your-environment/index.xml b/docs/23.4.1/idm-products/oudsm/prepare-your-environment/index.xml new file mode 100644 index 000000000..a106b175a --- /dev/null +++ b/docs/23.4.1/idm-products/oudsm/prepare-your-environment/index.xml @@ -0,0 +1,14 @@ + + + + Prepare Your Environment on Oracle Fusion Middleware on Kubernetes + /fmw-kubernetes/23.4.1/idm-products/oudsm/prepare-your-environment/ + Recent content in Prepare Your Environment on Oracle Fusion Middleware on Kubernetes + Hugo -- gohugo.io + en-us + + + + + + \ No newline at end of file diff --git a/docs/23.4.1/idm-products/oudsm/prerequisites/index.html b/docs/23.4.1/idm-products/oudsm/prerequisites/index.html new file mode 100644 index 000000000..e85a9fd96 --- /dev/null +++ b/docs/23.4.1/idm-products/oudsm/prerequisites/index.html @@ -0,0 +1,3968 @@ + + + + + + + + + + + + Prerequisites :: Oracle Fusion Middleware on Kubernetes + + + + + + + + + + + + + + + + + + + + + + + + + +
+
+
+ +
+ +
+ +
+ +
+ +
+ +

+ + Prerequisites +

+ + + + + + + +

Introduction

+

This document provides information about the system requirements for deploying and running Oracle Unified Directory Services Manager 12c PS4 (12.2.1.4.0) in a Kubernetes environment.

+

System Requirements for Oracle Unified Directory Services Manager on Kubernetes

+
    +
  • A running Kubernetes cluster that meets the following requirements: +
      +
    • The Kubernetes cluster must have sufficient nodes and resources.
    • +
    • An installation of Helm is required on the Kubernetes cluster. Helm is used to create and deploy the necessary resources on the Kubernetes cluster.
    • +
    • A supported container engine must be installed and running on the Kubernetes cluster.
    • +
    • The Kubernetes cluster and container engine must meet the minimum version requirements outlined in document ID 2723908.1 on My Oracle Support (a quick version check is sketched after this list).
    • +
    • The nodes in the Kubernetes cluster must have access to a persistent volume such as a Network File System (NFS) mount or a shared file system.
    • +
    +
  • +
+
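
A quick way to record the client, cluster, and Helm versions, for comparison against the support matrix referenced above, is shown in the following sketch:

+
$ kubectl version
+$ helm version
+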

Note: This documentation does not tell you how to install a Kubernetes cluster, Helm, the container engine, or how to push container images to a container registry. Please refer to your vendor-specific documentation for this information. Also see Getting Started.

+ + + + + +
+ +
+ + +
+ + +
+ + + +
+ +
+
+
+ + + + + + + + + + + + + + + + + + diff --git a/docs/23.4.1/idm-products/oudsm/prerequisites/index.xml b/docs/23.4.1/idm-products/oudsm/prerequisites/index.xml new file mode 100644 index 000000000..c2ebbb11a --- /dev/null +++ b/docs/23.4.1/idm-products/oudsm/prerequisites/index.xml @@ -0,0 +1,14 @@ + + + + Prerequisites on Oracle Fusion Middleware on Kubernetes + /fmw-kubernetes/23.4.1/idm-products/oudsm/prerequisites/ + Recent content in Prerequisites on Oracle Fusion Middleware on Kubernetes + Hugo -- gohugo.io + en-us + + + + + + \ No newline at end of file diff --git a/docs/23.4.1/idm-products/oudsm/release-notes/index.html b/docs/23.4.1/idm-products/oudsm/release-notes/index.html new file mode 100644 index 000000000..a4d1527c4 --- /dev/null +++ b/docs/23.4.1/idm-products/oudsm/release-notes/index.html @@ -0,0 +1,4164 @@ + + + + + + + + + + + + Release Notes :: Oracle Fusion Middleware on Kubernetes + + + + + + + + + + + + + + + + + + + + + + + + + +
+
+
+ +
+
+ + + + +
+
+ +
+
+ + +
+
+ +
+ +
+ +
+ +

+ + Release Notes +

+ + + + + + + +

Review the latest changes and known issues for Oracle Unified Directory Services Manager on Kubernetes.

+

Recent changes

+ + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + +
DateVersionChange
October, 202323.4.1Supports Oracle Unified Directory Services Manager 12.2.1.4 domain deployment using the October 2023 container image which contains the October Patch Set Update (PSU) and other fixes released with the Critical Patch Update (CPU) program.
If upgrading to October 23 (23.4.1) from October 22 (22.4.1) or later, upgrade as follows:
1. Patch the OUDSM container image to October 23
If upgrading to October 23 (23.4.1) from July 22 (22.3.1) or earlier, you must upgrade the following in order:
1. Patch the OUDSM container image to October 23
2. Upgrade Elasticsearch and Kibana.
To upgrade to October 23 (23.4.1) you must follow the instructions in Patch and Upgrade.
July, 202323.3.1Supports Oracle Unified Directory Services Manager 12.2.1.4 domain deployment using the July 2023 container image which contains the July Patch Set Update (PSU) and other fixes released with the Critical Patch Update (CPU) program.
If upgrading to July 23 (23.3.1) from October 22 (22.4.1) or later, upgrade as follows:
1. Patch the OUDSM container image to July 23
If upgrading to July 23 (23.3.1) from July 22 (22.3.1) or earlier, you must upgrade the following in order:
1. Patch the OUDSM container image to July 23
2. Upgrade Elasticsearch and Kibana.
See Patch and Upgrade for these instructions.
April, 202323.2.1Supports Oracle Unified Directory Services Manager 12.2.1.4 domain deployment using the April 2023 container image which contains the April Patch Set Update (PSU) and other fixes released with the Critical Patch Update (CPU) program.
If upgrading to April 23 (23.2.1) from October 22 (22.4.1), upgrade as follows:
1. Patch the OUDSM container image to April 23
If upgrading to April 23 (23.2.1) from July 22 (22.3.1) or earlier, you must upgrade the following in order:
1. Patch the OUDSM container image to April 23
2. Upgrade Elasticsearch and Kibana.
See Patch and Upgrade for these instructions.
January, 202323.1.1Supports Oracle Unified Directory Services Manager 12.2.1.4 domain deployment using the January 2023 container image which contains the January Patch Set Update (PSU) and other fixes released with the Critical Patch Update (CPU) program.
If upgrading to January 23 (23.1.1) from October 22 (22.4.1) upgrade as follows:
1. Patch the OUDSM container image to January 23
If upgrading to January 23 (23.1.1) from July 22 (22.3.1) or earlier, you must upgrade the following in order:
1. Patch the OUDSM container image to January 23
2. Upgrade Elasticsearch and Kibana.
See Patch and Upgrade for these instructions.
October, 202222.4.1Supports Oracle Unified Directory Services Manager 12.2.1.4 domain deployment using the October 2022 container image which contains the October Patch Set Update (PSU) and other fixes released with the Critical Patch Update (CPU) program.
Changes to deployment of Logging and Visualization with Elasticsearch and Kibana.
OUDSM container images are now only available from container-registry.oracle.com and are no longer available from My Oracle Support.
If upgrading to October 22 (22.4.1) from a previous release, you must upgrade the following in order:
1. Patch the OUDSM container image to October 22
2. Upgrade Elasticsearch and Kibana.
See Patch and Upgrade for these instructions.
July, 202222.3.1Supports Oracle Unified Directory Services Manager 12.2.1.4 domain deployment using the July 2022 container image which contains the July Patch Set Update (PSU) and other fixes released with the Critical Patch Update (CPU) program.
April, 202222.2.1Updated for CRI-O support.
November 202121.4.2Voyager ingress removed as no longer supported.
October 202121.4.1A) References to supported Kubernetes, Helm and Docker versions removed and replaced with Support note reference. B) Namespace and domain names changed to be consistent with Enterprise Deployment Guide for Oracle Identity and Access Management in a Kubernetes Cluster. C) Upgrading a Kubernetes Cluster and Security Hardening removed as vendor specific.
November 202020.4.1Initial release of Oracle Unified Directory Services Manager on Kubernetes.
+ + + + + +
+ +
+ + +
+ + +
+ + + +
+ +
+
+
+ + + + + + + + + + + + + + + + + + diff --git a/docs/23.4.1/idm-products/oudsm/release-notes/index.xml b/docs/23.4.1/idm-products/oudsm/release-notes/index.xml new file mode 100644 index 000000000..01b0a0abf --- /dev/null +++ b/docs/23.4.1/idm-products/oudsm/release-notes/index.xml @@ -0,0 +1,14 @@ + + + + Release Notes on Oracle Fusion Middleware on Kubernetes + /fmw-kubernetes/23.4.1/idm-products/oudsm/release-notes/ + Recent content in Release Notes on Oracle Fusion Middleware on Kubernetes + Hugo -- gohugo.io + en-us + + + + + + \ No newline at end of file diff --git a/docs/23.4.1/idm-products/oudsm/troubleshooting/index.html b/docs/23.4.1/idm-products/oudsm/troubleshooting/index.html new file mode 100644 index 000000000..7d62128f0 --- /dev/null +++ b/docs/23.4.1/idm-products/oudsm/troubleshooting/index.html @@ -0,0 +1,4068 @@ + + + + + + + + + + + + Troubleshooting :: Oracle Fusion Middleware on Kubernetes + + + + + + + + + + + + + + + + + + + + + + + + + +
+
+
+ +
+
+ + + + + + + +
+
+ +
+ +
+ +
+ +

+ + Troubleshooting +

+ + + + + + + +
    +
  1. Check the status of a namespace
  2. +
  3. View pod logs
  4. +
  5. View pod description
  6. +
+

Check the status of a namespace

+

To check the status of objects in a namespace use the following command:

+
$ kubectl --namespace <namespace> get nodes,pod,service,secret,pv,pvc,ingress -o wide
+

For example:

+
$ kubectl --namespace oudsmns get nodes,pod,service,secret,pv,pvc,ingress -o wide
+

The output will look similar to the following:

+
$ kubectl --namespace oudsmns get pod,service,secret,pv,pvc,ingress -o wide
+
+NAME          READY   STATUS    RESTARTS   AGE   IP            NODE            NOMINATED NODE   READINESS GATES
+pod/oudsm-1   1/1     Running   0          18m   10.244.1.89   <worker-node>   <none>           <none>
+
+NAME                TYPE        CLUSTER-IP       EXTERNAL-IP   PORT(S)             AGE   SELECTOR
+service/oudsm-1     ClusterIP   10.101.79.110    <none>        7001/TCP,7002/TCP   18m   app.kubernetes.io/instance=oudsm,app.kubernetes.io/name=oudsm,oudsm/instance=oudsm-1
+service/oudsm-lbr   ClusterIP   10.106.241.204   <none>        7001/TCP,7002/TCP   18m   app.kubernetes.io/instance=oudsm,app.kubernetes.io/name=oudsm
+
+NAME                                 TYPE                                  DATA   AGE
+secret/default-token-jtwn2           kubernetes.io/service-account-token   3      22h
+secret/orclcred                      kubernetes.io/dockerconfigjson        1      22h
+secret/oudsm-creds                   opaque                                2      18m
+secret/oudsm-tls-cert                kubernetes.io/tls                     2      18m
+secret/oudsm-token-7kjff             kubernetes.io/service-account-token   3      18m
+secret/sh.helm.release.v1.oudsm.v1   helm.sh/release.v1                    1      18m
+
+NAME                                 CAPACITY   ACCESS MODES   RECLAIM POLICY   STATUS   CLAIM                       STORAGECLASS        REASON   AGE   VOLUMEMODE
+persistentvolume/oudsm-pv            20Gi       RWX            Delete           Bound    oudsmns/oudsm-pvc           manual                       18m   Filesystem
+
+NAME                              STATUS   VOLUME     CAPACITY   ACCESS MODES   STORAGECLASS   AGE   VOLUMEMODE
+persistentvolumeclaim/oudsm-pvc   Bound    oudsm-pv   20Gi       RWX            manual         18m   Filesystem
+
+NAME                                            CLASS    HOSTS           ADDRESS   PORTS     AGE
+ingress.networking.k8s.io/oudsm-ingress-nginx   <none>   oudsm-1,oudsm             80, 443   18m
+

Include/exclude elements (nodes,pod,service,secret,pv,pvc,ingress) as required.

+
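
In addition to the objects above, recent events in the namespace are often useful when diagnosing pod scheduling or startup problems; for example:

+
$ kubectl get events -n oudsmns --sort-by='.lastTimestamp'
+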

View pod logs

+

To view logs for a pod use the following command:

+
$ kubectl logs <pod> -n <namespace>
+

For example:

+
$ kubectl logs oudsm-1 -n oudsmns
+
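
To stream the log continuously and limit the output to recent lines, the standard kubectl flags can be used; for example:

+
$ kubectl logs -f --tail=100 oudsm-1 -n oudsmns
+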

View pod description

+

Details about a pod can be viewed using the kubectl describe command:

+
$ kubectl describe pod <pod> -n <namespace>
+

For example:

+
$ kubectl describe pod oudsm-1 -n oudsmns
+

The output will look similar to the following:

+
Name:         oudsm-1
+Namespace:    oudsmns
+Priority:     0
+Node:         <worker-node>/100.102.48.28
+Start Time:   <DATE>
+Labels:       app.kubernetes.io/instance=oudsm
+              app.kubernetes.io/managed-by=Helm
+              app.kubernetes.io/name=oudsm
+              app.kubernetes.io/version=12.2.1.4.0
+              helm.sh/chart=oudsm-0.1
+              oudsm/instance=oudsm-1
+Annotations:  meta.helm.sh/release-name: oudsm
+              meta.helm.sh/release-namespace: oudsmns
+Status:       Running
+IP:           10.244.1.89
+IPs:
+  IP:  10.244.1.89
+Containers:
+  oudsm:
+    Container ID:   cri-o://37dbe00257095adc0a424b8841db40b70bbb65645451e0bc53718a0fd7ce22e4
+    Image:          container-registry.oracle.com/middleware/oudsm_cpu:12.2.1.4-jdk8-ol7-<October'23>
+    Image ID:       container-registry.oracle.com/middleware/oudsm_cpu@sha256:47960d36d502d699bfd8f9b1be4c9216e302db95317c288f335f9c8a32974f2c
+    Ports:          7001/TCP, 7002/TCP
+    Host Ports:     0/TCP, 0/TCP
+    State:          Running
+      Started:      <DATE>
+    Ready:          True
+    Restart Count:  0
+    Liveness:       http-get http://:7001/oudsm delay=1200s timeout=15s period=60s #success=1 #failure=3
+    Readiness:      http-get http://:7001/oudsm delay=900s timeout=15s period=30s #success=1 #failure=3
+    Environment:
+      DOMAIN_NAME:         oudsmdomain-1
+      ADMIN_USER:          <set to the key 'adminUser' in secret 'oudsm-creds'>  Optional: false
+      ADMIN_PASS:          <set to the key 'adminPass' in secret 'oudsm-creds'>  Optional: false
+      ADMIN_PORT:          7001
+      ADMIN_SSL_PORT:      7002
+      WLS_PLUGIN_ENABLED:  true
+    Mounts:
+      /u01/oracle/user_projects from oudsm-pv (rw)
+      /var/run/secrets/kubernetes.io/serviceaccount from kube-api-access-9ht84 (ro)
+Conditions:
+  Type              Status
+  Initialized       True
+  Ready             True
+  ContainersReady   True
+  PodScheduled      True
+Volumes:
+  oudsm-pv:
+    Type:       PersistentVolumeClaim (a reference to a PersistentVolumeClaim in the same namespace)
+    ClaimName:  oudsm-pvc
+    ReadOnly:   false
+  kube-api-access-9ht84:
+    Type:                    Projected (a volume that contains injected data from multiple sources)
+    TokenExpirationSeconds:  3607
+    ConfigMapName:           kube-root-ca.crt
+    ConfigMapOptional:       <nil>
+    DownwardAPI:             true
+QoS Class:                   BestEffort
+Node-Selectors:              <none>
+Tolerations:                 node.kubernetes.io/not-ready:NoExecute op=Exists for 300s
+                             node.kubernetes.io/unreachable:NoExecute op=Exists for 300s
+Events:
+  Type     Reason            Age   From               Message
+  ----     ------            ----  ----               -------
+  Warning  FailedScheduling  39m   default-scheduler  0/3 nodes are available: 3 pod has unbound immediate PersistentVolumeClaims.
+  Normal   Scheduled         39m   default-scheduler  Successfully assigned oudsmns/oudsm-1 to <worker-node>
+  Normal   Pulled            39m   kubelet            Container image "container-registry.oracle.com/middleware/oudsm_cpu:12.2.1.4-jdk8-ol7-<October'23>" already present on machine
+  Normal   Created           39m   kubelet            Created container oudsm
+  Normal   Started           39m   kubelet            Started container oudsm
+
+
+ + + + +
+ +
+ + +
+ + +
+ + + +
+ +
+
+
+ + + + + + + + + + + + + + + + + + diff --git a/docs/23.4.1/idm-products/oudsm/troubleshooting/index.xml b/docs/23.4.1/idm-products/oudsm/troubleshooting/index.xml new file mode 100644 index 000000000..5b301e7a7 --- /dev/null +++ b/docs/23.4.1/idm-products/oudsm/troubleshooting/index.xml @@ -0,0 +1,14 @@ + + + + Troubleshooting on Oracle Fusion Middleware on Kubernetes + /fmw-kubernetes/23.4.1/idm-products/oudsm/troubleshooting/ + Recent content in Troubleshooting on Oracle Fusion Middleware on Kubernetes + Hugo -- gohugo.io + en-us + + + + + + \ No newline at end of file diff --git a/docs/23.4.1/images/.gitkeep b/docs/23.4.1/images/.gitkeep new file mode 100644 index 000000000..e69de29bb diff --git a/docs/23.4.1/images/clippy.svg b/docs/23.4.1/images/clippy.svg new file mode 100644 index 000000000..1c8abc2fd --- /dev/null +++ b/docs/23.4.1/images/clippy.svg @@ -0,0 +1 @@ + diff --git a/docs/23.4.1/images/favicon.png b/docs/23.4.1/images/favicon.png new file mode 100644 index 000000000..df06e35d6 Binary files /dev/null and b/docs/23.4.1/images/favicon.png differ diff --git a/docs/23.4.1/images/fmw_12c_12_2_1_4_0-logo.png b/docs/23.4.1/images/fmw_12c_12_2_1_4_0-logo.png new file mode 100644 index 000000000..6a2d34fff Binary files /dev/null and b/docs/23.4.1/images/fmw_12c_12_2_1_4_0-logo.png differ diff --git a/docs/23.4.1/images/gopher-404.jpg b/docs/23.4.1/images/gopher-404.jpg new file mode 100644 index 000000000..2a5054389 Binary files /dev/null and b/docs/23.4.1/images/gopher-404.jpg differ diff --git a/docs/23.4.1/images/logo.png b/docs/23.4.1/images/logo.png new file mode 100644 index 000000000..6bfe10627 Binary files /dev/null and b/docs/23.4.1/images/logo.png differ diff --git a/docs/23.4.1/images/soa-domains/CreateApplicationServerConnection.jpg b/docs/23.4.1/images/soa-domains/CreateApplicationServerConnection.jpg new file mode 100644 index 000000000..e69f13ac0 Binary files /dev/null and b/docs/23.4.1/images/soa-domains/CreateApplicationServerConnection.jpg differ diff --git a/docs/23.4.1/images/soa-domains/CreateApplicationServerConnectionTestConnection.jpg b/docs/23.4.1/images/soa-domains/CreateApplicationServerConnectionTestConnection.jpg new file mode 100644 index 000000000..84796fec9 Binary files /dev/null and b/docs/23.4.1/images/soa-domains/CreateApplicationServerConnectionTestConnection.jpg differ diff --git a/docs/23.4.1/images/soa-domains/ExposeSOAMST3.png b/docs/23.4.1/images/soa-domains/ExposeSOAMST3.png new file mode 100644 index 000000000..119d72c67 Binary files /dev/null and b/docs/23.4.1/images/soa-domains/ExposeSOAMST3.png differ diff --git a/docs/23.4.1/images/soa-domains/JDEV_OSB_Deploy_Success_Status.png b/docs/23.4.1/images/soa-domains/JDEV_OSB_Deploy_Success_Status.png new file mode 100644 index 000000000..26adedcf5 Binary files /dev/null and b/docs/23.4.1/images/soa-domains/JDEV_OSB_Deploy_Success_Status.png differ diff --git a/docs/23.4.1/images/soa-domains/JDEV_OSB_Deploying_Progress.png b/docs/23.4.1/images/soa-domains/JDEV_OSB_Deploying_Progress.png new file mode 100644 index 000000000..c834a7852 Binary files /dev/null and b/docs/23.4.1/images/soa-domains/JDEV_OSB_Deploying_Progress.png differ diff --git a/docs/23.4.1/images/soa-domains/JDEV_OSB_Deployment_Start.png b/docs/23.4.1/images/soa-domains/JDEV_OSB_Deployment_Start.png new file mode 100644 index 000000000..e15c9d7a4 Binary files /dev/null and b/docs/23.4.1/images/soa-domains/JDEV_OSB_Deployment_Start.png differ diff --git a/docs/23.4.1/images/soa-domains/JDEV_OSB_Deployment_Summary.jpg 
b/docs/23.4.1/images/soa-domains/JDEV_OSB_Deployment_Summary.jpg new file mode 100644 index 000000000..303ab5151 Binary files /dev/null and b/docs/23.4.1/images/soa-domains/JDEV_OSB_Deployment_Summary.jpg differ diff --git a/docs/23.4.1/images/soa-domains/JDEV_OSB_Deployment_Summary.png b/docs/23.4.1/images/soa-domains/JDEV_OSB_Deployment_Summary.png new file mode 100644 index 000000000..303ab5151 Binary files /dev/null and b/docs/23.4.1/images/soa-domains/JDEV_OSB_Deployment_Summary.png differ diff --git a/docs/23.4.1/images/soa-domains/JDEV_OSB_Select_Application_Server.png b/docs/23.4.1/images/soa-domains/JDEV_OSB_Select_Application_Server.png new file mode 100644 index 000000000..1ea311e9c Binary files /dev/null and b/docs/23.4.1/images/soa-domains/JDEV_OSB_Select_Application_Server.png differ diff --git a/docs/23.4.1/images/soa-domains/JDEV_OSB_Select_Deployment_Action.png b/docs/23.4.1/images/soa-domains/JDEV_OSB_Select_Deployment_Action.png new file mode 100644 index 000000000..ae3fecbf8 Binary files /dev/null and b/docs/23.4.1/images/soa-domains/JDEV_OSB_Select_Deployment_Action.png differ diff --git a/docs/23.4.1/images/soa-domains/JDEV_OSB_servicebus_launch_test_console.png b/docs/23.4.1/images/soa-domains/JDEV_OSB_servicebus_launch_test_console.png new file mode 100644 index 000000000..a01fe9f55 Binary files /dev/null and b/docs/23.4.1/images/soa-domains/JDEV_OSB_servicebus_launch_test_console.png differ diff --git a/docs/23.4.1/images/soa-domains/JDEV_Reference_Config_Settings.png b/docs/23.4.1/images/soa-domains/JDEV_Reference_Config_Settings.png new file mode 100644 index 000000000..549fa9390 Binary files /dev/null and b/docs/23.4.1/images/soa-domains/JDEV_Reference_Config_Settings.png differ diff --git a/docs/23.4.1/images/soa-domains/JDEV_SOA_Deploy_Configuration.png b/docs/23.4.1/images/soa-domains/JDEV_SOA_Deploy_Configuration.png new file mode 100644 index 000000000..8d4320237 Binary files /dev/null and b/docs/23.4.1/images/soa-domains/JDEV_SOA_Deploy_Configuration.png differ diff --git a/docs/23.4.1/images/soa-domains/JDEV_SOA_Deploy_Success_Status.png b/docs/23.4.1/images/soa-domains/JDEV_SOA_Deploy_Success_Status.png new file mode 100644 index 000000000..84a132f0b Binary files /dev/null and b/docs/23.4.1/images/soa-domains/JDEV_SOA_Deploy_Success_Status.png differ diff --git a/docs/23.4.1/images/soa-domains/JDEV_SOA_Deploying_Progress.png b/docs/23.4.1/images/soa-domains/JDEV_SOA_Deploying_Progress.png new file mode 100644 index 000000000..ef49c887c Binary files /dev/null and b/docs/23.4.1/images/soa-domains/JDEV_SOA_Deploying_Progress.png differ diff --git a/docs/23.4.1/images/soa-domains/JDEV_SOA_Deployment_Start.png b/docs/23.4.1/images/soa-domains/JDEV_SOA_Deployment_Start.png new file mode 100644 index 000000000..45cb532c7 Binary files /dev/null and b/docs/23.4.1/images/soa-domains/JDEV_SOA_Deployment_Start.png differ diff --git a/docs/23.4.1/images/soa-domains/JDEV_SOA_Deployment_Summary.png b/docs/23.4.1/images/soa-domains/JDEV_SOA_Deployment_Summary.png new file mode 100644 index 000000000..02ac26fdc Binary files /dev/null and b/docs/23.4.1/images/soa-domains/JDEV_SOA_Deployment_Summary.png differ diff --git a/docs/23.4.1/images/soa-domains/JDEV_SOA_Select_Application_Server.png b/docs/23.4.1/images/soa-domains/JDEV_SOA_Select_Application_Server.png new file mode 100644 index 000000000..f751e0c56 Binary files /dev/null and b/docs/23.4.1/images/soa-domains/JDEV_SOA_Select_Application_Server.png differ diff --git 
a/docs/23.4.1/images/soa-domains/JDEV_SOA_Select_Deployment_Action.png b/docs/23.4.1/images/soa-domains/JDEV_SOA_Select_Deployment_Action.png new file mode 100644 index 000000000..a4eaf58fb Binary files /dev/null and b/docs/23.4.1/images/soa-domains/JDEV_SOA_Select_Deployment_Action.png differ diff --git a/docs/23.4.1/images/soa-domains/JDEV_SOA_Server_Lookup.png b/docs/23.4.1/images/soa-domains/JDEV_SOA_Server_Lookup.png new file mode 100644 index 000000000..fae2f2378 Binary files /dev/null and b/docs/23.4.1/images/soa-domains/JDEV_SOA_Server_Lookup.png differ diff --git a/docs/23.4.1/images/soa-domains/JDEV_SOA_Target_soa_servers.png b/docs/23.4.1/images/soa-domains/JDEV_SOA_Target_soa_servers.png new file mode 100644 index 000000000..5b8cd53f6 Binary files /dev/null and b/docs/23.4.1/images/soa-domains/JDEV_SOA_Target_soa_servers.png differ diff --git a/docs/23.4.1/images/soa-domains/JDEV_SOA_soainfra_server1.png b/docs/23.4.1/images/soa-domains/JDEV_SOA_soainfra_server1.png new file mode 100644 index 000000000..9f225df7c Binary files /dev/null and b/docs/23.4.1/images/soa-domains/JDEV_SOA_soainfra_server1.png differ diff --git a/docs/23.4.1/images/soa-domains/JDEV_SOA_soainfra_server2.png b/docs/23.4.1/images/soa-domains/JDEV_SOA_soainfra_server2.png new file mode 100644 index 000000000..4e5e1f768 Binary files /dev/null and b/docs/23.4.1/images/soa-domains/JDEV_SOA_soainfra_server2.png differ diff --git a/docs/23.4.1/images/soa-domains/SOA_Composites_Deploy_using_Jdev.png b/docs/23.4.1/images/soa-domains/SOA_Composites_Deploy_using_Jdev.png new file mode 100644 index 000000000..baaf4f82b Binary files /dev/null and b/docs/23.4.1/images/soa-domains/SOA_Composites_Deploy_using_Jdev.png differ diff --git a/docs/23.4.1/images/soa-domains/SOA_EDG_FrontEndAddress.png b/docs/23.4.1/images/soa-domains/SOA_EDG_FrontEndAddress.png new file mode 100644 index 000000000..ea899fede Binary files /dev/null and b/docs/23.4.1/images/soa-domains/SOA_EDG_FrontEndAddress.png differ diff --git a/docs/23.4.1/images/soa-domains/SOA_EDG_PersistentVolume.png b/docs/23.4.1/images/soa-domains/SOA_EDG_PersistentVolume.png new file mode 100644 index 000000000..1a3ad2146 Binary files /dev/null and b/docs/23.4.1/images/soa-domains/SOA_EDG_PersistentVolume.png differ diff --git a/docs/23.4.1/images/soa-domains/SOA_EDG_Topology.png b/docs/23.4.1/images/soa-domains/SOA_EDG_Topology.png new file mode 100644 index 000000000..68a1f0d1f Binary files /dev/null and b/docs/23.4.1/images/soa-domains/SOA_EDG_Topology.png differ diff --git a/docs/23.4.1/images/soa-domains/custIdentity-custTrust-keystores.png b/docs/23.4.1/images/soa-domains/custIdentity-custTrust-keystores.png new file mode 100644 index 000000000..290492743 Binary files /dev/null and b/docs/23.4.1/images/soa-domains/custIdentity-custTrust-keystores.png differ diff --git a/docs/23.4.1/images/soasuite-logo.png b/docs/23.4.1/images/soasuite-logo.png new file mode 100644 index 000000000..347663f5b Binary files /dev/null and b/docs/23.4.1/images/soasuite-logo.png differ diff --git a/docs/23.4.1/index.html b/docs/23.4.1/index.html new file mode 100644 index 000000000..fd8c4a72a --- /dev/null +++ b/docs/23.4.1/index.html @@ -0,0 +1,3921 @@ + + + + + + + + + + + + Oracle Fusion Middleware on Kubernetes :: Oracle Fusion Middleware on Kubernetes + + + + + + + + + + + + + + + + + + + + + + + + + +
+
+
+ +
+ +
+ +
+ + + + + + + navigation + + + +

Oracle Fusion Middleware on Kubernetes

+

Oracle supports the deployment of the following Oracle Fusion Middleware products on Kubernetes. Click on the appropriate document link below to get started on setting up the product.

+ + + + + + + +
    + + + + + + + + + + + + + + + + + + + +

    +Oracle Identity Management on Kubernetes +

    + + + + + +

    This document lists all the Oracle Identity Management products deployment supported on Kubernetes.

    + + + + + + + + +
+ + + + + + + +
+ + +
+ + + +
+ +
+
+
+ + + + + + + + + + + + + + + + + + + diff --git a/docs/23.4.1/index.json b/docs/23.4.1/index.json new file mode 100644 index 000000000..3d5b8784c --- /dev/null +++ b/docs/23.4.1/index.json @@ -0,0 +1,666 @@ +[ +{ + "uri": "/fmw-kubernetes/23.4.1/", + "title": "Oracle Fusion Middleware on Kubernetes", + "tags": [], + "description": "This document lists all the Oracle Fusion Middleware products deployment supported on Kubernetes.", + "content": "Oracle Fusion Middleware on Kubernetes Oracle supports the deployment of the following Oracle Fusion Middleware products on Kubernetes. Click on the appropriate document link below to get started on setting up the product.\n Oracle Identity Management on Kubernetes This document lists all the Oracle Identity Management products deployment supported on Kubernetes.\n " +}, +{ + "uri": "/fmw-kubernetes/23.4.1/idm-products/oig/configure-design-console/using-the-design-console-with-nginx-non-ssl/", + "title": "a. Using Design Console with NGINX(non-SSL)", + "tags": [], + "description": "Configure Design Console with NGINX(non-SSL).", + "content": "Configure an NGINX ingress (non-SSL) to allow Design Console to connect to your Kubernetes cluster.\n Prerequisites\n Setup routing rules for the Design Console ingress\n Create the ingress\n Update the T3 channel\n Restart the OIG Managed Server\n Design Console client\na. Using an on-premises installed Design Console\nb. Using a container image for Design Console\n Login to the Design Console\n Prerequisites If you haven\u0026rsquo;t already configured an NGINX ingress controller (Non-SSL) for OIG, follow Using an Ingress with NGINX (non-SSL).\nMake sure you know the master hostname and ingress port for NGINX before proceeding e.g http://${MASTERNODE-HOSTNAME}:${MASTERNODE-PORT}.\nNote: In all steps below if you are using a load balancer for your ingress instead of NodePort then replace ${MASTERNODE-HOSTNAME}:${MASTERNODE-PORT} with `${LOADBALANCER-HOSTNAME}:${LOADBALANCER-PORT}.\nSetup routing rules for the Design Console ingress Setup routing rules by running the following commands:\n$ cd $WORKDIR/kubernetes/design-console-ingress Edit values.yaml and ensure that tls: NONSSL and domainUID: governancedomain are set, for example:\n# Load balancer type. 
Supported values are: NGINX type: NGINX # Type of Configuration Supported Values are : NONSSL,SSL # tls: NONSSL tls: NONSSL # TLS secret name if the mode is SSL secretName: dc-tls-cert # WLS domain as backend to the load balancer wlsDomain: domainUID: governancedomain oimClusterName: oim_cluster oimServerT3Port: 14002 Create the ingress Run the following command to create the ingress:\n$ cd $WORKDIR $ helm install governancedomain-nginx-designconsole kubernetes/design-console-ingress --namespace oigns --values kubernetes/design-console-ingress/values.yaml For example:\nThe output will look similar to the following:\nNAME: governancedomain-nginx-designconsole LAST DEPLOYED: \u0026lt;DATE\u0026gt; NAMESPACE: oigns STATUS: deployed REVISION: 1 TEST SUITE: None Run the following command to show the ingress is created successfully:\n$ kubectl describe ing governancedomain-nginx-designconsole -n \u0026lt;domain_namespace\u0026gt; For example:\n$ kubectl describe ing governancedomain-nginx-designconsole -n oigns The output will look similar to the following:\nName: governancedomain-nginx-designconsole Namespace: oigns Address: Default backend: default-http-backend:80 (\u0026lt;error: endpoints \u0026quot;default-http-backend\u0026quot; not found\u0026gt;) Rules: Host Path Backends ---- ---- -------- * governancedomain-cluster-oim-cluster:14002 (10.244.1.25:14002) Annotations: kubernetes.io/ingress.class: nginx meta.helm.sh/release-name: governancedomain-nginx-designconsole meta.helm.sh/release-namespace: oigns nginx.ingress.kubernetes.io/affinity: cookie nginx.ingress.kubernetes.io/enable-access-log: false Events: Type Reason Age From Message ---- ------ ---- ---- ------- Normal Sync 13s nginx-ingress-controller Scheduled for sync Update the T3 channel Log in to the WebLogic Console using http://${MASTERNODE-HOSTNAME}:${MASTERNODE-PORT}/console.\n Navigate to Environment, click Servers, and then select oim_server1.\n Click Protocols, and then Channels.\n Click the default T3 channel called T3Channel.\n Click Lock and Edit.\n Set the External Listen Address to the ingress controller hostname ${MASTERNODE-HOSTNAME}.\n Set the External Listen Port to the ingress controller port ${MASTERNODE-PORT}.\n Click Save.\n Click Activate Changes.\n Restart the OIG Managed Server Restart the OIG Managed Server for the above changes to take effect:\n$ cd $WORKDIR/kubernetes/domain-lifecycle $ ./restartServer.sh -s oim_server1 -d \u0026lt;domain_uid\u0026gt; -n \u0026lt;domain_namespace\u0026gt; For example:\n$ cd $WORKDIR/kubernetes/domain-lifecycle ./restartServer.sh -s oim_server1 -d governancedomain -n oigns Make sure the \u0026lt;domain_uid\u0026gt;-oim-server1 has a READY status of 1/1 before continuing:\n$ kubectl get pods -n oigns | grep oim-server1 The output will look similar to the following:\ngovernancedomain-oim-server1 1/1 Running 0 8m Design Console client It is possible to use Design Console from an on-premises install, or from a container image.\nUsing an on-premises installed Design Console Install Design Console on an on-premises machine\n Follow Login to the Design Console.\n Using a container image for Design Console Using Docker The Design Console can be run from a container using X windows emulation.\n On the parent machine where the Design Console is to be displayed, run xhost +.\n Find which worker node the \u0026lt;domain\u0026gt;-oim-server1 pod is running. 
For example:\n$ kubectl get pods -n oigns -o wide | grep governancedomain-oim-server1 The output will look similar to the following:\ngovernancedomain-oim-server1 1/1 Running 0 31m 10.244.2.98 worker-node2 \u0026lt;none\u0026gt; \u0026lt;none\u0026gt; On the worker node returned above e.g worker-node2, execute the following command to find the OIG container image name:\n$ docker images Then execute the following command to start a container to run Design Console:\n$ docker run -u root --name oigdcbase -it \u0026lt;image\u0026gt; bash For example:\n$ docker run -u root -it --name oigdcbase container-registry.oracle.com/middleware/oig_cpu:12.2.1.4-jdk8-ol7-\u0026lt;January\u0026#39;23\u0026gt; bash This will take you into a bash shell inside the container:\nbash-4.2# Inside the container set the proxy, for example:\nbash-4.2# export https_proxy=http://proxy.example.com:80 Install the relevant X windows packages in the container:\nbash-4.2# yum install libXext libXrender libXtst Execute the following outside the container to create a new Design Console image from the container:\n$ docker commit \u0026lt;container_name\u0026gt; \u0026lt;design_console_image_name\u0026gt; For example:\n$ docker commit oigdcbase oigdc Exit the container bash session:\nbash-4.2# exit Start a new container using the Design Console image:\n$ docker run --name oigdc -it oigdc /bin/bash This will take you into a bash shell for the container:\nbash-4.2# In the container run the following to export the DISPLAY:\n$ export DISPLAY=\u0026lt;parent_machine_hostname:1\u0026gt; Start the Design Console from the container:\nbash-4.2# cd idm/designconsole bash-4.2# sh xlclient.sh The Design Console login should be displayed. Now follow Login to the Design Console.\n Using podman On the parent machine where the Design Console is to be displayed, run xhost +.\n Find which worker node the \u0026lt;domain\u0026gt;-oim-server1 pod is running. 
For example:\n$ kubectl get pods -n oigns -o wide | grep governancedomain-oim-server1 The output will look similar to the following:\ngovernancedomain-oim-server1 1/1 Running 0 31m 10.244.2.98 worker-node2 \u0026lt;none\u0026gt; \u0026lt;none\u0026gt; On the worker node returned above e.g worker-node2, execute the following command to find the OIG container image name:\n$ podman images Then execute the following command to start a container to run Design Console:\n$ podman run -u root --name oigdcbase -it \u0026lt;image\u0026gt; bash For example:\n$ podman run -u root -it --name oigdcbase container-registry.oracle.com/middleware/oig_cpu:12.2.1.4-jdk8-ol7-\u0026lt;January\u0026#39;23\u0026gt; bash This will take you into a bash shell inside the container:\nbash-4.2# Inside the container set the proxy, for example:\nbash-4.2# export https_proxy=http://proxy.example.com:80 Install the relevant X windows packages in the container:\nbash-4.2# yum install libXext libXrender libXtst Execute the following outside the container to create a new Design Console image from the container:\n$ podman commit \u0026lt;container_name\u0026gt; \u0026lt;design_console_image_name\u0026gt; For example:\n$ podman commit oigdcbase oigdc Exit the container bash session:\nbash-4.2# exit Start a new container using the Design Console image:\n$ podman run --name oigdc -it oigdc /bin/bash This will take you into a bash shell for the container:\nbash-4.2# In the container run the following to export the DISPLAY:\n$ export DISPLAY=\u0026lt;parent_machine_hostname:1\u0026gt; Start the Design Console from the container:\nbash-4.2# cd idm/designconsole bash-4.2# sh xlclient.sh The Design Console login should be displayed. Now follow Login to the Design Console.\n Login to the Design Console Launch the Design Console and in the Oracle Identity Manager Design Console login page enter the following details:\nEnter the following details and click Login:\n Server URL: \u0026lt;url\u0026gt; User ID: xelsysadm Password: \u0026lt;password\u0026gt;. where \u0026lt;url\u0026gt; is http://${MASTERNODE-HOSTNAME}:${MASTERNODE-PORT}\n If successful the Design Console will be displayed.\n " +}, +{ + "uri": "/fmw-kubernetes/23.4.1/idm-products/oam/introduction/", + "title": "Introduction", + "tags": [], + "description": "The WebLogic Kubernetes Operator supports deployment of Oracle Access Management (OAM). Follow the instructions in this guide to set up these Oracle Access Management domains on Kubernetes.", + "content": "The WebLogic Kubernetes Operator supports deployment of Oracle Access Management (OAM).\nIn this release, OAM domains are supported using the “domain on a persistent volume” model only, where the domain home is located in a persistent volume (PV).\nThe WebLogic Kubernetes Operator has several key features to assist you with deploying and managing Oracle Access Management domains in a Kubernetes environment. You can:\n Create OAM instances in a Kubernetes persistent volume. This persistent volume can reside in an NFS file system or other Kubernetes volume types. Start servers based on declarative startup parameters and desired states. Expose the OAM Services through external access. Scale OAM domains by starting and stopping Managed Servers on demand. Publish operator and WebLogic Server logs into Elasticsearch and interact with them in Kibana. Monitor the OAM instance using Prometheus and Grafana. 
Current production release The current production release for the Oracle Access Management domain deployment on Kubernetes is 23.4.1. This release uses the WebLogic Kubernetes Operator version 4.1.2.\nFor 4.0.X WebLogic Kubernetes Operator refer to Version 23.3.1\nFor 3.4.X WebLogic Kubernetes Operator refer to Version 23.1.1\nRecent changes and known issues See the Release Notes for recent changes and known issues for Oracle Access Management domain deployment on Kubernetes.\nLimitations See here for limitations in this release.\nGetting started This documentation explains how to configure OAM on a Kubernetes cluster where no other Oracle Identity Management products will be deployed. For detailed information about this type of deployment, start at Prerequisites and follow this documentation sequentially. Please note that this documentation does not explain how to configure a Kubernetes cluster given the product can be deployed on any compliant Kubernetes vendor.\nIf you are deploying multiple Oracle Identity Management products on the same Kubernetes cluster, then you must follow the Enterprise Deployment Guide outlined in Enterprise Deployments. Please note, you also have the option to follow the Enterprise Deployment Guide even if you are only installing OAM and no other Oracle Identity Management products.\nNote: If you need to understand how to configure a Kubernetes cluster ready for an Oracle Access Management deployment, you should follow the Enterprise Deployment Guide referenced in Enterprise Deployments. The Enterprise Deployment Automation section also contains details on automation scripts that can:\n Automate the creation of a Kubernetes cluster on Oracle Cloud Infrastructure (OCI), ready for the deployment of Oracle Identity Management products. Automate the deployment of Oracle Identity Management products on any compliant Kubernetes cluster. Documentation for earlier releases To view documentation for an earlier release, see:\n Version 23.3.1 Version 23.2.1 Version 23.1.1 Version 22.4.1 Version 22.3.1 Version 22.2.1 Version 21.4.2 Version 21.4.1 " +}, +{ + "uri": "/fmw-kubernetes/23.4.1/idm-products/oig/introduction/", + "title": "Introduction", + "tags": [], + "description": "The WebLogic Kubernetes Operator supports deployment of Oracle Identity Governance. Follow the instructions in this guide to set up Oracle Identity Governance domains on Kubernetes.", + "content": "The WebLogic Kubernetes Operator supports deployment of Oracle Identity Governance (OIG).\nIn this release, OIG domains are supported using the “domain on a persistent volume” model only, where the domain home is located in a persistent volume (PV).\nThe operator has several key features to assist you with deploying and managing OIG domains in a Kubernetes environment. You can:\n Create OIG instances in a Kubernetes persistent volume. This persistent volume can reside in an NFS file system or other Kubernetes volume types. Start servers based on declarative startup parameters and desired states. Expose the OIG Services for external access. Scale OIG domains by starting and stopping Managed Servers on demand. Publish operator and WebLogic Server logs into Elasticsearch and interact with them in Kibana. Monitor the OIG instance using Prometheus and Grafana. Current production release The current production release for the Oracle Identity Governance domain deployment on Kubernetes is 23.4.1. 
This release uses the WebLogic Kubernetes Operator version 4.1.2.\nFor 4.0.X WebLogic Kubernetes Operator refer to Version 23.3.1\nFor 3.4.X WebLogic Kubernetes Operator refer to Version 23.1.1\nRecent changes and known issues See the Release Notes for recent changes and known issues for Oracle Identity Governance domain deployment on Kubernetes.\nLimitations See here for limitations in this release.\nGetting started This documentation explains how to configure OIG on a Kubernetes cluster where no other Oracle Identity Management products will be deployed. For detailed information about this type of deployment, start at Prerequisites and follow this documentation sequentially. Please note that this documentation does not explain how to configure a Kubernetes cluster given the product can be deployed on any compliant Kubernetes vendor.\nIf you are deploying multiple Oracle Identity Management products on the same Kubernetes cluster, then you must follow the Enterprise Deployment Guide outlined in Enterprise Deployments. Please note, you also have the option to follow the Enterprise Deployment Guide even if you are only installing OIG and no other Oracle Identity Management products.\nNote: If you need to understand how to configure a Kubernetes cluster ready for an Oracle Identity Governance deployment, you should follow the Enterprise Deployment Guide referenced in Enterprise Deployments. The Enterprise Deployment Automation section also contains details on automation scripts that can:\n Automate the creation of a Kubernetes cluster on Oracle Cloud Infrastructure (OCI), ready for the deployment of Oracle Identity Management products. Automate the deployment of Oracle Identity Management products on any compliant Kubernetes cluster. Documentation for earlier releases To view documentation for an earlier release, see:\n Version 23.3.1 Version 23.2.1 Version 23.1.1 Version 22.4.1 Version 22.3.1 Version 22.2.1 Version 21.4.2 Version 21.4.1 " +}, +{ + "uri": "/fmw-kubernetes/23.4.1/idm-products/oud/introduction/", + "title": "Introduction", + "tags": [], + "description": "Oracle Unified Directory provides a comprehensive Directory Solution for robust Identity Management", + "content": "Oracle Unified Directory provides a comprehensive Directory Solution for robust Identity Management. Oracle Unified Directory is an all-in-one directory solution with storage, proxy, synchronization and virtualization capabilities. While unifying the approach, it provides all the services required for high-performance Enterprise and carrier-grade environments. Oracle Unified Directory ensures scalability to billions of entries, ease of installation, elastic deployments, enterprise manageability and effective monitoring.\nThis project supports deployment of Oracle Unified Directory (OUD) container images based on the 12cPS4 (12.2.1.4.0) release within a Kubernetes environment. The OUD container image refers to binaries for OUD Release 12.2.1.4.0 and it has the capability to create different types of OUD Instances (Directory Service, Proxy, Replication) in containers.\nThis project has several key features to assist you with deploying and managing Oracle Unified Directory in a Kubernetes environment. You can:\n Create Oracle Unified Directory instances in a Kubernetes persistent volume (PV). This PV can reside in an NFS file system, block storage device, or other Kubernetes volume types. Start servers based on declarative startup parameters and desired states. 
Expose the Oracle Unified Directory services for external access. Scale Oracle Unified Directory by starting and stopping servers on demand. Monitor the Oracle Unified Directory instance using Prometheus and Grafana. Current production release The current production release for the Oracle Unified Directory 12c PS4 (12.2.1.4.0) deployment on Kubernetes is 23.4.1.\nRecent changes and known issues See the Release Notes for recent changes and known issues for Oracle Unified Directory deployment on Kubernetes.\nGetting started This documentation explains how to configure OUD on a Kubernetes cluster where no other Oracle Identity Management products will be deployed. For detailed information about this type of deployment, start at Prerequisites and follow this documentation sequentially. Please note that this documentation does not explain how to configure a Kubernetes cluster given the product can be deployed on any compliant Kubernetes vendor.\nIf you are deploying multiple Oracle Identity Management products on the same Kubernetes cluster, then you must follow the Enterprise Deployment Guide outlined in Enterprise Deployments. Please note, you also have the option to follow the Enterprise Deployment Guide even if you are only installing OUD and no other Oracle Identity Management products.\nNote: If you need to understand how to configure a Kubernetes cluster ready for an Oracle Unified Directory deployment, you should follow the Enterprise Deployment Guide referenced in Enterprise Deployments. The Enterprise Deployment Automation section also contains details on automation scripts that can:\n Automate the creation of a Kubernetes cluster on Oracle Cloud Infrastructure (OCI), ready for the deployment of Oracle Identity Management products. Automate the deployment of Oracle Identity Management products on any compliant Kubernetes cluster. Documentation for earlier releases To view documentation for an earlier release, see:\n Version 23.3.1 Version 23.2.1 Version 23.1.1 Version 22.4.1 Version 22.3.1 Version 22.2.1 Version 21.4.2 Version 21.4.1 " +}, +{ + "uri": "/fmw-kubernetes/23.4.1/idm-products/oudsm/introduction/", + "title": "Introduction", + "tags": [], + "description": "Oracle Unified Directory Services Manager provides an interface for managing instances of Oracle Unified Directory", + "content": "Oracle Unified Directory Services Manager (OUDSM) is an interface for managing instances of Oracle Unified Directory. Oracle Unified Directory Services Manager enables you to configure the structure of the directory, define objects in the directory, add and configure users, groups, and other entries. Oracle Unified Directory Services Manager is also the interface you use to manage entries, schema, security, and other directory features.\nThis project supports deployment of Oracle Unified Directory Services Manager images based on the 12cPS4 (12.2.1.4.0) release within a Kubernetes environment. 
The Oracle Unified Directory Services Manager Image refers to binaries for Oracle Unified Directory Services Manager Release 12.2.1.4.0.\nFollow the instructions in this guide to set up Oracle Unified Directory Services Manager on Kubernetes.\nCurrent production release The current production release for the Oracle Unified Directory 12c PS4 (12.2.1.4.0) deployment on Kubernetes is 23.4.1.\nRecent changes and known issues See the Release Notes for recent changes and known issues for Oracle Unified Directory deployment on Kubernetes.\nGetting started This documentation explains how to configure OUDSM on a Kubernetes cluster where no other Oracle Identity Management products will be deployed. For detailed information about this type of deployment, start at Prerequisites and follow this documentation sequentially. Please note that this documentation does not explain how to configure a Kubernetes cluster given the product can be deployed on any compliant Kubernetes vendor.\nIf you are deploying multiple Oracle Identity Management products on the same Kubernetes cluster, then you must follow the Enterprise Deployment Guide outlined in Enterprise Deployments. Please note, you also have the option to follow the Enterprise Deployment Guide even if you are only installing OUDSM and no other Oracle Identity Management products.\nNote: If you need to understand how to configure a Kubernetes cluster ready for an Oracle Unified Directory Services Manager deployment, you should follow the Enterprise Deployment Guide referenced in Enterprise Deployments. The Enterprise Deployment Automation section also contains details on automation scripts that can:\n Automate the creation of a Kubernetes cluster on Oracle Cloud Infrastructure (OCI), ready for the deployment of Oracle Identity Management products. Automate the deployment of Oracle Identity Management products on any compliant Kubernetes cluster. Documentation for earlier releases To view documentation for an earlier release, see:\n Version 23.3.1 Version 23.2.1 Version 23.1.1 Version 22.4.1 Version 22.3.1 Version 22.2.1 Version 21.4.2 Version 21.4.1 " +}, +{ + "uri": "/fmw-kubernetes/23.4.1/idm-products/oid/release-notes/", + "title": "Release Notes", + "tags": [], + "description": "", + "content": "Review the latest changes and known issues for Oracle Internet Directory on Kubernetes.\nRecent changes Date Version Change July, 2022 22.3.1 As of July 2022, Container support has been removed for Oracle Internet Directory. Refer to document ID 2723908.1 on My Oracle Support for more details. April, 2022 22.2.1 Updated for CRI-O support. October, 2021 21.4.1 Initial release of Oracle Internet Directory on Kubernetes. " +}, +{ + "uri": "/fmw-kubernetes/23.4.1/idm-products/oig/configure-design-console/using-the-design-console-with-nginx-ssl/", + "title": "b. Using Design Console with NGINX(SSL)", + "tags": [], + "description": "Configure Design Console with NGINX(SSL).", + "content": "Configure an NGINX ingress (SSL) to allow Design Console to connect to your Kubernetes cluster.\n Prerequisites\n Setup routing rules for the Design Console ingress\n Create the ingress\n Update the T3 channel\n Restart the OIG Managed Server\n Design Console client\na. Using an on-premises installed Design Console\nb. 
Using a container image for Design Console\n Login to the Design Console\n Prerequisites If you haven\u0026rsquo;t already configured an NGINX ingress controller (SSL) for OIG, follow Using an Ingress with NGINX (SSL).\nMake sure you know the master hostname and ingress port for NGINX before proceeding e.g https://${MASTERNODE-HOSTNAME}:${MASTERNODE-PORT}. Also make sure you know the Kubernetes secret for SSL that was generated e.g governancedomain-tls-cert.\nSetup routing rules for the Design Console ingress Setup routing rules by running the following commands:\n$ cd $WORKDIR/kubernetes/design-console-ingress Edit values.yaml and ensure that tls: SSL is set. Change domainUID: and secretName: to match the values for your \u0026lt;domain_uid\u0026gt; and your SSL Kubernetes secret, for example:\n# Load balancer type. Supported values are: NGINX type: NGINX # Type of Configuration Supported Values are : NONSSL,SSL # tls: NONSSL tls: SSL # TLS secret name if the mode is SSL secretName: governancedomain-tls-cert # WLS domain as backend to the load balancer wlsDomain: domainUID: governancedomain oimClusterName: oim_cluster oimServerT3Port: 14002 Create the ingress Run the following command to create the ingress:\n$ cd $WORKDIR $ helm install governancedomain-nginx-designconsole kubernetes/design-console-ingress --namespace oigns --values kubernetes/design-console-ingress/values.yaml The output will look similar to the following:\nNAME: governancedomain-nginx-designconsole \u0026lt;DATE\u0026gt; NAMESPACE: oigns STATUS: deployed REVISION: 1 TEST SUITE: None Run the following command to show the ingress is created successfully:\n$ kubectl describe ing governancedomain-nginx-designconsole -n \u0026lt;domain_namespace\u0026gt; For example:\n$ kubectl describe ing governancedomain-nginx-designconsole -n oigns The output will look similar to the following:\nName: governancedomain-nginx-designconsole Namespace: oigns Address: Default backend: default-http-backend:80 (\u0026lt;error: endpoints \u0026quot;default-http-backend\u0026quot; not found\u0026gt;) Rules: Host Path Backends ---- ---- -------- * governancedomain-cluster-oim-cluster:14002 (10.244.2.103:14002) Annotations: kubernetes.io/ingress.class: nginx meta.helm.sh/release-name: governancedomain-nginx-designconsole meta.helm.sh/release-namespace: oigns nginx.ingress.kubernetes.io/affinity: cookie nginx.ingress.kubernetes.io/configuration-snippet: more_set_input_headers \u0026quot;X-Forwarded-Proto: https\u0026quot;; more_set_input_headers \u0026quot;WL-Proxy-SSL: true\u0026quot;; nginx.ingress.kubernetes.io/enable-access-log: false nginx.ingress.kubernetes.io/ingress.allow-http: false nginx.ingress.kubernetes.io/proxy-buffer-size: 2000k Events: Type Reason Age From Message ---- ------ ---- ---- ------- Normal Sync 6s nginx-ingress-controller Scheduled for sync Update the T3 channel Log in to the WebLogic Console using https://${MASTERNODE-HOSTNAME}:${MASTERNODE-PORT}/console.\n Navigate to Environment, click Servers, and then select oim_server1.\n Click Protocols, and then Channels.\n Click the default T3 channel called T3Channel.\n Click Lock and Edit.\n Set the External Listen Address to the ingress controller hostname ${MASTERNODE-HOSTNAME}.\n Set the External Listen Port to the ingress controller port ${MASTERNODE-PORT}.\n Click Save.\n Click Activate Changes.\n Restart the OIG Managed Server Restart the OIG Managed Server for the above changes to take effect:\n$ cd $WORKDIR/kubernetes/domain-lifecycle $ ./restartServer.sh -s oim_server1 
-d \u0026lt;domain_uid\u0026gt; -n \u0026lt;domain_namespace\u0026gt; For example:\n$ cd $WORKDIR/kubernetes/domain-lifecycle ./restartServer.sh -s oim_server1 -d governancedomain -n oigns Make sure the \u0026lt;domain_uid\u0026gt;-oim-server1 has a READY status of 1/1 before continuing:\n$ kubectl get pods -n oigns | grep oim-server1 The output will look similar to the following:\ngovernancedomain-oim-server1 1/1 Running 0 8m Design Console Client It is possible to use Design Console from an on-premises install, or from a container image.\nUsing an on-premises installed Design Console The instructions below should be performed on the client where Design Console is installed.\n Import the CA certificate into the java keystore\nIf in Generate SSL Certificate you requested a certificate from a Certificate Authority (CA), then you must import the CA certificate (e.g cacert.crt) that signed your certificate, into the java truststore used by Design Console.\nIf in Generate SSL Certificate you generated a self-signed certificate (e.g tls.crt), you must import the self-signed certificate into the java truststore used by Design Console.\nImport the certificate using the following command:\n$ keytool -import -trustcacerts -alias dc -file \u0026lt;certificate\u0026gt; -keystore $JAVA_HOME/jre/lib/security/cacerts where \u0026lt;certificate\u0026gt; is the CA certificate, or self-signed certificate.\n Once complete follow Login to the Design Console.\n Using a container image for Design Console Using Docker The Design Console can be run from a container using X windows emulation.\n On the parent machine where the Design Console is to be displayed, run xhost +.\n Find which worker node the \u0026lt;domain\u0026gt;-oim-server1 pod is running. For example:\n$ kubectl get pods -n oigns -o wide | grep governancedomain-oim-server1 The output will look similar to the following:\ngovernancedomain-oim-server1 1/1 Running 0 31m 10.244.2.98 worker-node2 On the worker node returned above e.g worker-node2, execute the following command to find the OIG container image name:\n$ docker images Then execute the following command to start a container to run Design Console:\n$ docker run -u root --name oigdcbase -it \u0026lt;image\u0026gt; bash For example:\n$ docker run -u root -it --name oigdcbase container-registry.oracle.com/middleware/oig_cpu:12.2.1.4-jdk8-ol7-\u0026lt;January\u0026#39;23\u0026gt; bash This will take you into a bash shell inside the container:\nbash-4.2# Inside the container set the proxy, for example:\nbash-4.2# export https_proxy=http://proxy.example.com:80 Install the relevant X windows packages in the container:\nbash-4.2# yum install libXext libXrender libXtst Execute the following outside the container to create a new Design Console image from the container:\n$ docker commit \u0026lt;container_name\u0026gt; \u0026lt;design_console_image_name\u0026gt; For example:\n$ docker commit oigdcbase oigdc Exit the container bash session:\nbash-4.2# exit Start a new container using the Design Console image:\n$ docker run --name oigdc -it oigdc /bin/bash This will take you into a bash shell for the container:\nbash-4.2# Copy the Ingress CA certificate into the container\nIf in Generate SSL Certificate you requested a certificate from a Certificate Authority (CA), then you must copy the CA certificate (e.g cacert.crt) that signed your certificate, into the container\nIf in Generate SSL Certificate you generated a self-signed certificate (e.g tls.crt), you must copy the self-signed certificate into the 
container\nNote: You will have to copy the certificate over to the worker node where the oigdc image is created before running the following.\nRun the following command outside the container:\n$ cd \u0026lt;workdir\u0026gt;/ssl $ docker cp \u0026lt;certificate\u0026gt; \u0026lt;container_name\u0026gt;:/u01/jdk/jre/lib/security/\u0026lt;certificate\u0026gt; For example:\n$ cd /scratch/OIGK8S/ssl $ docker cp tls.crt oigdc:/u01/jdk/jre/lib/security/tls.crt Import the certificate using the following command:\nbash-4.2# /u01/jdk/bin/keytool -import -trustcacerts -alias dc -file /u01/jdk/jre/lib/security/\u0026lt;certificate\u0026gt; -keystore /u01/jdk/jre/lib/security/cacerts For example:\nbash-4.2# /u01/jdk/bin/keytool -import -trustcacerts -alias dc -file /u01/jdk/jre/lib/security/tls.crt -keystore /u01/jdk/jre/lib/security/cacerts In the container run the following to export the DISPLAY:\n$ export DISPLAY=\u0026lt;parent_machine_hostname:1\u0026gt; Start the Design Console from the container:\nbash-4.2# cd idm/designconsole bash-4.2# sh xlclient.sh The Design Console login should be displayed. Now follow Login to the Design Console.\n Using podman On the parent machine where the Design Console is to be displayed, run xhost +.\n Find which worker node the \u0026lt;domain\u0026gt;-oim-server1 pod is running. For example:\n$ kubectl get pods -n oigns -o wide | grep governancedomain-oim-server1 The output will look similar to the following:\ngovernancedomain-oim-server1 1/1 Running 0 19h 10.244.2.55 worker-node2 \u0026lt;none\u0026gt; On the worker node returned above e.g worker-node2, execute the following command to find the OIG container image name:\n$ podman images Then execute the following command to start a container to run Design Console:\n$ podman run -u root --name oigdcbase -it \u0026lt;image\u0026gt; bash For example:\n$ podman run -u root -it --name oigdcbase container-registry.oracle.com/middleware/oig_cpu:12.2.1.4-jdk8-ol7-\u0026lt;January\u0026#39;23\u0026gt; bash This will take you into a bash shell inside the container:\nbash-4.2# Inside the container set the proxy, for example:\nbash-4.2# export https_proxy=http://proxy.example.com:80 Install the relevant X windows packages in the container:\nbash-4.2# yum install libXext libXrender libXtst Execute the following outside the container to create a new Design Console image from the container:\n$ podman commit \u0026lt;container_name\u0026gt; \u0026lt;design_console_image_name\u0026gt; For example:\n$ podman commit oigdcbase oigdc Exit the container bash session:\nbash-4.2# exit Start a new container using the Design Console image:\n$ podman run --name oigdc -it oigdc /bin/bash This will take you into a bash shell for the container:\nbash-4.2# Copy the Ingress CA certificate into the container\nIf in Generate SSL Certificate you requested a certificate from a Certificate Authority (CA), then you must copy the CA certificate (e.g cacert.crt) that signed your certificate, into the container\nIf in Generate SSL Certificate you generated a self-signed certificate (e.g tls.crt), you must copy the self-signed certificate into the container\nNote: You will have to copy the certificate over to the worker node where the oigdc image is created before running the following.\nRun the following command outside the container:\n$ cd \u0026lt;workdir\u0026gt;/ssl $ podman cp \u0026lt;certificate\u0026gt; \u0026lt;container_name\u0026gt;:/u01/jdk/jre/lib/security/\u0026lt;certificate\u0026gt; For example:\n$ cd /scratch/OIGK8S/ssl $ podman cp 
tls.crt oigdc:/u01/jdk/jre/lib/security/tls.crt Inside the container, import the certificate using the following command:\nbash-4.2# /u01/jdk/bin/keytool -import -trustcacerts -alias dc -file /u01/jdk/jre/lib/security/\u0026lt;certificate\u0026gt; -keystore /u01/jdk/jre/lib/security/cacerts For example:\nbash-4.2# /u01/jdk/bin/keytool -import -trustcacerts -alias dc -file /u01/jdk/jre/lib/security/tls.crt -keystore /u01/jdk/jre/lib/security/cacerts In the container run the following to export the DISPLAY:\n$ export DISPLAY=\u0026lt;parent_machine_hostname:1\u0026gt; Start the Design Console from the container:\nbash-4.2# cd idm/designconsole bash-4.2# sh xlclient.sh The Design Console login should be displayed. Now follow Login to the Design Console.\n Login to the Design Console Launch the Design Console and in the Oracle Identity Manager Design Console login page enter the following details:\nEnter the following details and click Login:\n Server URL: \u0026lt;url\u0026gt; User ID: xelsysadm Password: \u0026lt;password\u0026gt;. where \u0026lt;url\u0026gt; is https://${MASTERNODE-HOSTNAME}:${MASTERNODE-PORT}.\n If successful the Design Console will be displayed.\n " +}, +{ + "uri": "/fmw-kubernetes/23.4.1/idm-products/oam/release-notes/", + "title": "Release Notes", + "tags": [], + "description": "", + "content": "Review the latest changes and known issues for Oracle Access Management on Kubernetes.\nRecent changes Date Version Change October, 2023 23.4.1 Supports Oracle Access Management 12.2.1.4 domain deployment using the October 2023 container image which contains the October Patch Set Update (PSU) and other fixes released with the Critical Patch Update (CPU) program. This release contains the following changes: + Support for WebLogic Kubernetes Operator 4.1.2. + Ability to set resource requests and limits for CPU and memory on a cluster resource. See, Set the OAM server memory parameters. + Support for the Kubernetes Horizontal Pod Autoscaler (HPA). See, Kubernetes Horizontal Pod Autoscaler. + The default domain now only starts one OAM Managed Server (oam_server1) and one Policy Managed Server (policy_mgr1). If upgrading to October 23 (23.4.1) from October 22 (22.4.1) or later, you must upgrade the following in order: 1. WebLogic Kubernetes Operator to 4.1.2 2. Patch the OAM container image to October 23 If upgrading to October 23 (23.4.1) from a release prior to October 22 (22.4.1), you must upgrade the following in order: 1. WebLogic Kubernetes Operator to 4.1.2 2. Patch the OAM container image to October 23 3. Upgrade the Ingress 4. Upgrade Elasticsearch and Kibana See Patch and Upgrade for these instructions. July, 2023 23.3.1 Supports Oracle Access Management 12.2.1.4 domain deployment using the July 2023 container image which contains the July Patch Set Update (PSU) and other fixes released with the Critical Patch Update (CPU) program. If upgrading to July 23 (23.3.1) from April 23 (23.2.1), upgrade as follows: 1. Patch the OAM container image to July 23 If upgrading to July 23 (23.3.1) from October 22 (22.4.1), or January 23 (23.1.1) release, you must upgrade the following in order: 1. WebLogic Kubernetes Operator to 4.0.4 2. Patch the OAM container image to July 23 If upgrading to July 23 (23.3.1) from a release prior to October 22 (22.4.1) release, you must upgrade the following in order: 1. WebLogic Kubernetes Operator to 4.0.4 2. Patch the OAM container image to July 23 3. Upgrade the Ingress 4. 
Upgrade Elasticsearch and Kibana See Patch and Upgrade for these instructions. April, 2023 23.2.1 Supports Oracle Access Management 12.2.1.4 domain deployment using the April 2023 container image which contains the April Patch Set Update (PSU) and other fixes released with the Critical Patch Update (CPU) program. Support for WebLogic Kubernetes Operator 4.0.4. Changes to stopping/starting pods due to domain and cluster configuration being separated and parameter changes (IF_NEEDED, NEVER to IfNeeded, Never). If upgrading to April 23 (23.2.1) from October 22 (22.4.1) or later, you must upgrade in the following order: 1. WebLogic Kubernetes Operator to 4.0.4 2. Patch the OAM container image to April 23 If upgrading to April 23 (23.2.1) from a release prior to October 22 (22.4.1) release, you must upgrade the following in order: 1. WebLogic Kubernetes Operator to 4.0.4 2. Patch the OAM container image to April 23 3. Upgrade the Ingress 4. Upgrade Elasticsearch and Kibana See Patch and Upgrade for these instructions. January, 2023 23.1.1 Supports Oracle Access Management 12.2.1.4 domain deployment using the January 2023 container image which contains the January Patch Set Update (PSU) and other fixes released with the Critical Patch Update (CPU) program. If upgrading to January 23 (23.1.1) from October 22 (22.4.1) release, you only need to patch the OAM container image to January 23. If upgrading to January 23 (23.1.1) from a release prior to October 22 (22.4.1) release, you must upgrade the following in order: 1. WebLogic Kubernetes Operator to 3.4.2 2. Patch the OAM container image to January 23 3. Upgrade the Ingress 4. Upgrade Elasticsearch and Kibana See Patch and Upgrade for these instructions. October, 2022 22.4.1 Supports Oracle Access Management 12.2.1.4 domain deployment using the October 2022 container image which contains the October Patch Set Update (PSU) and other fixes released with the Critical Patch Update (CPU) program. Support for WebLogic Kubernetes Operator 3.4.2. Additional Ingress mappings added. Changes to deployment of Logging and Visualization with Elasticsearch and Kibana. OAM container images are now only available from container-registry.oracle.com and are no longer available from My Oracle Support. If upgrading to October 22 (22.4.1) from a previous release, you must upgrade the following in order: 1. WebLogic Kubernetes Operator to 3.4.2 2. Patch the OAM container image to October 22 3. Upgrade the Ingress 4. Upgrade Elasticsearch and Kibana See Patch and Upgrade for these instructions. July, 2022 22.3.1 Supports Oracle Access Management 12.2.1.4 domain deployment using the July 2022 container image which contains the July Patch Set Update (PSU) and other fixes released with the Critical Patch Update (CPU) program. April, 2022 22.2.1 Updated for CRI-O support. November, 2021 21.4.2 Supports Oracle Access Management domain deployment using WebLogic Kubernetes Operator 3.3.0. Voyager ingress removed as no longer supported. October 2021 21.4.1 A) References to supported Kubernetes, Helm and Docker versions removed and replaced with Support note reference. B) Namespace and domain names changed to be consistent with Enterprise Deployment Guide for Oracle Identity and Access Management in a Kubernetes Cluster. C) Additional post configuration tasks added. D) Upgrading a Kubernetes Cluster and Security Hardening removed as vendor specific. November 2020 20.4.1 Initial release of Oracle Access Management on Kubernetes. 
" +}, +{ + "uri": "/fmw-kubernetes/23.4.1/idm-products/oig/release-notes/", + "title": "Release Notes", + "tags": [], + "description": "", + "content": "Review the latest changes and known issues for Oracle Identity Governance on Kubernetes.\nRecent changes Date Version Change October, 2023 23.4.1 Supports Oracle Identity Governance 12.2.1.4 domain deployment using the October 2023 container image which contains the October Patch Set Update (PSU) and other fixes released with the Critical Patch Update (CPU) program. This release contains the following changes: + Support for WebLogic Kubernetes Operator 4.1.2. + Ability to set resource requests and limits for CPU and memory on a cluster resource. See, Setting the OIM server memory parameters. + Support for the Kubernetes Horizontal Pod Autoscaler (HPA). See, Kubernetes Horizontal Pod Autoscaler. If upgrading to October 23 (23.4.1) from October 22 (22.4.1) or later, you must upgrade the following in order: 1. WebLogic Kubernetes Operator to 4.1.2 2. Patch the OIG container image to October 23 If upgrading to October 23 (23.4.1) from a release prior to October 22 (22.4.1), you must upgrade the following in order: 1. WebLogic Kubernetes Operator to 4.1.2 2. Patch the OIG container image to October 23 3. Upgrade the Ingress 4. Upgrade Elasticsearch and Kibana See Patch and Upgrade for these instructions. July, 2023 23.3.1 Supports Oracle Identity Governance 12.2.1.4 domain deployment using the July 2023 container image which contains the July Patch Set Update (PSU) and other fixes released with the Critical Patch Update (CPU) program. If upgrading to July 23 (23.3.1) from April 23 (23.2.1), upgrade as follows: 1. Patch the OIG container image to July 23 If upgrading to July 23 (23.3.1) from October 22 (22.4.1), or January 23 (23.1.1) release, you must upgrade the following in order: 1. WebLogic Kubernetes Operator to 4.0.4 2. Patch the OIG container image to July 23 If upgrading to July 23 (23.3.1) from a release prior to October 22 (22.4.1) release, you must upgrade the following in order: 1. WebLogic Kubernetes Operator to 4.0.4 2. Patch the OIG container image to July 23 3. Upgrade the Ingress 4. Upgrade Elasticsearch and Kibana See Patch and Upgrade for these instructions. April, 2023 23.2.1 Supports Oracle Identity Governance 12.2.1.4 domain deployment using the April 2023 container image which contains the April Patch Set Update (PSU) and other fixes released with the Critical Patch Update (CPU) program. Support for WebLogic Kubernetes Operator 4.0.4. Changes to stopping/starting pods due to domain and cluster configuration being separated and parameter changes (IF_NEEDED, NEVER to IfNeeded, Never). If upgrading to April 23 (23.2.1) from October 22 (22.4.1) or later, you must upgrade in the following order: 1. WebLogic Kubernetes Operator to 4.0.4 2. Patch the OIG container image to April 23 If upgrading to April 23 (23.2.1) from a release prior to October 22 (22.4.1), you must upgrade the following in order: 1. WebLogic Kubernetes Operator to 4.0.4 2. Patch the OIG container image to April 23 3. Upgrade the Ingress 4. Upgrade Elasticsearch and Kibana See Patch and Upgrade for these instructions. January, 2023 23.1.1 Supports Oracle Identity Governance 12.2.1.4 domain deployment using the January 2023 container image which contains the January Patch Set Update (PSU) and other fixes released with the Critical Patch Update (CPU) program. 
If upgrading to January 23 (23.1.1) from October 22 (22.4.1) release, you only need to patch the OIG container image to January 23. If upgrading to January 23 (23.1.1) from a release prior to October 22 (22.4.1) release, you must upgrade the following in order: 1. WebLogic Kubernetes Operator to 3.4.2 2. Patch the OIG container image to January 23 3. Upgrade the Ingress 4. Upgrade Elasticsearch and Kibana See Patch and Upgrade for these instructions. October, 2022 22.4.1 Supports Oracle Identity Governance 12.2.1.4 domain deployment using the October 2022 container image which contains the October Patch Set Update (PSU) and other fixes released with the Critical Patch Update (CPU) program. Support for WebLogic Kubernetes Operator 3.4.2. Additional Ingress mappings added. Changes to deployment of Logging and Visualization with Elasticsearch and Kibana. OIG container images are now only available from container-registry.oracle.com and are no longer available from My Oracle Support. If upgrading to October 22 (22.4.1) from a previous release, you must upgrade the following in order: 1. WebLogic Kubernetes Operator to 3.4.2 2. Patch the OIG container image to October 22 3. Upgrade the Ingress 4. Upgrade Elasticsearch and Kibana See Patch and Upgrade for these instructions. July, 2022 22.3.1 Supports Oracle Identity Governance 12.2.1.4 domain deployment using the July 2022 container image which contains the July Patch Set Update (PSU) and other fixes released with the Critical Patch Update (CPU) program. April, 2022 22.2.1 Updated for CRI-O support. November, 2021 21.4.2 Supports Oracle Identity Governance domain deployment using WebLogic Kubernetes Operator 3.3.0. Voyager ingress removed as no longer supported. October 2021 21.4.1 A) References to supported Kubernetes, Helm and Docker versions removed and replaced with Support note reference. B) Namespace and domain names changed to be consistent with Enterprise Deployment Guide for Oracle Identity and Access Management in a Kubernetes Cluster. C) Additional post configuration tasks added. D) New section on how to start Design Console in a container. E) Upgrading a Kubernetes Cluster and Security Hardening removed as vendor specific. November 2020 20.4.1 Initial release of Identity Governance on Kubernetes. " +}, +{ + "uri": "/fmw-kubernetes/23.4.1/idm-products/oud/release-notes/", + "title": "Release Notes", + "tags": [], + "description": "", + "content": "Review the latest changes and known issues for Oracle Unified Directory on Kubernetes.\nRecent changes Date Version Change October, 2023 23.4.1 Supports Oracle Unified Directory 12.2.1.4 domain deployment using the October 2023 container image which contains the October Patch Set Update (PSU) and other fixes released with the Critical Patch Update (CPU) program. + Support for Block Device Storage. See, Create OUD Instances. + Ability to set resource requests and limits for CPU and memory on an OUD instance. See, Create OUD Instances. + Support for Assured Replication. See, Create OUD Instances. + Support for the Kubernetes Horizontal Pod Autoscaler (HPA). See, Kubernetes Horizontal Pod Autoscaler. + Supports integration options such as Enterprise User Security (EUS), EBusiness Suite (EBS), and Directory Integration Platform (DIP). To upgrade to October 23 (23.4.1) you must follow the instructions in Patch and Upgrade. 
July, 2023 23.3.1 Supports Oracle Unified Directory 12.2.1.4 domain deployment using the July 2023 container image which contains the July Patch Set Update (PSU) and other fixes released with the Critical Patch Update (CPU) program. To upgrade to July 23 (23.3.1) you must follow the instructions in Patch and Upgrade. April, 2023 23.2.1 Supports Oracle Unified Directory 12.2.1.4 domain deployment using the April 2023 container image which contains the April Patch Set Update (PSU) and other fixes released with the Critical Patch Update (CPU) program. To upgrade to April 23 (23.2.1) you must follow the instructions in Patch and Upgrade. January, 2023 23.1.1 Supports Oracle Unified Directory 12.2.1.4 domain deployment using the January 2023 container image which contains the January Patch Set Update (PSU) and other fixes released with the Critical Patch Update (CPU) program. October, 2022 22.4.1 Supports Oracle Unified Directory 12.2.1.4 domain deployment using the October 2022 container image which contains the October Patch Set Update (PSU) and other fixes released with the Critical Patch Update (CPU) program. Changes to deployment of Logging and Visualization with Elasticsearch and Kibana. OUD container images are now only available from container-registry.oracle.com and are no longer available from My Oracle Support. July, 2022 22.3.1 Supports Oracle Unified Directory 12.2.1.4 domain deployment using the July 2022 container image which contains the July Patch Set Update (PSU) and other fixes released with the Critical Patch Update (CPU) program. From July 2022 onwards OUD deployment is performed using StatefulSets. April, 2022 22.2.1 Updated for CRI-O support. November 2021 21.4.2 Voyager ingress removed as no longer supported. October 2021 21.4.1 A) References to supported Kubernetes, Helm and Docker versions removed and replaced with Support note reference. B) Namespace and domain names changed to be consistent with Enterprise Deployment Guide for Oracle Identity and Access Management in a Kubernetes Cluster. C) Upgrading a Kubernetes Cluster and Security Hardening removed as vendor specific. November 2020 20.4.1 Initial release of Oracle Unified Directory on Kubernetes. " +}, +{ + "uri": "/fmw-kubernetes/23.4.1/idm-products/oudsm/release-notes/", + "title": "Release Notes", + "tags": [], + "description": "", + "content": "Review the latest changes and known issues for Oracle Unified Directory Services Manager on Kubernetes.\nRecent changes Date Version Change October, 2023 23.4.1 Supports Oracle Unified Directory Services Manager 12.2.1.4 domain deployment using the October 2023 container image which contains the October Patch Set Update (PSU) and other fixes released with the Critical Patch Update (CPU) program. If upgrading to October 23 (23.4.1) from October 22 (22.4.1) or later, upgrade as follows: 1. Patch the OUDSM container image to October 23 If upgrading to October 23 (23.4.1) from July 22 (22.3.1) or earlier, you must upgrade the following in order: 1. Patch the OUDSM container image to October 23 2. Upgrade Elasticsearch and Kibana. To upgrade to October 23 (23.4.1) you must follow the instructions in Patch and Upgrade. July, 2023 23.3.1 Supports Oracle Unified Directory Services Manager 12.2.1.4 domain deployment using the July 2023 container image which contains the July Patch Set Update (PSU) and other fixes released with the Critical Patch Update (CPU) program. If upgrading to July 23 (23.3.1) from October 22 (22.4.1) or later, upgrade as follows: 1. 
Patch the OUDSM container image to July 23 If upgrading to July 23 (23.3.1) from July 22 (22.3.1) or earlier, you must upgrade the following in order: 1. Patch the OUDSM container image to July 23 2. Upgrade Elasticsearch and Kibana. See Patch and Upgrade for these instructions. April, 2023 23.2.1 Supports Oracle Unified Directory Services Manager 12.2.1.4 domain deployment using the April 2023 container image which contains the April Patch Set Update (PSU) and other fixes released with the Critical Patch Update (CPU) program. If upgrading to April 23 (23.2.1) from October 22 (22.4.1), upgrade as follows: 1. Patch the OUDSM container image to April 23 If upgrading to April 23 (23.2.1) from July 22 (22.3.1) or earlier, you must upgrade the following in order: 1. Patch the OUDSM container image to April 23 2. Upgrade Elasticsearch and Kibana. See Patch and Upgrade for these instructions. January, 2023 23.1.1 Supports Oracle Unified Directory Services Manager 12.2.1.4 domain deployment using the January 2023 container image which contains the January Patch Set Update (PSU) and other fixes released with the Critical Patch Update (CPU) program. If upgrading to January 23 (23.1.1) from October 22 (22.4.1) upgrade as follows: 1. Patch the OUDSM container image to January 23 If upgrading to January 23 (23.1.1) from July 22 (22.3.1) or earlier, you must upgrade the following in order: 1. Patch the OUDSM container image to January 23 2. Upgrade Elasticsearch and Kibana. See Patch and Upgrade for these instructions. October, 2022 22.4.1 Supports Oracle Unified Directory Services Manager 12.2.1.4 domain deployment using the October 2022 container image which contains the October Patch Set Update (PSU) and other fixes released with the Critical Patch Update (CPU) program. Changes to deployment of Logging and Visualization with Elasticsearch and Kibana. OUDSM container images are now only available from container-registry.oracle.com and are no longer available from My Oracle Support. If upgrading to October 22 (22.4.1) from a previous release, you must upgrade the following in order: 1. Patch the OUDSM container image to October 22 2. Upgrade Elasticsearch and Kibana. See Patch and Upgrade for these instructions. July, 2022 22.3.1 Supports Oracle Unified Directory Services Manager 12.2.1.4 domain deployment using the July 2022 container image which contains the July Patch Set Update (PSU) and other fixes released with the Critical Patch Update (CPU) program. April, 2022 22.2.1 Updated for CRI-O support. November 2021 21.4.2 Voyager ingress removed as no longer supported. October 2021 21.4.1 A) References to supported Kubernetes, Helm and Docker versions removed and replaced with Support note reference. B) Namespace and domain names changed to be consistent with Enterprise Deployment Guide for Oracle Identity and Access Management in a Kubernetes Cluster. C) Upgrading a Kubernetes Cluster and Security Hardening removed as vendor specific. November 2020 20.4.1 Initial release of Oracle Unified Directory Services Manager on Kubernetes. 
" +}, +{ + "uri": "/fmw-kubernetes/23.4.1/idm-products/oam/prerequisites/", + "title": "Prerequisites", + "tags": [], + "description": "System requirements and limitations for deploying and running an OAM domain home", + "content": "Introduction This document provides information about the system requirements and limitations for deploying and running OAM domains with the WebLogic Kubernetes Operator 4.1.2.\nSystem requirements for OAM domains A running Kubernetes cluster that meets the following requirements:\n The Kubernetes cluster must have sufficient nodes and resources. An installation of Helm is required on the Kubernetes cluster. Helm is used to create and deploy the necessary resources and run the WebLogic Kubernetes Operator in a Kubernetes cluster. A supported container engine must be installed and running on the Kubernetes cluster. The Kubernetes cluster and container engine must meet the minimum version requirements outlined in document ID 2723908.1 on My Oracle Support. You must have the cluster-admin role to install the WebLogic Kubernetes Operator. The nodes in the Kubernetes cluster must have access to a persistent volume such as a Network File System (NFS) mount or a shared file system. The system clocks on each node of the Kubernetes cluster must be synchronized. Run the date command simultaneously on all the nodes in each cluster and then synchronize accordingly. A running Oracle Database 12.2.0.1 or later. The database must be a supported version for OAM as outlined in Oracle Fusion Middleware 12c certifications. It must meet the requirements as outlined in About Database Requirements for an Oracle Fusion Middleware Installation and in RCU Requirements for Oracle Databases. It is recommended that the database initialization parameters are set as per Minimum Initialization Parameters.\n Note: This documentation does not tell you how to install a Kubernetes cluster, Helm, the container engine, or how to push container images to a container registry. Please refer to your vendor specific documentation for this information. Also see Getting Started.\nLimitations Compared to running a WebLogic Server domain in Kubernetes using the operator, the following limitations currently exist for OAM domains:\n In this release, OAM domains are supported using the “domain on a persistent volume” model only, where the domain home is located in a persistent volume (PV). The \u0026ldquo;domain in image\u0026rdquo; model is not supported. Only configured clusters are supported. Dynamic clusters are not supported for OAM domains. Note that you can still use all of the scaling features, but you need to define the maximum size of your cluster at domain creation time, using the parameter configuredManagedServerCount. For more details on this parameter, see Prepare the create domain script. It is recommended to pre-configure your cluster so it\u0026rsquo;s sized a little larger than the maximum size you plan to expand it to. You must rigorously test at this maximum size to make sure that your system can scale as expected. The WebLogic Monitoring Exporter currently supports the WebLogic MBean trees only. Support for JRF MBeans has not been added yet. We do not currently support running OAM in non-Linux containers. 
" +}, +{ + "uri": "/fmw-kubernetes/23.4.1/idm-products/oig/prerequisites/", + "title": "Prerequisites", + "tags": [], + "description": "System requirements and limitations for deploying and running an OIG domain", + "content": "Introduction This document provides information about the system requirements and limitations for deploying and running OIG domains with the WebLogic Kubernetes Operator 4.1.2.\nSystem requirements for OIG domains A running Kubernetes cluster that meets the following requirements:\n The Kubernetes cluster must have sufficient nodes and resources. An installation of Helm is required on the Kubernetes cluster. Helm is used to create and deploy the necessary resources and run the WebLogic Kubernetes Operator in a Kubernetes cluster. A supported container engine must be installed and running on the Kubernetes cluster. The Kubernetes cluster and container engine must meet the minimum version requirements outlined in document ID 2723908.1 on My Oracle Support. You must have the cluster-admin role to install the WebLogic Kubernetes Operator. The nodes in the Kubernetes cluster must have access to a persistent volume such as a Network File System (NFS) mount or a shared file system. The system clocks on each node of the Kubernetes cluster must be synchronized. Run the date command simultaneously on all the nodes in each cluster and then synchronize accordingly. A running Oracle Database 12.2.0.1 or later. The database must be a supported version for OIG as outlined in Oracle Fusion Middleware 12c certifications. It must meet the requirements as outlined in About Database Requirements for an Oracle Fusion Middleware Installation and in RCU Requirements for Oracle Databases.\n Note: This documentation does not tell you how to install a Kubernetes cluster, Helm, the container engine, or how to push container images to a container registry. Please refer to your vendor specific documentation for this information. Also see Getting Started.\nLimitations Compared to running a WebLogic Server domain in Kubernetes using the operator, the following limitations currently exist for OIG domains:\n In this release, OIG domains are supported using the “domain on a persistent volume” model only, where the domain home is located in a persistent volume (PV). The \u0026ldquo;domain in image\u0026rdquo; model is not supported. Only configured clusters are supported. Dynamic clusters are not supported for OIG domains. Note that you can still use all of the scaling features, you just need to define the maximum size of your cluster at domain creation time. The WebLogic Monitoring Exporter currently supports the WebLogic MBean trees only. Support for JRF MBeans has not been added yet. We do not currently support running OIG in non-Linux containers. " +}, +{ + "uri": "/fmw-kubernetes/23.4.1/idm-products/oud/prerequisites/", + "title": "Prerequisites", + "tags": [], + "description": "Oracle Unified Directory Prerequisites.", + "content": "Introduction This document provides information about the system requirements for deploying and running Oracle Unified Directory 12c PS4 (12.2.1.4.0) in a Kubernetes environment.\nSystem Requirements for Oracle Unified Directory on Kubernetes A running Kubernetes cluster that meets the following requirements: The Kubernetes cluster must have sufficient nodes and resources. An installation of Helm is required on the Kubernetes cluster. Helm is used to create and deploy the necessary resources on the Kubernetes cluster. 
A supported container engine must be installed and running on the Kubernetes cluster. The Kubernetes cluster and container engine must meet the minimum version requirements outlined in document ID 2723908.1 on My Oracle Support. The nodes in the Kubernetes cluster must have access to a persistent volume such as a Network File System (NFS) mount, a shared file system, or block storage. If you intend to use assured replication in OUD, you must have a persistent volume available that uses a Network File System (NFS) mount, or a shared file system for the config volume. See Enabling Assured Replication. Note: This documentation does not tell you how to install a Kubernetes cluster, Helm, the container engine, or how to push container images to a container registry. Please refer to your vendor specific documentation for this information. Also see Getting Started.\n" +}, +{ + "uri": "/fmw-kubernetes/23.4.1/idm-products/oudsm/prerequisites/", + "title": "Prerequisites", + "tags": [], + "description": "Oracle Unified Directory Services Manager Prerequisites.", + "content": "Introduction This document provides information about the system requirements for deploying and running Oracle Unified Directory Services Manager 12c PS4 (12.2.1.4.0) in a Kubernetes environment.\nSystem Requirements for Oracle Unified Directory Services Manager on Kubernetes A running Kubernetes cluster that meets the following requirements: The Kubernetes cluster must have sufficient nodes and resources. An installation of Helm is required on the Kubernetes cluster. Helm is used to create and deploy the necessary resources on the Kubernetes cluster. A supported container engine must be installed and running on the Kubernetes cluster. The Kubernetes cluster and container engine must meet the minimum version requirements outlined in document ID 2723908.1 on My Oracle Support. The nodes in the Kubernetes cluster must have access to a persistent volume such as a Network File System (NFS) mount or a shared file system. Note: This documentation does not tell you how to install a Kubernetes cluster, Helm, the container engine, or how to push container images to a container registry. Please refer to your vendor specific documentation for this information. 
Also see Getting Started.\n" +}, +{ + "uri": "/fmw-kubernetes/23.4.1/idm-products/oud/prepare-your-environment/", + "title": "Prepare Your Environment", + "tags": [], + "description": "Prepare your environment", + "content": " Check the Kubernetes cluster is ready Obtain the OUD container image Create a persistent volume directory Setup the code repository to deploy OUD Check the Kubernetes cluster is ready As per the Prerequisites a Kubernetes cluster should have already been configured.\n Run the following command on the master node to check the cluster and worker nodes are running:\n$ kubectl get nodes,pods -n kube-system The output will look similar to the following:\nNAME STATUS ROLES AGE VERSION node/worker-node1 Ready \u0026lt;none\u0026gt; 17h v1.26.6+1.el8 node/worker-node2 Ready \u0026lt;none\u0026gt; 17h v1.26.6+1.el8 node/master-node Ready control-plane,master 23h v1.26.6+1.el8 NAME READY STATUS RESTARTS AGE pod/coredns-66bff467f8-fnhbq 1/1 Running 0 23h pod/coredns-66bff467f8-xtc8k 1/1 Running 0 23h pod/etcd-master 1/1 Running 0 21h pod/kube-apiserver-master-node 1/1 Running 0 21h pod/kube-controller-manager-master-node 1/1 Running 0 21h pod/kube-flannel-ds-amd64-lxsfw 1/1 Running 0 17h pod/kube-flannel-ds-amd64-pqrqr 1/1 Running 0 17h pod/kube-flannel-ds-amd64-wj5nh 1/1 Running 0 17h pod/kube-proxy-2kxv2 1/1 Running 0 17h pod/kube-proxy-82vvj 1/1 Running 0 17h pod/kube-proxy-nrgw9 1/1 Running 0 23h pod/kube-scheduler-master 1/1 Running 0 21h Obtain the OUD container image The OUD Kubernetes deployment requires access to an OUD container image. The image can be obtained in the following ways:\n Prebuilt OUD container image Build your own OUD container image using WebLogic Image Tool Prebuilt OUD container image The prebuilt OUD October 2023 container image can be downloaded from Oracle Container Registry. This image is prebuilt by Oracle and includes Oracle Unified Directory 12.2.1.4.0, the October Patch Set Update (PSU) and other fixes released with the Critical Patch Update (CPU) program..\nNote: Before using this image you must login to Oracle Container Registry, navigate to Middleware \u0026gt; oud_cpu and accept the license agreement.\nYou can use this image in the following ways:\n Pull the container image from the Oracle Container Registry automatically during the OUD Kubernetes deployment. Manually pull the container image from the Oracle Container Registry and then upload it to your own container registry. Manually pull the container image from the Oracle Container Registry and manually stage it on the master node and each worker node. Build your own OUD container image using WebLogic Image Tool You can build your own OUD container image using the WebLogic Image Tool. This is recommended if you need to apply one off patches to a Prebuilt OUD container image. For more information about building your own container image with WebLogic Image Tool, see Create or update image.\nYou can use an image built with WebLogic Image Tool in the following ways:\n Manually upload them to your own container registry. Manually stage them on the master node and each worker node. Note: This documentation does not tell you how to pull or push the above images into a private container registry, or stage them on the master and worker nodes. 
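As a rough illustration only, staging an image manually typically looks like the following, assuming podman is your container engine, the repository path follows the Middleware \u0026gt; oud_cpu entry on Oracle Container Registry, and \u0026lt;tag\u0026gt; is a placeholder for the tag you intend to use:
$ podman pull container-registry.oracle.com/middleware/oud_cpu:\u0026lt;tag\u0026gt;
$ podman save -o oud_cpu.tar container-registry.oracle.com/middleware/oud_cpu:\u0026lt;tag\u0026gt;
$ podman load -i oud_cpu.tar
Copy the tar file to each master and worker node before running the podman load command on that node.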
Details of this can be found in the Enterprise Deployment Guide.\nCreate a persistent volume directory Note: This section should not be followed if using block storage.\nAs referenced in Prerequisites the nodes in the Kubernetes cluster must have access to a persistent volume such as a Network File System (NFS) mount or a shared file system.\nIn this example /scratch/shared/ is a shared directory accessible from all nodes.\n On the master node run the following command to create a user_projects directory:\n$ cd \u0026lt;persistent_volume\u0026gt; $ mkdir oud_user_projects $ sudo chown -R 1000:0 oud_user_projects For example:\n$ cd /scratch/shared $ mkdir oud_user_projects $ sudo chown -R 1000:0 oud_user_projects On the master node run the following to ensure it is possible to read and write to the persistent volume:\n$ cd \u0026lt;persistent_volume\u0026gt;/oud_user_projects $ touch filemaster.txt $ ls filemaster.txt For example:\n$ cd /scratch/shared/oud_user_projects $ touch filemaster.txt $ ls filemaster.txt On the first worker node run the following to ensure it is possible to read and write to the persistent volume:\n$ cd /scratch/shared/oud_user_projects $ ls filemaster.txt $ touch fileworker1.txt $ ls fileworker1.txt Repeat the above for any other worker nodes e.g. fileworker2.txt etc. Once proven that it\u0026rsquo;s possible to read and write from each node to the persistent volume, delete the files created.\n Setup the code repository to deploy OUD Oracle Unified Directory deployment on Kubernetes leverages deployment scripts provided by Oracle for creating Oracle Unified Directory containers using the Helm charts provided. To deploy Oracle Unified Directory on Kubernetes you should set up the deployment scripts as below:\n Create a working directory to setup the source code.\n$ mkdir \u0026lt;workdir\u0026gt; For example:\n$ mkdir /scratch/shared/OUDContainer Download the latest OUD deployment scripts from the OUD repository:\n$ cd \u0026lt;workdir\u0026gt; $ git clone https://github.com/oracle/fmw-kubernetes.git --branch release/23.4.1 For example:\n$ cd /scratch/shared/OUDContainer $ git clone https://github.com/oracle/fmw-kubernetes.git --branch release/23.4.1 Set the $WORKDIR environment variable as follows:\n$ export WORKDIR=\u0026lt;workdir\u0026gt;/fmw-kubernetes/OracleUnifiedDirectory For example:\n$ export WORKDIR=/scratch/shared/OUDContainer/fmw-kubernetes/OracleUnifiedDirectory You are now ready to create the OUD deployment as per Create OUD instances.\n " +}, +{ + "uri": "/fmw-kubernetes/23.4.1/idm-products/oudsm/prepare-your-environment/", + "title": "Prepare Your Environment", + "tags": [], + "description": "Prepare your environment", + "content": " Check the Kubernetes cluster is ready Obtain the OUDSM container image Setup the code repository to deploy OUDSM Check the Kubernetes cluster is ready As per the Prerequisites a Kubernetes cluster should have already been configured.\n Run the following command on the master node to check the cluster and worker nodes are running:\n$ kubectl get nodes,pods -n kube-system The output will look similar to the following:\nNAME STATUS ROLES AGE VERSION node/worker-node1 Ready \u0026lt;none\u0026gt; 17h v1.26.6+1.el8 node/worker-node2 Ready \u0026lt;none\u0026gt; 17h v1.26.6+1.el8 node/master-node Ready master 23h v1.26.6+1.el8 NAME READY STATUS RESTARTS AGE pod/coredns-66bff467f8-fnhbq 1/1 Running 0 23h pod/coredns-66bff467f8-xtc8k 1/1 Running 0 23h pod/etcd-master 1/1 Running 0 21h pod/kube-apiserver-master-node 1/1 Running 0 21h 
pod/kube-controller-manager-master-node 1/1 Running 0 21h pod/kube-flannel-ds-amd64-lxsfw 1/1 Running 0 17h pod/kube-flannel-ds-amd64-pqrqr 1/1 Running 0 17h pod/kube-flannel-ds-amd64-wj5nh 1/1 Running 0 17h pod/kube-proxy-2kxv2 1/1 Running 0 17h pod/kube-proxy-82vvj 1/1 Running 0 17h pod/kube-proxy-nrgw9 1/1 Running 0 23h pod/kube-scheduler-master 1/1 Running 0 21$ Obtain the OUDSM container image The Oracle Unified Directory Services Manager (OUDSM) Kubernetes deployment requires access to an OUDSM container image. The image can be obtained in the following ways:\n Prebuilt OUDSM container image Build your own OUDSM container image using WebLogic Image Tool Prebuilt OUDSM container image The prebuilt OUDSM April 2023 container image can be downloaded from Oracle Container Registry. This image is prebuilt by Oracle and includes Oracle Unified Directory Services Manager 12.2.1.4.0, the April Patch Set Update (PSU) and other fixes released with the Critical Patch Update (CPU) program..\nNote: Before using this image you must login to Oracle Container Registry, navigate to Middleware \u0026gt; oudsm_cpu and accept the license agreement.\nYou can use this image in the following ways:\n Pull the container image from the Oracle Container Registry automatically during the OUDSM Kubernetes deployment. Manually pull the container image from the Oracle Container Registry and then upload it to your own container registry. Manually pull the container image from the Oracle Container Registry and manually stage it on the master node and each worker node. Build your own OUDSM container image using WebLogic Image Tool You can build your own OUDSM container image using the WebLogic Image Tool. This is recommended if you need to apply one off patches to a Prebuilt OUDSM container image. For more information about building your own container image with WebLogic Image Tool, see Create or update image\nYou can use an image built with WebLogic Image Tool in the following ways:\n Manually upload them to your own container registry. Manually stage them on the master node and each worker node. Note: This documentation does not tell you how to pull or push the above images into a private container registry, or stage them on the master and worker nodes. Details of this can be found in the Enterprise Deployment Guide.\nSetup the code repository to deploy OUDSM Oracle Unified Directory Services Manager deployment on Kubernetes leverages deployment scripts provided by Oracle for creating Oracle Unified Directory Services Manager containers using the Helm charts provided. 
To deploy Oracle Unified Directory Services Manager on Kubernetes you should set up the deployment scripts on the master node as below:\n Create a working directory to setup the source code.\n$ mkdir \u0026lt;workdir\u0026gt; For example:\n$ mkdir /scratch/OUDSMContainer Download the latest OUDSM deployment scripts from the OUDSM repository:\n$ cd \u0026lt;workdir\u0026gt; $ git clone https://github.com/oracle/fmw-kubernetes.git --branch release/23.4.1 For example:\n$ cd /scratch/OUDSMContainer $ git clone https://github.com/oracle/fmw-kubernetes.git --branch release/23.4.1 Set the $WORKDIR environment variable as follows:\n$ export WORKDIR=\u0026lt;workdir\u0026gt;/fmw-kubernetes/OracleUnifiedDirectorySM For example:\n$ export WORKDIR=/scratch/OUDSMContainer/fmw-kubernetes/OracleUnifiedDirectorySM You are now ready to create the OUDSM deployment as per Create OUDSM instances.\n " +}, +{ + "uri": "/fmw-kubernetes/23.4.1/idm-products/oam/prepare-your-environment/", + "title": "Prepare your environment", + "tags": [], + "description": "Sample for creating an OAM domain home on an existing PV or PVC, and the domain resource YAML file for deploying the generated OAM domain.", + "content": "To prepare for Oracle Access Management deployment in a Kubernetes environment, complete the following steps:\n Check the Kubernetes cluster is ready\n Obtain the OAM container image\n Set up the code repository to deploy OAM domains\n Install the WebLogic Kubernetes Operator\n Create a namespace for Oracle Access Management\n Create a Kubernetes secret for the container registry\n RCU schema creation\n Preparing the environment for domain creation\na. Creating Kubernetes secrets for the domain and RCU\nb. Create a Kubernetes persistent volume and persistent volume claim\n Check the Kubernetes cluster is ready As per the Prerequisites a Kubernetes cluster should have already been configured.\nCheck that all the nodes in the Kubernetes cluster are running.\n Run the following command on the master node to check the cluster and worker nodes are running:\n$ kubectl get nodes,pods -n kube-system The output will look similar to the following:\nNAME STATUS ROLES AGE VERSION node/worker-node1 Ready \u0026lt;none\u0026gt; 17h v1.26.6+1.el8 node/worker-node2 Ready \u0026lt;none\u0026gt; 17h v1.26.6+1.el8 node/master-node Ready control-plane,master 23h v1.26.6+1.el8 NAME READY STATUS RESTARTS AGE pod/coredns-66bff467f8-fnhbq 1/1 Running 0 23h pod/coredns-66bff467f8-xtc8k 1/1 Running 0 23h pod/etcd-master 1/1 Running 0 21h pod/kube-apiserver-master-node 1/1 Running 0 21h pod/kube-controller-manager-master-node 1/1 Running 0 21h pod/kube-flannel-ds-amd64-lxsfw 1/1 Running 0 17h pod/kube-flannel-ds-amd64-pqrqr 1/1 Running 0 17h pod/kube-flannel-ds-amd64-wj5nh 1/1 Running 0 17h pod/kube-proxy-2kxv2 1/1 Running 0 17h pod/kube-proxy-82vvj 1/1 Running 0 17h pod/kube-proxy-nrgw9 1/1 Running 0 23h pod/kube-scheduler-master 1/1 Running 0 21h Obtain the OAM container image The OAM Kubernetes deployment requires access to an OAM container image. The image can be obtained in the following ways:\n Prebuilt OAM container image Build your own OAM container image using WebLogic Image Tool Prebuilt OAM container image The prebuilt OAM October 2023 container image can be downloaded from Oracle Container Registry. 
This image is prebuilt by Oracle and includes Oracle Access Management 12.2.1.4.0, the October Patch Set Update (PSU) and other fixes released with the Critical Patch Update (CPU) program.\nNote: Before using this image you must login to Oracle Container Registry, navigate to Middleware \u0026gt; oam_cpu and accept the license agreement.\nYou can use this image in the following ways:\n Pull the container image from the Oracle Container Registry automatically during the OAM Kubernetes deployment. Manually pull the container image from the Oracle Container Registry and then upload it to your own container registry. Manually pull the container image from the Oracle Container Registry and manually stage it on the master node and each worker node. Build your own OAM container image using WebLogic Image Tool You can build your own OAM container image using the WebLogic Image Tool. This is recommended if you need to apply one off patches to a Prebuilt OAM container image. For more information about building your own container image with WebLogic Image Tool, see Create or update image.\nYou can use an image built with WebLogic Image Tool in the following ways:\n Manually upload them to your own container registry. Manually stage them on the master node and each worker node. Note: This documentation does not tell you how to pull or push the above images into a private container registry, or stage them on the master and worker nodes. Details of this can be found in the Enterprise Deployment Guide.\nSet up the code repository to deploy OAM domains OAM domain deployment on Kubernetes leverages the WebLogic Kubernetes Operator infrastructure. For deploying the OAM domains, you need to set up the deployment scripts on the master node as below:\n Create a working directory to setup the source code.\n$ mkdir \u0026lt;workdir\u0026gt; For example:\n$ mkdir /scratch/OAMK8S Download the latest OAM deployment scripts from the OAM repository.\n$ cd \u0026lt;workdir\u0026gt; $ git clone https://github.com/oracle/fmw-kubernetes.git --branch release/23.4.1 For example:\n$ cd /scratch/OAMK8S $ git clone https://github.com/oracle/fmw-kubernetes.git --branch release/23.4.1 Set the $WORKDIR environment variable as follows:\n$ export WORKDIR=\u0026lt;workdir\u0026gt;/fmw-kubernetes/OracleAccessManagement For example:\n$ export WORKDIR=/scratch/OAMK8S/fmw-kubernetes/OracleAccessManagement Run the following command and see if the WebLogic custom resource definition name already exists:\n$ kubectl get crd In the output you should see:\nNo resources found If you see any of the following:\nNAME AGE clusters.weblogic.oracle 5d domains.weblogic.oracle 5d then run the following command to delete the existing crd\u0026rsquo;s:\n$ kubectl delete crd clusters.weblogic.oracle $ kubectl delete crd domains.weblogic.oracle Install the WebLogic Kubernetes Operator On the master node run the following command to create a namespace for the operator:\n$ kubectl create namespace \u0026lt;sample-kubernetes-operator-ns\u0026gt; For example:\n$ kubectl create namespace opns The output will look similar to the following:\nnamespace/opns created Create a service account for the operator in the operator\u0026rsquo;s namespace by running the following command:\n$ kubectl create serviceaccount -n \u0026lt;sample-kubernetes-operator-ns\u0026gt; \u0026lt;sample-kubernetes-operator-sa\u0026gt; For example:\n$ kubectl create serviceaccount -n opns op-sa The output will look similar to the following:\nserviceaccount/op-sa created Run the following 
helm command to install and start the operator:\n$ cd $WORKDIR $ helm install weblogic-kubernetes-operator kubernetes/charts/weblogic-operator \\ --namespace \u0026lt;sample-kubernetes-operator-ns\u0026gt; \\ --set image=ghcr.io/oracle/weblogic-kubernetes-operator:4.1.2 \\ --set serviceAccount=\u0026lt;sample-kubernetes-operator-sa\u0026gt; \\ --set \u0026#34;enableClusterRoleBinding=true\u0026#34; \\ --set \u0026#34;domainNamespaceSelectionStrategy=LabelSelector\u0026#34; \\ --set \u0026#34;domainNamespaceLabelSelector=weblogic-operator\\=enabled\u0026#34; \\ --set \u0026#34;javaLoggingLevel=FINE\u0026#34; --wait For example:\n$ cd $WORKDIR $ helm install weblogic-kubernetes-operator kubernetes/charts/weblogic-operator \\ --namespace opns \\ --set image=ghcr.io/oracle/weblogic-kubernetes-operator:4.1.2 \\ --set serviceAccount=op-sa \\ --set \u0026#34;enableClusterRoleBinding=true\u0026#34; \\ --set \u0026#34;domainNamespaceSelectionStrategy=LabelSelector\u0026#34; \\ --set \u0026#34;domainNamespaceLabelSelector=weblogic-operator\\=enabled\u0026#34; \\ --set \u0026#34;javaLoggingLevel=FINE\u0026#34; --wait The output will look similar to the following:\nNAME: weblogic-kubernetes-operator LAST DEPLOYED: \u0026lt;DATE\u0026gt; NAMESPACE: opns STATUS: deployed REVISION: 1 TEST SUITE: None Verify that the operator\u0026rsquo;s pod and services are running by executing the following command:\n$ kubectl get all -n \u0026lt;sample-kubernetes-operator-ns\u0026gt; For example:\n$ kubectl get all -n opns The output will look similar to the following:\nNAME READY STATUS RESTARTS AGE pod/weblogic-operator-676d5cc6f4-wct7b 1/1 Running 0 40s pod/weblogic-operator-webhook-7996b8b58b-9sfhd 1/1 Running 0 40s NAME TYPE CLUSTER-IP EXTERNAL-IP PORT(S) AGE service/weblogic-operator-webhook-svc ClusterIP 10.100.91.237 \u0026lt;none\u0026gt; 8083/TCP,8084/TCP 47s NAME READY UP-TO-DATE AVAILABLE AGE deployment.apps/weblogic-operator 1/1 1 1 40s deployment.apps/weblogic-operator-webhook 1/1 1 1 40s NAME DESIRED CURRENT READY AGE replicaset.apps/weblogic-operator-676d5cc6f4 1 1 1 40s replicaset.apps/weblogic-operator-webhook-7996b8b58b 1 1 1 46s Verify the operator pod\u0026rsquo;s log:\n$ kubectl logs -n \u0026lt;sample-kubernetes-operator-ns\u0026gt; -c weblogic-operator deployments/weblogic-operator For example:\n$ kubectl logs -n opns -c weblogic-operator deployments/weblogic-operator The output will look similar to the following:\n... 
{\u0026quot;timestamp\u0026quot;:\u0026quot;\u0026lt;DATE\u0026gt;\u0026quot;,\u0026quot;thread\u0026quot;:21,\u0026quot;fiber\u0026quot;:\u0026quot;\u0026quot;,\u0026quot;namespace\u0026quot;:\u0026quot;\u0026quot;,\u0026quot;domainUID\u0026quot;:\u0026quot;\u0026quot;,\u0026quot;level\u0026quot;:\u0026quot;FINE\u0026quot;,\u0026quot;class\u0026quot;:\u0026quot;oracle.kubernetes.operator.DeploymentLiveness\u0026quot;,\u0026quot;method\u0026quot;:\u0026quot;run\u0026quot;,\u0026quot;timeInMillis\u0026quot;:1678183291191,\u0026quot;message\u0026quot;:\u0026quot;Liveness file last modified time set\u0026quot;,\u0026quot;exception\u0026quot;:\u0026quot;\u0026quot;,\u0026quot;code\u0026quot;:\u0026quot;\u0026quot;,\u0026quot;headers\u0026quot;:{},\u0026quot;body\u0026quot;:\u0026quot;\u0026quot;} {\u0026quot;timestamp\u0026quot;:\u0026quot;\u0026lt;DATE\u0026gt;\u0026quot;,\u0026quot;thread\u0026quot;:37,\u0026quot;fiber\u0026quot;:\u0026quot;\u0026quot;,\u0026quot;namespace\u0026quot;:\u0026quot;\u0026quot;,\u0026quot;domainUID\u0026quot;:\u0026quot;\u0026quot;,\u0026quot;level\u0026quot;:\u0026quot;FINE\u0026quot;,\u0026quot;class\u0026quot;:\u0026quot;oracle.kubernetes.operator.DeploymentLiveness\u0026quot;,\u0026quot;method\u0026quot;:\u0026quot;run\u0026quot;,\u0026quot;timeInMillis\u0026quot;:1678183296193,\u0026quot;message\u0026quot;:\u0026quot;Liveness file last modified time set\u0026quot;,\u0026quot;exception\u0026quot;:\u0026quot;\u0026quot;,\u0026quot;code\u0026quot;:\u0026quot;\u0026quot;,\u0026quot;headers\u0026quot;:{},\u0026quot;body\u0026quot;:\u0026quot;\u0026quot;} {\u0026quot;timestamp\u0026quot;:\u0026quot;\u0026lt;DATE\u0026gt;\u0026quot;,\u0026quot;thread\u0026quot;:31,\u0026quot;fiber\u0026quot;:\u0026quot;\u0026quot;,\u0026quot;namespace\u0026quot;:\u0026quot;\u0026quot;,\u0026quot;domainUID\u0026quot;:\u0026quot;\u0026quot;,\u0026quot;level\u0026quot;:\u0026quot;FINE\u0026quot;,\u0026quot;class\u0026quot;:\u0026quot;oracle.kubernetes.operator.DeploymentLiveness\u0026quot;,\u0026quot;method\u0026quot;:\u0026quot;run\u0026quot;,\u0026quot;timeInMillis\u0026quot;:1678183301194,\u0026quot;message\u0026quot;:\u0026quot;Liveness file last modified time set\u0026quot;,\u0026quot;exception\u0026quot;:\u0026quot;\u0026quot;,\u0026quot;code\u0026quot;:\u0026quot;\u0026quot;,\u0026quot;headers\u0026quot;:{},\u0026quot;body\u0026quot;:\u0026quot;\u0026quot;} {\u0026quot;timestamp\u0026quot;:\u0026quot;\u0026lt;DATE\u0026gt;\u0026quot;,\u0026quot;thread\u0026quot;:31,\u0026quot;fiber\u0026quot;:\u0026quot;\u0026quot;,\u0026quot;namespace\u0026quot;:\u0026quot;\u0026quot;,\u0026quot;domainUID\u0026quot;:\u0026quot;\u0026quot;,\u0026quot;level\u0026quot;:\u0026quot;FINE\u0026quot;,\u0026quot;class\u0026quot;:\u0026quot;oracle.kubernetes.operator.DeploymentLiveness\u0026quot;,\u0026quot;method\u0026quot;:\u0026quot;run\u0026quot;,\u0026quot;timeInMillis\u0026quot;:1678183306195,\u0026quot;message\u0026quot;:\u0026quot;Liveness file last modified time set\u0026quot;,\u0026quot;exception\u0026quot;:\u0026quot;\u0026quot;,\u0026quot;code\u0026quot;:\u0026quot;\u0026quot;,\u0026quot;headers\u0026quot;:{},\u0026quot;body\u0026quot;:\u0026quot;\u0026quot;} Create a namespace for Oracle Access Management Run the following command to create a namespace for the domain:\n$ kubectl create namespace \u0026lt;domain_namespace\u0026gt; For example:\n$ kubectl create namespace oamns The output will look similar to the following:\nnamespace/oamns created Run the following command to tag the 
namespace so the WebLogic Kubernetes Operator can manage it:\n$ kubectl label namespaces \u0026lt;domain_namespace\u0026gt; weblogic-operator=enabled For example:\n$ kubectl label namespaces oamns weblogic-operator=enabled The output will look similar to the following:\nnamespace/oamns labeled Run the following command to check the label was created:\n$ kubectl describe namespace \u0026lt;domain_namespace\u0026gt; For example:\n$ kubectl describe namespace oamns The output will look similar to the following:\nName: oamns Labels: kubernetes.io/metadata.name=oamns weblogic-operator=enabled Annotations: \u0026lt;none\u0026gt; Status: Active No resource quota. No LimitRange resource. Create a Kubernetes secret for the container registry In this section you create a secret that stores the credentials for the container registry where the OAM image is stored.\nIf you are not using a container registry and have loaded the images on each of the master and worker nodes, then there is no need to create the registry secret.\n Run the following command to create the secret:\nkubectl create secret docker-registry \u0026#34;orclcred\u0026#34; --docker-server=\u0026lt;CONTAINER_REGISTRY\u0026gt; \\ --docker-username=\u0026#34;\u0026lt;USER_NAME\u0026gt;\u0026#34; \\ --docker-password=\u0026lt;PASSWORD\u0026gt; --docker-email=\u0026lt;EMAIL_ID\u0026gt; \\ --namespace=\u0026lt;domain_namespace\u0026gt; For example, if using Oracle Container Registry:\nkubectl create secret docker-registry \u0026#34;orclcred\u0026#34; --docker-server=container-registry.oracle.com \\ --docker-username=\u0026#34;user@example.com\u0026#34; \\ --docker-password=password --docker-email=user@example.com \\ --namespace=oamns Replace \u0026lt;USER_NAME\u0026gt; and \u0026lt;PASSWORD\u0026gt; with the credentials for the registry with the following caveats:\n If using Oracle Container Registry to pull the OAM container image, this is the username and password used to login to Oracle Container Registry. 
Before you can use this image you must login to Oracle Container Registry, navigate to Middleware \u0026gt; oam_cpu and accept the license agreement.\n If using your own container registry to store the OAM container image, this is the username and password (or token) for your container registry.\n The output will look similar to the following:\nsecret/orclcred created RCU schema creation In this section you create the RCU schemas in the Oracle Database.\nBefore following the steps in this section, make sure that the database and listener are up and running and you can connect to the database via SQL*Plus or other client tool.\n If using Oracle Container Registry or your own container registry for your OAM container image, run the following command to create a helper pod to run RCU:\n$ kubectl run --image=\u0026lt;image_name-from-registry\u0026gt;:\u0026lt;tag\u0026gt; --image-pull-policy=\u0026#34;IfNotPresent\u0026#34; --overrides=\u0026#39;{\u0026#34;apiVersion\u0026#34;: \u0026#34;v1\u0026#34;, \u0026#34;spec\u0026#34;:{\u0026#34;imagePullSecrets\u0026#34;: [{\u0026#34;name\u0026#34;: \u0026#34;orclcred\u0026#34;}]}}\u0026#39; helper -n \u0026lt;domain_namespace\u0026gt; -- sleep infinity For example:\n$ kubectl run --image=container-registry.oracle.com/middleware/oam_cpu:12.2.1.4-jdk8-ol7-\u0026lt;October`23\u0026gt; --image-pull-policy=\u0026#34;IfNotPresent\u0026#34; --overrides=\u0026#39;{\u0026#34;apiVersion\u0026#34;: \u0026#34;v1\u0026#34;,\u0026#34;spec\u0026#34;:{\u0026#34;imagePullSecrets\u0026#34;: [{\u0026#34;name\u0026#34;: \u0026#34;orclcred\u0026#34;}]}}\u0026#39; helper -n oamns -- sleep infinity If you are not using a container registry and have loaded the image on each of the master and worker nodes, run the following command:\n$ kubectl run helper --image \u0026lt;image\u0026gt;:\u0026lt;tag\u0026gt; -n oamns -- sleep infinity For example:\n$ kubectl run helper --image oracle/oam:12.2.1.4-jdk8-ol7-\u0026lt;October`23\u0026gt; -n oamns -- sleep infinity The output will look similar to the following:\npod/helper created Run the following command to check the pod is running:\n$ kubectl get pods -n \u0026lt;domain_namespace\u0026gt; For example:\n$ kubectl get pods -n oamns The output will look similar to the following:\nNAME READY STATUS RESTARTS AGE helper 1/1 Running 0 3m Note: If you are pulling the image from a container registry it may take several minutes before the pod has a STATUS of 1\\1. 
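If you prefer, you can also watch the pod until it reaches a Running state (a convenience only; press Ctrl+C to stop watching):
$ kubectl get pod helper -n oamns -w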
While the pod is starting you can check the status of the pod, by running the following command:\n$ kubectl describe pod helper -n oamns Run the following command to start a bash shell in the helper pod:\n$ kubectl exec -it helper -n \u0026lt;domain_namespace\u0026gt; -- /bin/bash For example:\n$ kubectl exec -it helper -n oamns -- /bin/bash This will take you into a bash shell in the running helper pod:\n[oracle@helper ~]$ In the helper bash shell run the following commands to set the environment:\n[oracle@helper ~]$ export CONNECTION_STRING=\u0026lt;db_host.domain\u0026gt;:\u0026lt;db_port\u0026gt;/\u0026lt;service_name\u0026gt; [oracle@helper ~]$ export RCUPREFIX=\u0026lt;rcu_schema_prefix\u0026gt; [oracle@helper ~]$ echo -e \u0026lt;db_pwd\u0026gt;\u0026#34;\\n\u0026#34;\u0026lt;rcu_schema_pwd\u0026gt; \u0026gt; /tmp/pwd.txt [oracle@helper ~]$ cat /tmp/pwd.txt where:\n\u0026lt;db_host.domain\u0026gt;:\u0026lt;db_port\u0026gt;/\u0026lt;service_name\u0026gt;\tis your database connect string\n\u0026lt;rcu_schema_prefix\u0026gt; is the RCU schema prefix you want to set\n\u0026lt;db_pwd\u0026gt; is the SYS password for the database\n\u0026lt;rcu_schema_pwd\u0026gt; is the password you want to set for the \u0026lt;rcu_schema_prefix\u0026gt;\nFor example:\n[oracle@helper ~]$ export CONNECTION_STRING=mydatabasehost.example.com:1521/orcl.example.com [oracle@helper ~]$ export RCUPREFIX=OAMK8S [oracle@helper ~]$ echo -e \u0026lt;password\u0026gt;\u0026#34;\\n\u0026#34;\u0026lt;password\u0026gt; \u0026gt; /tmp/pwd.txt [oracle@helper ~]$ cat /tmp/pwd.txt \u0026lt;password\u0026gt; \u0026lt;password\u0026gt; In the helper bash shell run the following command to create the RCU schemas in the database:\n$ [oracle@helper ~]$ /u01/oracle/oracle_common/bin/rcu -silent -createRepository -databaseType ORACLE -connectString \\ $CONNECTION_STRING -dbUser sys -dbRole sysdba -useSamePasswordForAllSchemaUsers true \\ -selectDependentsForComponents true -schemaPrefix $RCUPREFIX -component MDS -component IAU \\ -component IAU_APPEND -component IAU_VIEWER -component OPSS -component WLS -component STB -component OAM -f \u0026lt; /tmp/pwd.txt The output will look similar to the following:\nRCU Logfile: /tmp/RCU\u0026lt;DATE\u0026gt;/logs/rcu.log Processing command line .... Repository Creation Utility - Checking Prerequisites Checking Global Prerequisites Repository Creation Utility - Checking Prerequisites Checking Component Prerequisites Repository Creation Utility - Creating Tablespaces Validating and Creating Tablespaces Create tablespaces in the repository database Repository Creation Utility - Create Repository Create in progress. 
Executing pre create operations Percent Complete: 18 Percent Complete: 18 Percent Complete: 19 Percent Complete: 20 Percent Complete: 21 Percent Complete: 21 Percent Complete: 22 Percent Complete: 22 Creating Common Infrastructure Services(STB) Percent Complete: 30 Percent Complete: 30 Percent Complete: 39 Percent Complete: 39 Percent Complete: 39 Creating Audit Services Append(IAU_APPEND) Percent Complete: 46 Percent Complete: 46 Percent Complete: 55 Percent Complete: 55 Percent Complete: 55 Creating Audit Services Viewer(IAU_VIEWER) Percent Complete: 62 Percent Complete: 62 Percent Complete: 63 Percent Complete: 63 Percent Complete: 64 Percent Complete: 64 Creating Metadata Services(MDS) Percent Complete: 73 Percent Complete: 73 Percent Complete: 73 Percent Complete: 74 Percent Complete: 74 Percent Complete: 75 Percent Complete: 75 Percent Complete: 75 Creating Weblogic Services(WLS) Percent Complete: 80 Percent Complete: 80 Percent Complete: 83 Percent Complete: 83 Percent Complete: 91 Percent Complete: 98 Percent Complete: 98 Creating Audit Services(IAU) Percent Complete: 100 Creating Oracle Platform Security Services(OPSS) Creating Oracle Access Manager(OAM) Executing post create operations Repository Creation Utility: Create - Completion Summary Database details: ----------------------------- Host Name : mydatabasehost.example.com Port : 1521 Service Name : ORCL.EXAMPLE.COM Connected As : sys Prefix for (prefixable) Schema Owners : OAMK8S RCU Logfile : /tmp/RCU\u0026lt;DATE\u0026gt;/logs/rcu.log Component schemas created: ----------------------------- Component Status Logfile Common Infrastructure Services Success /tmp/RCU\u0026lt;DATE\u0026gt;/logs/stb.log Oracle Platform Security Services Success /tmp/RCU\u0026lt;DATE\u0026gt;/logs/opss.log Oracle Access Manager Success /tmp/RCU\u0026lt;DATE\u0026gt;/logs/oam.log Audit Services Success /tmp/RCU\u0026lt;DATE\u0026gt;/logs/iau.log Audit Services Append Success /tmp/RCU\u0026lt;DATE\u0026gt;/logs/iau_append.log Audit Services Viewer Success /tmp/RCU\u0026lt;DATE\u0026gt;/logs/iau_viewer.log Metadata Services Success /tmp/RCU\u0026lt;DATE\u0026gt;/logs/mds.log WebLogic Services Success /tmp/RCU\u0026lt;DATE\u0026gt;/logs/wls.log Repository Creation Utility - Create : Operation Completed [oracle@helper ~]$ Exit the helper bash shell by issuing the command exit.\n Preparing the environment for domain creation In this section you prepare the environment for the OAM domain creation. This involves the following steps:\na. Creating Kubernetes secrets for the domain and RCU\nb. Create a Kubernetes persistent volume and persistent volume claim\nCreating Kubernetes secrets for the domain and RCU Create a Kubernetes secret for the domain using the create-weblogic-credentials script in the same Kubernetes namespace as the domain:\n$ cd $WORKDIR/kubernetes/create-weblogic-domain-credentials $ ./create-weblogic-credentials.sh -u weblogic -p \u0026lt;pwd\u0026gt; -n \u0026lt;domain_namespace\u0026gt; -d \u0026lt;domain_uid\u0026gt; -s \u0026lt;kubernetes_domain_secret\u0026gt; where:\n-u weblogic is the WebLogic username\n-p \u0026lt;pwd\u0026gt; is the password for the weblogic user\n-n \u0026lt;domain_namespace\u0026gt; is the domain namespace\n-d \u0026lt;domain_uid\u0026gt; is the domain UID to be created. The default is domain1 if not specified\n-s \u0026lt;kubernetes_domain_secret\u0026gt; is the name you want to create for the secret for this namespace. 
The default is to use the domainUID if not specified\nFor example:\n$ cd $WORKDIR/kubernetes/create-weblogic-domain-credentials $ ./create-weblogic-credentials.sh -u weblogic -p \u0026lt;password\u0026gt; -n oamns -d accessdomain -s accessdomain-credentials The output will look similar to the following:\nsecret/accessdomain-credentials created secret/accessdomain-credentials labeled The secret accessdomain-credentials has been successfully created in the oamns namespace. Verify the secret is created using the following command:\n$ kubectl get secret \u0026lt;kubernetes_domain_secret\u0026gt; -o yaml -n \u0026lt;domain_namespace\u0026gt; For example:\n$ kubectl get secret accessdomain-credentials -o yaml -n oamns The output will look similar to the following:\napiVersion: v1 data: password: V2VsY29tZTE= username: d2VibG9naWM= kind: Secret metadata: creationTimestamp: \u0026quot;\u0026lt;DATE\u0026gt;\u0026quot; labels: weblogic.domainName: accessdomain weblogic.domainUID: accessdomain name: accessdomain-credentials namespace: oamns resourceVersion: \u0026quot;29428101\u0026quot; uid: 6dac0561-d157-4144-9ed7-c475a080eb3a type: Opaque Create a Kubernetes secret for RCU using the create-weblogic-credentials script in the same Kubernetes namespace as the domain:\n$ cd $WORKDIR/kubernetes/create-rcu-credentials $ ./create-rcu-credentials.sh -u \u0026lt;rcu_prefix\u0026gt; -p \u0026lt;rcu_schema_pwd\u0026gt; -a sys -q \u0026lt;sys_db_pwd\u0026gt; -d \u0026lt;domain_uid\u0026gt; -n \u0026lt;domain_namespace\u0026gt; -s \u0026lt;kubernetes_rcu_secret\u0026gt; where:\n-u \u0026lt;rcu_prefix\u0026gt; is the name of the RCU schema prefix created previously\n-p \u0026lt;rcu_schema_pwd\u0026gt; is the password for the RCU schema prefix\n-q \u0026lt;sys_db_pwd\u0026gt; is the sys database password\n-d \u0026lt;domain_uid\u0026gt; is the domain_uid that you created earlier\n-n \u0026lt;domain_namespace\u0026gt; is the domain namespace\n-s \u0026lt;kubernetes_rcu_secret\u0026gt; is the name of the rcu secret to create\nFor example:\n$ cd $WORKDIR/kubernetes/create-rcu-credentials $ ./create-rcu-credentials.sh -u OAMK8S -p \u0026lt;password\u0026gt; -a sys -q \u0026lt;password\u0026gt; -d accessdomain -n oamns -s accessdomain-rcu-credentials The output will look similar to the following:\nsecret/accessdomain-rcu-credentials created secret/accessdomain-rcu-credentials labeled The secret accessdomain-rcu-credentials has been successfully created in the oamns namespace. Verify the secret is created using the following command:\n$ kubectl get secret \u0026lt;kubernetes_rcu_secret\u0026gt; -o yaml -n \u0026lt;domain_namespace\u0026gt; For example:\n$ kubectl get secret accessdomain-rcu-credentials -o yaml -n oamns The output will look similar to the following:\napiVersion: v1 data: password: T3JhY2xlXzEyMw== sys_password: T3JhY2xlXzEyMw== sys_username: c3lz username: T0FNSzhT kind: Secret metadata: creationTimestamp: \u0026quot;\u0026lt;DATE\u0026gt;\u0026quot; labels: weblogic.domainName: accessdomain weblogic.domainUID: accessdomain name: accessdomain-rcu-credentials namespace: oamns resourceVersion: \u0026quot;29428242\u0026quot; uid: 1b81b6e0-fd7d-40b8-a060-454c8d23f4dc type: Opaque Create a Kubernetes persistent volume and persistent volume claim As referenced in Prerequisites the nodes in the Kubernetes cluster must have access to a persistent volume such as a Network File System (NFS) mount or a shared file system.\nA persistent volume is the same as a disk mount but is inside a container. 
A Kubernetes persistent volume is an arbitrary name (determined in this case, by Oracle) that is mapped to a physical volume on a disk.\nWhen a container is started, it needs to mount that volume. The physical volume should be on a shared disk accessible by all the Kubernetes worker nodes because it is not known on which worker node the container will be started. In the case of Identity and Access Management, the persistent volume does not get erased when a container stops. This enables persistent configurations.\nThe example below uses an NFS mounted volume (\u0026lt;persistent_volume\u0026gt;/accessdomainpv). Other volume types can also be used. See the official Kubernetes documentation for Volumes.\nNote: The persistent volume directory needs to be accessible to both the master and worker node(s). In this example /scratch/shared/accessdomainpv is accessible from all nodes via NFS.\nTo create a Kubernetes persistent volume, perform the following steps:\n Make a backup copy of the create-pv-pvc-inputs.yaml file and create required directories:\n$ cd $WORKDIR/kubernetes/create-weblogic-domain-pv-pvc $ cp create-pv-pvc-inputs.yaml create-pv-pvc-inputs.yaml.orig $ mkdir output $ mkdir -p \u0026lt;persistent_volume\u0026gt;/accessdomainpv $ sudo chown -R 1000:0 \u0026lt;persistent_volume\u0026gt;/accessdomainpv For example:\n$ cd $WORKDIR/kubernetes/create-weblogic-domain-pv-pvc $ cp create-pv-pvc-inputs.yaml create-pv-pvc-inputs.yaml.orig $ mkdir output $ mkdir -p /scratch/shared/accessdomainpv $ sudo chown -R 1000:0 /scratch/shared/accessdomainpv On the master node run the following command to ensure it is possible to read and write to the persistent volume:\ncd \u0026lt;persistent_volume\u0026gt;/accessdomainpv touch filemaster.txt ls filemaster.txt For example:\ncd /scratch/shared/accessdomainpv touch filemaster.txt ls filemaster.txt On the first worker node run the following to ensure it is possible to read and write to the persistent volume:\ncd /scratch/shared/accessdomainpv ls filemaster.txt touch fileworker1.txt ls fileworker1.txt Repeat the above for any other worker nodes e.g fileworker2.txt etc. Once proven that it\u0026rsquo;s possible to read and write from each node to the persistent volume, delete the files created.\n Navigate to $WORKDIR/kubernetes/create-weblogic-domain-pv-pvc:\n$ cd $WORKDIR/kubernetes/create-weblogic-domain-pv-pvc and edit the create-pv-pvc-inputs.yaml file and update the following parameters to reflect your settings. Save the file when complete:\nbaseName: \u0026lt;domain\u0026gt; domainUID: \u0026lt;domain_uid\u0026gt; namespace: \u0026lt;domain_namespace\u0026gt; weblogicDomainStorageType: NFS weblogicDomainStorageNFSServer: \u0026lt;nfs_server\u0026gt; weblogicDomainStoragePath: \u0026lt;physical_path_of_persistent_storage\u0026gt; weblogicDomainStorageSize: 10Gi For example:\n\t# The base name of the pv and pvc baseName: domain # Unique ID identifying a domain. # If left empty, the generated pv can be shared by multiple domains # This ID must not contain an underscope (\u0026quot;_\u0026quot;), and must be lowercase and unique across all domains in a Kubernetes cluster. domainUID: accessdomain # Name of the namespace for the persistent volume claim namespace: oamns ... # Persistent volume type for the persistent storage. # The value must be 'HOST_PATH' or 'NFS'. # If using 'NFS', weblogicDomainStorageNFSServer must be specified. weblogicDomainStorageType: NFS # The server name or ip address of the NFS server to use for the persistent storage. 
# The following line must be uncommented and customized if weblogicDomainStorageType is NFS: weblogicDomainStorageNFSServer: mynfsserver # Physical path of the persistent storage. # When weblogicDomainStorageType is set to HOST_PATH, this value should be set to the path to the # domain storage on the Kubernetes host. # When weblogicDomainStorageType is set to NFS, then weblogicDomainStorageNFSServer should be set # to the IP address or name of the NFS server, and this value should be set to the exported path # on that server. # Note that the path where the domain is mounted in the WebLogic containers is not affected by this # setting, that is determined when you create your domain. # The following line must be uncommented and customized: weblogicDomainStoragePath: /scratch/shared/accessdomainpv # Reclaim policy of the persistent storage # The valid values are: 'Retain', 'Delete', and 'Recycle' weblogicDomainStorageReclaimPolicy: Retain # Total storage allocated to the persistent storage. weblogicDomainStorageSize: 10Gi Execute the create-pv-pvc.sh script to create the PV and PVC configuration files:\n$ ./create-pv-pvc.sh -i create-pv-pvc-inputs.yaml -o output The output will be similar to the following:\nInput parameters being used export version=\u0026quot;create-weblogic-sample-domain-pv-pvc-inputs-v1\u0026quot; export baseName=\u0026quot;domain\u0026quot; export domainUID=\u0026quot;accessdomain\u0026quot; export namespace=\u0026quot;oamns\u0026quot; export weblogicDomainStorageType=\u0026quot;NFS\u0026quot; export weblogicDomainStorageNFSServer=\u0026quot;mynfsserver\u0026quot; export weblogicDomainStoragePath=\u0026quot;/scratch/shared/accessdomainpv\u0026quot; export weblogicDomainStorageReclaimPolicy=\u0026quot;Retain\u0026quot; export weblogicDomainStorageSize=\u0026quot;10Gi\u0026quot; Generating output/pv-pvcs/accessdomain-domain-pv.yaml Generating output/pv-pvcs/accessdomain-domain-pvc.yaml The following files were generated: output/pv-pvcs/accessdomain-domain-pv.yaml output/pv-pvcs/accessdomain-domain-pvc.yaml Run the following to show the files are created:\n$ ls output/pv-pvcs accessdomain-domain-pv.yaml accessdomain-domain-pvc.yaml create-pv-pvc-inputs.yaml Run the following kubectl command to create the PV and PVC in the domain namespace:\n$ kubectl create -f output/pv-pvcs/accessdomain-domain-pv.yaml -n \u0026lt;domain_namespace\u0026gt; $ kubectl create -f output/pv-pvcs/accessdomain-domain-pvc.yaml -n \u0026lt;domain_namespace\u0026gt; For example:\n$ kubectl create -f output/pv-pvcs/accessdomain-domain-pv.yaml -n oamns $ kubectl create -f output/pv-pvcs/accessdomain-domain-pvc.yaml -n oamns The output will look similar to the following:\npersistentvolume/accessdomain-domain-pv created persistentvolumeclaim/accessdomain-domain-pvc created Run the following commands to verify the PV and PVC were created successfully:\n$ kubectl describe pv \u0026lt;pv_name\u0026gt; $ kubectl describe pvc \u0026lt;pvc_name\u0026gt; -n \u0026lt;domain_namespace\u0026gt; For example:\n$ kubectl describe pv accessdomain-domain-pv $ kubectl describe pvc accessdomain-domain-pvc -n oamns The output will look similar to the following:\n$ kubectl describe pv accessdomain-domain-pv Name: accessdomain-domain-pv Labels: weblogic.domainUID=accessdomain Annotations: pv.kubernetes.io/bound-by-controller: yes Finalizers: [kubernetes.io/pv-protection] StorageClass: accessdomain-domain-storage-class Status: Bound Claim: oamns/accessdomain-domain-pvc Reclaim Policy: Retain Access Modes: RWX VolumeMode: 
Filesystem Capacity: 10Gi Node Affinity: \u0026lt;none\u0026gt; Message: Source: Type: NFS (an NFS mount that lasts the lifetime of a pod) Server: mynfsserver Path: /scratch/shared/accessdomainpv ReadOnly: false Events: \u0026lt;none\u0026gt; $ kubectl describe pvc accessdomain-domain-pvc -n oamns Name: accessdomain-domain-pvc Namespace: oamns StorageClass: accessdomain-domain-storage-class Status: Bound Volume: accessdomain-domain-pv Labels: weblogic.domainUID=accessdomain Annotations: pv.kubernetes.io/bind-completed: yes pv.kubernetes.io/bound-by-controller: yes Finalizers: [kubernetes.io/pvc-protection] Capacity: 10Gi Access Modes: RWX VolumeMode: Filesystem Events: \u0026lt;none\u0026gt; Mounted By: \u0026lt;none\u0026gt; You are now ready to create the OAM domain as per Create OAM Domains.\n " +}, +{ + "uri": "/fmw-kubernetes/23.4.1/idm-products/oig/prepare-your-environment/", + "title": "Prepare your environment", + "tags": [], + "description": "Preparation to deploy OIG on Kubernetes", + "content": "To prepare for Oracle Identity Governance deployment in a Kubernetes environment, complete the following steps:\n Check the Kubernetes cluster is ready\n Obtain the OIG container image\n Setup the code repository to deploy OIG domains\n Install the WebLogic Kubernetes Operator\n Create a namespace for Oracle Identity Governance\n Create a Kubernetes secret for the container registry\n RCU schema creation\n Preparing the environment for domain creation\na. Creating Kubernetes secrets for the domain and RCU\nb. Create a Kubernetes persistent volume and persistent volume claim\n Check the Kubernetes cluster is ready As per the Prerequisites a Kubernetes cluster should have already been configured.\n Run the following command on the master node to check the cluster and worker nodes are running:\n$ kubectl get nodes,pods -n kube-system The output will look similar to the following:\nNAME STATUS ROLES AGE VERSION node/worker-node1 Ready \u0026lt;none\u0026gt; 17h v1.26.6+1.el8 node/worker-node2 Ready \u0026lt;none\u0026gt; 17h v1.26.6+1.el8 node/master-node Ready master 23h v1.26.6+1.el8 NAME READY STATUS RESTARTS AGE pod/coredns-66bff467f8-fnhbq 1/1 Running 0 23h pod/coredns-66bff467f8-xtc8k 1/1 Running 0 23h pod/etcd-master 1/1 Running 0 21h pod/kube-apiserver-master-node 1/1 Running 0 21h pod/kube-controller-manager-master-node 1/1 Running 0 21h pod/kube-flannel-ds-amd64-lxsfw 1/1 Running 0 17h pod/kube-flannel-ds-amd64-pqrqr 1/1 Running 0 17h pod/kube-flannel-ds-amd64-wj5nh 1/1 Running 0 17h pod/kube-proxy-2kxv2 1/1 Running 0 17h pod/kube-proxy-82vvj 1/1 Running 0 17h pod/kube-proxy-nrgw9 1/1 Running 0 23h pod/kube-scheduler-master 1/1 Running 0 21h Obtain the OIG container image The OIG Kubernetes deployment requires access to an OIG container image. The image can be obtained in the following ways:\n Prebuilt OIG container image Build your own OIG container image using WebLogic Image Tool Prebuilt OIG container image The latest prebuilt OIG October 2023 container image can be downloaded from Oracle Container Registry. 
This image is prebuilt by Oracle and includes Oracle Identity Governance 12.2.1.4.0, the October Patch Set Update (PSU) and other fixes released with the Critical Patch Update (CPU) program..\nNote: Before using this image you must login to Oracle Container Registry, navigate to Middleware \u0026gt; oig_cpu and accept the license agreement.\nYou can use this image in the following ways:\n Pull the container image from the Oracle Container Registry automatically during the OIG Kubernetes deployment. Manually pull the container image from the Oracle Container Registry and then upload it to your own container registry. Manually pull the container image from the Oracle Container Registry and manually stage it on the master node and each worker node. Build your own OIG container image using WebLogic Image Tool You can build your own OIG container image using the WebLogic Image Tool. This is recommended if you need to apply one off patches to a Prebuilt OIG container image. For more information about building your own container image with WebLogic Image Tool, see Create or update image.\nYou can use an image built with WebLogic Image Tool in the following ways:\n Manually upload them to your own container registry. Manually stage them on the master node and each worker node. Note: This documentation does not tell you how to pull or push the above images into a private container registry, or stage them on the master and worker nodes. Details of this can be found in the Enterprise Deployment Guide.\nSetup the code repository to deploy OIG domains Oracle Identity Governance domain deployment on Kubernetes leverages the WebLogic Kubernetes Operator infrastructure. For deploying the OIG domains, you need to set up the deployment scripts on the master node as below:\n Create a working directory to setup the source code.\n$ mkdir \u0026lt;workdir\u0026gt; For example:\n$ mkdir /scratch/OIGK8S Download the latest OIG deployment scripts from the OIG repository.\n$ cd \u0026lt;workdir\u0026gt; $ git clone https://github.com/oracle/fmw-kubernetes.git --branch release/23.4.1 For example:\n$ cd /scratch/OIGK8S $ git clone https://github.com/oracle/fmw-kubernetes.git --branch release/23.4.1 Set the $WORKDIR environment variable as follows:\n$ export WORKDIR=\u0026lt;workdir\u0026gt;/fmw-kubernetes/OracleIdentityGovernance For example:\n$ export WORKDIR=/scratch/OIGK8S/fmw-kubernetes/OracleIdentityGovernance Run the following command and see if the WebLogic custom resource definition name already exists:\n$ kubectl get crd In the output you should see:\nNo resources found in default namespace. 
If you see any of the following:\nNAME AGE clusters.weblogic.oracle 5d domains.weblogic.oracle 5d then run the following command to delete the existing crd\u0026rsquo;s:\n$ kubectl delete crd clusters.weblogic.oracle $ kubectl delete crd domains.weblogic.oracle Install the WebLogic Kubernetes Operator On the master node run the following command to create a namespace for the operator:\n$ kubectl create namespace \u0026lt;sample-kubernetes-operator-ns\u0026gt; For example:\n$ kubectl create namespace opns The output will look similar to the following:\nnamespace/opns created Create a service account for the operator in the operator\u0026rsquo;s namespace by running the following command:\n$ kubectl create serviceaccount -n \u0026lt;sample-kubernetes-operator-ns\u0026gt; \u0026lt;sample-kubernetes-operator-sa\u0026gt; For example:\n$ kubectl create serviceaccount -n opns op-sa The output will look similar to the following:\nserviceaccount/op-sa created Run the following helm command to install and start the operator:\n$ cd $WORKDIR $ helm install weblogic-kubernetes-operator kubernetes/charts/weblogic-operator \\ --namespace \u0026lt;sample-kubernetes-operator-ns\u0026gt; \\ --set image=ghcr.io/oracle/weblogic-kubernetes-operator:4.1.2 \\ --set serviceAccount=\u0026lt;sample-kubernetes-operator-sa\u0026gt; \\ --set \u0026#34;enableClusterRoleBinding=true\u0026#34; \\ --set \u0026#34;domainNamespaceSelectionStrategy=LabelSelector\u0026#34; \\ --set \u0026#34;domainNamespaceLabelSelector=weblogic-operator\\=enabled\u0026#34; \\ --set \u0026#34;javaLoggingLevel=FINE\u0026#34; --wait For example:\n$ cd $WORKDIR $ helm install weblogic-kubernetes-operator kubernetes/charts/weblogic-operator \\ --namespace opns \\ --set image=ghcr.io/oracle/weblogic-kubernetes-operator:4.1.2 \\ --set serviceAccount=op-sa \\ --set \u0026#34;enableClusterRoleBinding=true\u0026#34; \\ --set \u0026#34;domainNamespaceSelectionStrategy=LabelSelector\u0026#34; \\ --set \u0026#34;domainNamespaceLabelSelector=weblogic-operator\\=enabled\u0026#34; \\ --set \u0026#34;javaLoggingLevel=FINE\u0026#34; --wait The output will look similar to the following:\nNAME: weblogic-kubernetes-operator LAST DEPLOYED: \u0026lt;DATE\u0026gt; NAMESPACE: opns STATUS: deployed REVISION: 1 TEST SUITE: None Verify that the operator\u0026rsquo;s pod and services are running by executing the following command:\n$ kubectl get all -n \u0026lt;sample-kubernetes-operator-ns\u0026gt; For example:\n$ kubectl get all -n opns The output will look similar to the following:\nNAME READY STATUS RESTARTS AGE pod/weblogic-operator-b7d6df78c-vxnpt 1/1 Running 0 33s pod/weblogic-operator-webhook-7996b8b58b-68l8s 1/1 Running 0 33s NAME TYPE CLUSTER-IP EXTERNAL-IP PORT(S) AGE service/weblogic-operator-webhook-svc ClusterIP 10.109.163.130 \u0026lt;none\u0026gt; 8083/TCP,8084/TCP 34s NAME READY UP-TO-DATE AVAILABLE AGE deployment.apps/weblogic-operator 1/1 1 1 33s deployment.apps/weblogic-operator-webhook 1/1 1 1 33s NAME DESIRED CURRENT READY AGE replicaset.apps/weblogic-operator-b7d6df78c 1 1 1 33s replicaset.apps/weblogic-operator-webhook-7996b8b58b 1 1 1 33s Verify the operator pod\u0026rsquo;s log:\n$ kubectl logs -n \u0026lt;sample-kubernetes-operator-ns\u0026gt; -c weblogic-operator deployments/weblogic-operator For example:\n$ kubectl logs -n opns -c weblogic-operator deployments/weblogic-operator The output will look similar to the 
following:\n{\u0026quot;timestamp\u0026quot;:\u0026quot;\u0026lt;DATE\u0026gt;\u0026quot;,\u0026quot;thread\u0026quot;:37,\u0026quot;fiber\u0026quot;:\u0026quot;\u0026quot;,\u0026quot;namespace\u0026quot;:\u0026quot;\u0026quot;,\u0026quot;domainUID\u0026quot;:\u0026quot;\u0026quot;,\u0026quot;level\u0026quot;:\u0026quot;FINE\u0026quot;,\u0026quot;class\u0026quot;:\u0026quot;oracle.kubernetes.operator.DeploymentLiveness\u0026quot;,\u0026quot;method\u0026quot;:\u0026quot;run\u0026quot;,\u0026quot;timeInMillis\u0026quot;:1678902295852,\u0026quot;message\u0026quot;:\u0026quot;Liveness file last modified time set\u0026quot;,\u0026quot;exception\u0026quot;:\u0026quot;\u0026quot;,\u0026quot;code\u0026quot;:\u0026quot;\u0026quot;,\u0026quot;headers\u0026quot;:{},\u0026quot;body\u0026quot;:\u0026quot;\u0026quot;} {\u0026quot;timestamp\u0026quot;:\u0026quot;\u0026lt;DATE\u0026gt;\u0026quot;,\u0026quot;thread\u0026quot;:42,\u0026quot;fiber\u0026quot;:\u0026quot;\u0026quot;,\u0026quot;namespace\u0026quot;:\u0026quot;\u0026quot;,\u0026quot;domainUID\u0026quot;:\u0026quot;\u0026quot;,\u0026quot;level\u0026quot;:\u0026quot;FINE\u0026quot;,\u0026quot;class\u0026quot;:\u0026quot;oracle.kubernetes.operator.DeploymentLiveness\u0026quot;,\u0026quot;method\u0026quot;:\u0026quot;run\u0026quot;,\u0026quot;timeInMillis\u0026quot;:1678902300853,\u0026quot;message\u0026quot;:\u0026quot;Liveness file last modified time set\u0026quot;,\u0026quot;exception\u0026quot;:\u0026quot;\u0026quot;,\u0026quot;code\u0026quot;:\u0026quot;\u0026quot;,\u0026quot;headers\u0026quot;:{},\u0026quot;body\u0026quot;:\u0026quot;\u0026quot;} {\u0026quot;timestamp\u0026quot;:\u0026quot;\u0026lt;DATE\u0026gt;\u0026quot;,\u0026quot;thread\u0026quot;:21,\u0026quot;fiber\u0026quot;:\u0026quot;\u0026quot;,\u0026quot;namespace\u0026quot;:\u0026quot;\u0026quot;,\u0026quot;domainUID\u0026quot;:\u0026quot;\u0026quot;,\u0026quot;level\u0026quot;:\u0026quot;FINE\u0026quot;,\u0026quot;class\u0026quot;:\u0026quot;oracle.kubernetes.operator.DeploymentLiveness\u0026quot;,\u0026quot;method\u0026quot;:\u0026quot;run\u0026quot;,\u0026quot;timeInMillis\u0026quot;:1678902305854,\u0026quot;message\u0026quot;:\u0026quot;Liveness file last modified time set\u0026quot;,\u0026quot;exception\u0026quot;:\u0026quot;\u0026quot;,\u0026quot;code\u0026quot;:\u0026quot;\u0026quot;,\u0026quot;headers\u0026quot;:{},\u0026quot;body\u0026quot;:\u0026quot;\u0026quot;} Create a namespace for Oracle Identity Governance Run the following command to create a namespace for the domain:\n$ kubectl create namespace \u0026lt;domain_namespace\u0026gt; For example:\n$ kubectl create namespace oigns The output will look similar to the following:\nnamespace/oigns created Run the following command to tag the namespace so the WebLogic Kubernetes Operator can manage it:\n$ kubectl label namespaces \u0026lt;domain_namespace\u0026gt; weblogic-operator=enabled For example:\n$ kubectl label namespaces oigns weblogic-operator=enabled The output will look similar to the following:\nnamespace/oigns labeled Run the following command to check the label was created:\n$ kubectl describe namespace \u0026lt;domain_namespace\u0026gt; For example:\n$ kubectl describe namespace oigns The output will look similar to the following:\nName: oigns Labels: kubernetes.io/metadata.name=oigns weblogic-operator=enabled Annotations: \u0026lt;none\u0026gt; Status: Active No resource quota. No LimitRange resource. 
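As an optional extra check, you can list every namespace carrying the label that the operator was configured to watch for:
$ kubectl get namespaces -l weblogic-operator=enabled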
Create a Kubernetes secret for the container registry In this section you create a secret that stores the credentials for the container registry where the OIG image is stored.\nIf you are not using a container registry and have loaded the images on each of the master and worker nodes, then there is no need to create the registry secret.\n Run the following command to create the secret:\nkubectl create secret docker-registry \u0026#34;orclcred\u0026#34; --docker-server=\u0026lt;CONTAINER_REGISTRY\u0026gt; \\ --docker-username=\u0026#34;\u0026lt;USER_NAME\u0026gt;\u0026#34; \\ --docker-password=\u0026lt;PASSWORD\u0026gt; --docker-email=\u0026lt;EMAIL_ID\u0026gt; \\ --namespace=\u0026lt;domain_namespace\u0026gt; For example, if using Oracle Container Registry:\nkubectl create secret docker-registry \u0026#34;orclcred\u0026#34; --docker-server=container-registry.oracle.com \\ --docker-username=\u0026#34;user@example.com\u0026#34; \\ --docker-password=password --docker-email=user@example.com \\ --namespace=oigns Replace \u0026lt;USER_NAME\u0026gt; and \u0026lt;PASSWORD\u0026gt; with the credentials for the registry with the following caveats:\n If using Oracle Container Registry to pull the OIG container image, this is the username and password used to login to Oracle Container Registry. Before you can use this image you must login to Oracle Container Registry, navigate to Middleware \u0026gt; oig_cpu and accept the license agreement.\n If using your own container registry to store the OIG container image, this is the username and password (or token) for your container registry.\n The output will look similar to the following:\nsecret/orclcred created RCU schema creation In this section you create the RCU schemas in the Oracle Database.\nBefore following the steps in this section, make sure that the database and listener are up and running and you can connect to the database via SQL*Plus or other client tool.\n If using Oracle Container Registry or your own container registry for your OIG container image, run the following command to create a helper pod to run RCU:\n$ kubectl run --image=\u0026lt;image_name-from-registry\u0026gt; --image-pull-policy=\u0026#34;IfNotPresent\u0026#34; --overrides=\u0026#39;{\u0026#34;apiVersion\u0026#34;: \u0026#34;v1\u0026#34;, \u0026#34;spec\u0026#34;:{\u0026#34;imagePullSecrets\u0026#34;: [{\u0026#34;name\u0026#34;: \u0026#34;orclcred\u0026#34;}]}}\u0026#39; helper -n \u0026lt;domain_namespace\u0026gt; -- sleep infinity For example:\n$ kubectl run --image=container-registry.oracle.com/middleware/oig_cpu:12.2.1.4-jdk8-ol7-\u0026lt;October`23\u0026gt; --image-pull-policy=\u0026#34;IfNotPresent\u0026#34; --overrides=\u0026#39;{\u0026#34;apiVersion\u0026#34;: \u0026#34;v1\u0026#34;,\u0026#34;spec\u0026#34;:{\u0026#34;imagePullSecrets\u0026#34;: [{\u0026#34;name\u0026#34;: \u0026#34;orclcred\u0026#34;}]}}\u0026#39; helper -n oigns -- sleep infinity If you are not using a container registry and have loaded the image on each of the master and worker nodes, run the following command:\n$ kubectl run helper --image \u0026lt;image\u0026gt; -n oigns -- sleep infinity For example:\n$ kubectl run helper --image oracle/oig:12.2.1.4-jdk8-ol7-\u0026lt;October`23\u0026gt; -n oigns -- sleep infinity The output will look similar to the following:\npod/helper created Run the following command to check the pod is running:\n$ kubectl get pods -n \u0026lt;domain_namespace\u0026gt; For example:\n$ kubectl get pods -n oigns The output will look similar to the following:\nNAME READY 
STATUS RESTARTS AGE helper 1/1 Running 0 3m Note: If you are pulling the image from a container registry it may take several minutes before the pod has a STATUS of 1\\1. While the pod is starting you can check the status of the pod, by running the following command:\n$ kubectl describe pod helper -n oigns Run the following command to start a bash shell in the helper pod:\n$ kubectl exec -it helper -n \u0026lt;domain_namespace\u0026gt; -- /bin/bash For example:\n$ kubectl exec -it helper -n oigns -- /bin/bash This will take you into a bash shell in the running helper pod:\n[oracle@helper oracle]$ In the helper bash shell run the following commands to set the environment:\n[oracle@helper oracle]$ export DB_HOST=\u0026lt;db_host.domain\u0026gt; [oracle@helper oracle]$ export DB_PORT=\u0026lt;db_port\u0026gt; [oracle@helper oracle]$ export DB_SERVICE=\u0026lt;service_name\u0026gt; [oracle@helper oracle]$ export RCUPREFIX=\u0026lt;rcu_schema_prefix\u0026gt; [oracle@helper oracle]$ export RCU_SCHEMA_PWD=\u0026lt;rcu_schema_pwd\u0026gt; [oracle@helper oracle]$ echo -e \u0026lt;db_pwd\u0026gt;\u0026#34;\\n\u0026#34;\u0026lt;rcu_schema_pwd\u0026gt; \u0026gt; /tmp/pwd.txt [oracle@helper oracle]$ cat /tmp/pwd.txt where:\n\u0026lt;db_host.domain\u0026gt; is the database server hostname\n\u0026lt;db_port\u0026gt; is the database listener port\n\u0026lt;service_name\u0026gt; is the database service name\n\u0026lt;rcu_schema_prefix\u0026gt; is the RCU schema prefix you want to set\n\u0026lt;rcu_schema_pwd\u0026gt; is the password you want to set for the \u0026lt;rcu_schema_prefix\u0026gt;\n\u0026lt;db_pwd\u0026gt; is the SYS password for the database\nFor example:\n[oracle@helper oracle]$ export DB_HOST=mydatabasehost.example.com [oracle@helper oracle]$ export DB_PORT=1521 [oracle@helper oracle]$ export DB_SERVICE=orcl.example.com [oracle@helper oracle]$ export RCUPREFIX=OIGK8S [oracle@helper oracle]$ export RCU_SCHEMA_PWD=\u0026lt;password\u0026gt; [oracle@helper oracle]$ echo -e \u0026lt;password\u0026gt;\u0026#34;\\n\u0026#34;\u0026lt;password\u0026gt; \u0026gt; /tmp/pwd.txt [oracle@helper oracle]$ cat /tmp/pwd.txt \u0026lt;password\u0026gt; \u0026lt;password\u0026gt; In the helper bash shell run the following commands to create the RCU schemas in the database:\n[oracle@helper oracle]$ /u01/oracle/oracle_common/bin/rcu -silent -createRepository -databaseType ORACLE -connectString \\ $DB_HOST:$DB_PORT/$DB_SERVICE -dbUser sys -dbRole sysdba -useSamePasswordForAllSchemaUsers true \\ -selectDependentsForComponents true -schemaPrefix $RCUPREFIX -component OIM -component MDS -component SOAINFRA -component OPSS \\ -f \u0026lt; /tmp/pwd.txt The output will look similar to the following:\nRCU Logfile: /tmp/RCU\u0026lt;DATE\u0026gt;/logs/rcu.log Processing command line .... Repository Creation Utility - Checking Prerequisites Checking Global Prerequisites Repository Creation Utility - Checking Prerequisites Checking Component Prerequisites Repository Creation Utility - Creating Tablespaces Validating and Creating Tablespaces Create tablespaces in the repository database Repository Creation Utility - Create Repository Create in progress. 
Percent Complete: 10 Executing pre create operations Percent Complete: 25 Percent Complete: 25 Percent Complete: 26 Percent Complete: 27 Percent Complete: 28 Percent Complete: 28 Percent Complete: 29 Percent Complete: 29 Creating Common Infrastructure Services(STB) Percent Complete: 36 Percent Complete: 36 Percent Complete: 44 Percent Complete: 44 Percent Complete: 44 Creating Audit Services Append(IAU_APPEND) Percent Complete: 51 Percent Complete: 51 Percent Complete: 59 Percent Complete: 59 Percent Complete: 59 Creating Audit Services Viewer(IAU_VIEWER) Percent Complete: 66 Percent Complete: 66 Percent Complete: 67 Percent Complete: 67 Percent Complete: 68 Percent Complete: 68 Creating Metadata Services(MDS) Percent Complete: 76 Percent Complete: 76 Percent Complete: 76 Percent Complete: 77 Percent Complete: 77 Percent Complete: 78 Percent Complete: 78 Percent Complete: 78 Creating Weblogic Services(WLS) Percent Complete: 82 Percent Complete: 82 Percent Complete: 83 Percent Complete: 84 Percent Complete: 86 Percent Complete: 88 Percent Complete: 88 Percent Complete: 88 Creating User Messaging Service(UCSUMS) Percent Complete: 92 Percent Complete: 92 Percent Complete: 95 Percent Complete: 95 Percent Complete: 100 Creating Audit Services(IAU) Creating Oracle Platform Security Services(OPSS) Creating SOA Infrastructure(SOAINFRA) Creating Oracle Identity Manager(OIM) Executing post create operations Repository Creation Utility: Create - Completion Summary Database details: ----------------------------- Host Name : mydatabasehost.example.com Port : 1521 Service Name : ORCL.EXAMPLE.COM Connected As : sys Prefix for (prefixable) Schema Owners : OIGK8S RCU Logfile : /tmp/RCU\u0026lt;DATE\u0026gt;/logs/rcu.log Component schemas created: ----------------------------- Component Status Logfile Common Infrastructure Services Success /tmp/RCU\u0026lt;DATE\u0026gt;/logs/stb.log Oracle Platform Security Services Success /tmp/RCU\u0026lt;DATE\u0026gt;/logs/opss.log SOA Infrastructure Success /tmp/RCU\u0026lt;DATE\u0026gt;/logs/soainfra.log Oracle Identity Manager Success /tmp/RCU\u0026lt;DATE\u0026gt;/logs/oim.log User Messaging Service Success /tmp/RCU\u0026lt;DATE\u0026gt;/logs/ucsums.log Audit Services Success /tmp/RCU\u0026lt;DATE\u0026gt;/logs/iau.log Audit Services Append Success /tmp/RCU\u0026lt;DATE\u0026gt;/logs/iau_append.log Audit Services Viewer Success /tmp/RCU\u0026lt;DATE\u0026gt;/logs/iau_viewer.log Metadata Services Success /tmp/RCU\u0026lt;DATE\u0026gt;/logs/mds.log WebLogic Services Success /tmp/RCU\u0026lt;DATE\u0026gt;/logs/wls.log Repository Creation Utility - Create : Operation Completed [oracle@helper oracle]$ Run the following command to patch schemas in the database:\nThis command should be run if you are using an OIG image that contains OIG bundle patches. 
If using an OIG image without OIG bundle patches, then you can skip this step.\n [oracle@helper oracle]$ /u01/oracle/oracle_common/modules/thirdparty/org.apache.ant/1.10.5.0.0/apache-ant-1.10.5/bin/ant \\ -f /u01/oracle/idm/server/setup/deploy-files/automation.xml \\ run-patched-sql-files \\ -logger org.apache.tools.ant.NoBannerLogger \\ -logfile /u01/oracle/idm/server/bin/patch_oim_wls.log \\ -DoperationsDB.host=$DB_HOST \\ -DoperationsDB.port=$DB_PORT \\ -DoperationsDB.serviceName=$DB_SERVICE \\ -DoperationsDB.user=${RCUPREFIX}_OIM \\ -DOIM.DBPassword=$RCU_SCHEMA_PWD \\ -Dojdbc=/u01/oracle/oracle_common/modules/oracle.jdbc/ojdbc8.jar The output will look similar to the following:\nBuildfile: /u01/oracle/idm/server/setup/deploy-files/automation.xml Verify the database was patched successfully by viewing the patch_oim_wls.log:\n[oracle@helper oracle]$ cat /u01/oracle/idm/server/bin/patch_oim_wls.log The output should look similar to below:\n... [sql] Executing resource: /u01/oracle/idm/server/db/oim/oracle/Upgrade/oim12cps4/list/oim12cps4_upg_ent_trg_bkp.sql [sql] Executing resource: /u01/oracle/idm/server/db/oim/oracle/Upgrade/oim12cps4/list/oim12cps4_upg_ent_trg_fix.sql [sql] Executing resource: /u01/oracle/idm/server/db/oim/oracle/Upgrade/oim12cps4/list/oim12cps4_upg_ent_trg_restore_bkp.sql [sql] Executing resource: /u01/oracle/idm/server/db/oim/oracle/Upgrade/oim12cps4/list/oim12cps4_ddl_alter_pwr_add_column.sql [sql] 67 of 67 SQL statements executed successfully BUILD SUCCESSFUL Total time: 6 seconds Exit the helper bash shell by issuing the command exit.\n Preparing the environment for domain creation In this section you prepare the environment for the OIG domain creation. This involves the following steps:\na. Creating Kubernetes secrets for the domain and RCU\nb. Create a Kubernetes persistent volume and persistent volume claim\nCreating Kubernetes secrets for the domain and RCU Create a Kubernetes secret for the domain using the create-weblogic-credentials script in the same Kubernetes namespace as the domain:\n$ cd $WORKDIR/kubernetes/create-weblogic-domain-credentials $ ./create-weblogic-credentials.sh -u weblogic -p \u0026lt;pwd\u0026gt; -n \u0026lt;domain_namespace\u0026gt; -d \u0026lt;domain_uid\u0026gt; -s \u0026lt;kubernetes_domain_secret\u0026gt; where:\n-u weblogic is the WebLogic username\n-p \u0026lt;pwd\u0026gt; is the password for the WebLogic user\n-n \u0026lt;domain_namespace\u0026gt; is the domain namespace\n-d \u0026lt;domain_uid\u0026gt; is the domain UID to be created. The default is domain1 if not specified\n-s \u0026lt;kubernetes_domain_secret\u0026gt; is the name you want to create for the secret for this namespace. The default is to use the domainUID if not specified\nFor example:\n$ cd $WORKDIR/kubernetes/create-weblogic-domain-credentials $ ./create-weblogic-credentials.sh -u weblogic -p \u0026lt;password\u0026gt; -n oigns -d governancedomain -s oig-domain-credentials The output will look similar to the following:\nsecret/oig-domain-credentials created secret/oig-domain-credentials labeled The secret oig-domain-credentials has been successfully created in the oigns namespace. 
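For reference only, the script is broadly equivalent to creating and labelling a generic secret yourself. The following is a minimal sketch, assuming the example values above (use the script for the supported method):\n$ kubectl create secret generic oig-domain-credentials -n oigns --from-literal=username=weblogic --from-literal=password=\u0026lt;password\u0026gt; $ kubectl label secret oig-domain-credentials -n oigns weblogic.domainUID=governancedomain weblogic.domainName=governancedomain This results in an Opaque secret with the username, password and labels shown in the verification output below.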
Verify the secret is created using the following command:\n$ kubectl get secret \u0026lt;kubernetes_domain_secret\u0026gt; -o yaml -n \u0026lt;domain_namespace\u0026gt; For example:\n$ kubectl get secret oig-domain-credentials -o yaml -n oigns The output will look similar to the following:\n$ kubectl get secret oig-domain-credentials -o yaml -n oigns apiVersion: v1 data: password: V2VsY29tZTE= username: d2VibG9naWM= kind: Secret metadata: creationTimestamp: \u0026quot;\u0026lt;DATE\u0026gt;\u0026quot; labels: weblogic.domainName: governancedomain weblogic.domainUID: governancedomain name: oig-domain-credentials namespace: oigns resourceVersion: \u0026quot;3216738\u0026quot; uid: c2ec07e0-0135-458d-bceb-c648d2a9ac54 type: Opaque Create a Kubernetes secret for RCU in the same Kubernetes namespace as the domain, using the create-rcu-credentials.sh script:\n$ cd $WORKDIR/kubernetes/create-rcu-credentials $ ./create-rcu-credentials.sh -u \u0026lt;rcu_prefix\u0026gt; -p \u0026lt;rcu_schema_pwd\u0026gt; -a sys -q \u0026lt;sys_db_pwd\u0026gt; -d \u0026lt;domain_uid\u0026gt; -n \u0026lt;domain_namespace\u0026gt; -s \u0026lt;kubernetes_rcu_secret\u0026gt; where:\n-u \u0026lt;rcu_prefix\u0026gt; is the name of the RCU schema prefix created previously\n-p \u0026lt;rcu_schema_pwd\u0026gt; is the password for the RCU schema prefix\n-a \u0026lt;sys_db_user\u0026gt; is the database user with sys dba privilege\n-q \u0026lt;sys_db_pwd\u0026gt; is the sys database password\n-d \u0026lt;domain_uid\u0026gt; is the domain_uid that you created earlier\n-n \u0026lt;domain_namespace\u0026gt; is the domain namespace\n-s \u0026lt;kubernetes_rcu_secret\u0026gt; is the name of the rcu secret to create\nFor example:\n$ cd $WORKDIR/kubernetes/create-rcu-credentials $ ./create-rcu-credentials.sh -u OIGK8S -p \u0026lt;password\u0026gt; -a sys -q \u0026lt;password\u0026gt; -d governancedomain -n oigns -s oig-rcu-credentials The output will look similar to the following:\nsecret/oig-rcu-credentials created secret/oig-rcu-credentials labeled The secret oig-rcu-credentials has been successfully created in the oigns namespace. Verify the secret is created using the following command:\n$ kubectl get secret \u0026lt;kubernetes_rcu_secret\u0026gt; -o yaml -n \u0026lt;domain_namespace\u0026gt; For example:\n$ kubectl get secret oig-rcu-credentials -o yaml -n oigns The output will look similar to the following:\napiVersion: v1 data: password: V2VsY29tZTE= sys_password: V2VsY29tZTE= sys_username: c3lz username: T0lHSzhT kind: Secret metadata: creationTimestamp: \u0026quot;\u0026lt;DATE\u0026gt;\u0026quot; labels: weblogic.domainName: governancedomain weblogic.domainUID: governancedomain name: oig-rcu-credentials namespace: oigns resourceVersion: \u0026quot;3217023\u0026quot; uid: ce70b91a-fbbc-4839-9616-4cc2c1adeb4f type: Opaque Create a Kubernetes persistent volume and persistent volume claim As referenced in Prerequisites the nodes in the Kubernetes cluster must have access to a persistent volume such as a Network File System (NFS) mount or a shared file system.\nA persistent volume is the same as a disk mount but is inside a container. A Kubernetes persistent volume is an arbitrary name (determined in this case, by Oracle) that is mapped to a physical volume on a disk.\nWhen a container is started, it needs to mount that volume. The physical volume should be on a shared disk accessible by all the Kubernetes worker nodes because it is not known on which worker node the container will be started. 
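To illustrate this mapping, below is a minimal sketch of the kind of NFS-backed PersistentVolume and PersistentVolumeClaim that the create-pv-pvc.sh script generates later in this section. It is for orientation only and assumes the example values used below; apply the generated files, not this sketch.
apiVersion: v1
kind: PersistentVolume
metadata:
  # The PV name is arbitrary; the claim and the domain reference it via the storage class and labels.
  name: governancedomain-domain-pv
  labels:
    weblogic.domainUID: governancedomain
spec:
  storageClassName: governancedomain-domain-storage-class
  capacity:
    storage: 10Gi
  accessModes:
    - ReadWriteMany
  persistentVolumeReclaimPolicy: Retain
  nfs:
    server: mynfsserver
    path: /scratch/shared/governancedomainpv
---
apiVersion: v1
kind: PersistentVolumeClaim
metadata:
  name: governancedomain-domain-pvc
  namespace: oigns
  labels:
    weblogic.domainUID: governancedomain
spec:
  storageClassName: governancedomain-domain-storage-class
  accessModes:
    - ReadWriteMany
  resources:
    requests:
      storage: 10Gi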
In the case of Identity and Access Management, the persistent volume does not get erased when a container stops. This enables persistent configurations.\nThe example below uses an NFS mounted volume (\u0026lt;persistent_volume\u0026gt;/governancedomainpv). Other volume types can also be used. See the official Kubernetes documentation for Volumes.\nNote: The persistent volume directory needs to be accessible to both the master and worker node(s). In this example /scratch/shared/governancedomainpv is accessible from all nodes via NFS.\n Make a backup copy of the create-pv-pvc-inputs.yaml file and create required directories:\n$ cd $WORKDIR/kubernetes/create-weblogic-domain-pv-pvc $ cp create-pv-pvc-inputs.yaml create-pv-pvc-inputs.yaml.orig $ mkdir output $ mkdir -p \u0026lt;persistent_volume\u0026gt;/governancedomainpv $ sudo chown -R 1000:0 \u0026lt;persistent_volume\u0026gt;/governancedomainpv For example:\n$ cd $WORKDIR/kubernetes/create-weblogic-domain-pv-pvc $ cp create-pv-pvc-inputs.yaml create-pv-pvc-inputs.yaml.orig $ mkdir output $ mkdir -p /scratch/shared/governancedomainpv $ sudo chown -R 1000:0 /scratch/shared/governancedomainpv On the master node run the following command to ensure it is possible to read and write to the persistent volume:\ncd \u0026lt;persistent_volume\u0026gt;/governancedomainpv touch filemaster.txt ls filemaster.txt For example:\ncd /scratch/shared/governancedomainpv touch filemaster.txt ls filemaster.txt On the first worker node run the following to ensure it is possible to read and write to the persistent volume:\ncd /scratch/shared/governancedomainpv ls filemaster.txt touch fileworker1.txt ls fileworker1.txt Repeat the above for any other worker nodes e.g. fileworker2.txt etc. Once proven that it\u0026rsquo;s possible to read and write from each node to the persistent volume, delete the files created.\n Navigate to $WORKDIR/kubernetes/create-weblogic-domain-pv-pvc:\n$ cd $WORKDIR/kubernetes/create-weblogic-domain-pv-pvc and edit the create-pv-pvc-inputs.yaml file and update the following parameters to reflect your settings. Save the file when complete:\nbaseName: \u0026lt;domain\u0026gt; domainUID: \u0026lt;domain_uid\u0026gt; namespace: \u0026lt;domain_namespace\u0026gt; weblogicDomainStorageType: NFS weblogicDomainStorageNFSServer: \u0026lt;nfs_server\u0026gt; weblogicDomainStoragePath: \u0026lt;physical_path_of_persistent_storage\u0026gt; weblogicDomainStorageSize: 10Gi For example:\n# The base name of the pv and pvc baseName: domain # Unique ID identifying a domain. # If left empty, the generated pv can be shared by multiple domains # This ID must not contain an underscope (\u0026quot;_\u0026quot;), and must be lowercase and unique across all domains in a Kubernetes cluster. domainUID: governancedomain # Name of the namespace for the persistent volume claim namespace: oigns # Persistent volume type for the persistent storage. # The value must be 'HOST_PATH' or 'NFS'. # If using 'NFS', weblogicDomainStorageNFSServer must be specified. weblogicDomainStorageType: NFS # The server name or ip address of the NFS server to use for the persistent storage. # The following line must be uncomment and customized if weblogicDomainStorateType is NFS: weblogicDomainStorageNFSServer: mynfsserver # Physical path of the persistent storage. # When weblogicDomainStorageType is set to HOST_PATH, this value should be set the to path to the # domain storage on the Kubernetes host. 
# When weblogicDomainStorageType is set to NFS, then weblogicDomainStorageNFSServer should be set # to the IP address or name of the DNS server, and this value should be set to the exported path # on that server. # Note that the path where the domain is mounted in the WebLogic containers is not affected by this # setting, that is determined when you create your domain. # The following line must be uncomment and customized: weblogicDomainStoragePath: /scratch/shared/governancedomainpv # Reclaim policy of the persistent storage # The valid values are: 'Retain', 'Delete', and 'Recycle' weblogicDomainStorageReclaimPolicy: Retain # Total storage allocated to the persistent storage. weblogicDomainStorageSize: 10Gi Execute the create-pv-pvc.sh script to create the PV and PVC configuration files:\n$ ./create-pv-pvc.sh -i create-pv-pvc-inputs.yaml -o output The output will be similar to the following:\nInput parameters being used export version=\u0026quot;create-weblogic-sample-domain-pv-pvc-inputs-v1\u0026quot; export baseName=\u0026quot;domain\u0026quot; export domainUID=\u0026quot;governancedomain\u0026quot; export namespace=\u0026quot;oigns\u0026quot; export weblogicDomainStorageType=\u0026quot;NFS\u0026quot; export weblogicDomainStorageNFSServer=\u0026quot;mynfsserver\u0026quot; export weblogicDomainStoragePath=\u0026quot;/scratch/shared/governancedomainpv\u0026quot; export weblogicDomainStorageReclaimPolicy=\u0026quot;Retain\u0026quot; export weblogicDomainStorageSize=\u0026quot;10Gi\u0026quot; Generating output/pv-pvcs/governancedomain-domain-pv.yaml Generating output/pv-pvcs/governancedomain-domain-pvc.yaml The following files were generated: output/pv-pvcs/governancedomain-domain-pv.yaml output/pv-pvcs/governancedomain-domain-pvc.yaml Completed Run the following to show the files are created:\n$ ls output/pv-pvcs create-pv-pvc-inputs.yaml governancedomain-domain-pv.yaml governancedomain-domain-pvc.yaml Run the following kubectl command to create the PV and PVC in the domain namespace:\n$ kubectl create -f output/pv-pvcs/governancedomain-domain-pv.yaml -n \u0026lt;domain_namespace\u0026gt; $ kubectl create -f output/pv-pvcs/governancedomain-domain-pvc.yaml -n \u0026lt;domain_namespace\u0026gt; For example:\n$ kubectl create -f output/pv-pvcs/governancedomain-domain-pv.yaml -n oigns $ kubectl create -f output/pv-pvcs/governancedomain-domain-pvc.yaml -n oigns The output will look similar to the following:\npersistentvolume/governancedomain-domain-pv created persistentvolumeclaim/governancedomain-domain-pvc created Run the following commands to verify the PV and PVC were created successfully:\n$ kubectl describe pv \u0026lt;pv_name\u0026gt; $ kubectl describe pvc \u0026lt;pvc_name\u0026gt; -n \u0026lt;domain_namespace\u0026gt; For example:\n$ kubectl describe pv governancedomain-domain-pv $ kubectl describe pvc governancedomain-domain-pvc -n oigns The output will look similar to the following:\n$ kubectl describe pv governancedomain-domain-pv Name: governancedomain-domain-pv Labels: weblogic.domainUID=governancedomain Annotations: pv.kubernetes.io/bound-by-controller: yes Finalizers: [kubernetes.io/pv-protection] StorageClass: governancedomain-domain-storage-class Status: Bound Claim: oigns/governancedomain-domain-pvc Reclaim Policy: Retain Access Modes: RWX VolumeMode: Filesystem Capacity: 10Gi Node Affinity: \u0026lt;none\u0026gt; Message: Source: Type: NFS (an NFS mount that lasts the lifetime of a pod) Server: mynfsserver Path: /scratch/shared/governancedomainpv ReadOnly: false Events: 
\u0026lt;none\u0026gt; $ kubectl describe pvc governancedomain-domain-pvc -n oigns Name: governancedomain-domain-pvc Namespace: oigns StorageClass: governancedomain-domain-storage-class Status: Bound Volume: governancedomain-domain-pv Labels: weblogic.domainUID=governancedomain Annotations: pv.kubernetes.io/bind-completed: yes pv.kubernetes.io/bound-by-controller: yes Finalizers: [kubernetes.io/pvc-protection] Capacity: 10Gi Access Modes: RWX VolumeMode: Filesystem Mounted By: \u0026lt;none\u0026gt; Events: \u0026lt;none\u0026gt; You are now ready to create the OIG domain as per Create OIG Domains\n " +}, +{ + "uri": "/fmw-kubernetes/23.4.1/idm-products/oam/create-oam-domains/", + "title": "Create OAM domains", + "tags": [], + "description": "Sample for creating an OAM domain home on an existing PV or PVC, and the domain resource YAML file for deploying the generated OAM domain.", + "content": " Introduction\n Prerequisites\n Prepare the create domain script\n Run the create domain script\n Set the OAM server memory parameters\n Initializing the domain\n Verify the results\na. Verify the domain, pods and services\nb. Verify the domain\nc. Verify the pods\n Introduction The OAM deployment scripts demonstrate the creation of an OAM domain home on an existing Kubernetes persistent volume (PV) and persistent volume claim (PVC). The scripts also generate the domain YAML file, which can then be used to start the Kubernetes artifacts of the corresponding domain.\nPrerequisites Before you begin, perform the following steps:\n Review the Domain resource documentation. Ensure that you have executed all the preliminary steps documented in Prepare your environment. Ensure that the database is up and running. Prepare the create domain script The sample scripts for Oracle Access Management domain deployment are available at $WORKDIR/kubernetes/create-access-domain.\n Make a copy of the create-domain-inputs.yaml file:\n$ cd $WORKDIR/kubernetes/create-access-domain/domain-home-on-pv $ cp create-domain-inputs.yaml create-domain-inputs.yaml.orig Edit the create-domain-inputs.yaml and modify the following parameters. 
Save the file when complete:\ndomainUID: \u0026lt;domain_uid\u0026gt; domainHome: /u01/oracle/user_projects/domains/\u0026lt;domain_uid\u0026gt; image: \u0026lt;image_name\u0026gt;:\u0026lt;tag\u0026gt; imagePullSecretName: \u0026lt;container_registry_secret\u0026gt; weblogicCredentialsSecretName: \u0026lt;kubernetes_domain_secret\u0026gt; logHome: /u01/oracle/user_projects/domains/logs/\u0026lt;domain_uid\u0026gt; namespace: \u0026lt;domain_namespace\u0026gt; persistentVolumeClaimName: \u0026lt;pvc_name\u0026gt; rcuSchemaPrefix: \u0026lt;rcu_prefix\u0026gt; rcuDatabaseURL: \u0026lt;rcu_db_host\u0026gt;:\u0026lt;rcu_db_port\u0026gt;/\u0026lt;rcu_db_service_name\u0026gt; rcuCredentialsSecret: \u0026lt;kubernetes_rcu_secret\u0026gt; For example:\ndomainUID: accessdomain domainHome: /u01/oracle/user_projects/domains/accessdomain image: container-registry.oracle.com/middleware/oam_cpu:12.2.1.4-jdk8-ol7-\u0026lt;October`23\u0026gt; imagePullSecretName: orclcred weblogicCredentialsSecretName: accessdomain-credentials logHome: /u01/oracle/user_projects/domains/logs/accessdomain namespace: oamns persistentVolumeClaimName: accessdomain-domain-pvc rcuSchemaPrefix: OAMK8S rcuDatabaseURL: mydatabasehost.example.com:1521/orcl.example.com rcuCredentialsSecret: accessdomain-rcu-credentials A full list of parameters in the create-domain-inputs.yaml file are shown below:\n Parameter Definition Default adminPort Port number for the Administration Server inside the Kubernetes cluster. 7001 adminNodePort Port number of the Administration Server outside the Kubernetes cluster. 30701 adminServerName Name of the Administration Server. AdminServer clusterName Name of the WebLogic cluster instance to generate for the domain. By default the cluster name is oam_cluster for the OAM domain. oam_cluster configuredManagedServerCount Number of Managed Server instances to generate for the domain. 5 createDomainFilesDir Directory on the host machine to locate all the files to create a WebLogic domain, including the script that is specified in the createDomainScriptName property. By default, this directory is set to the relative path wlst, and the create script will use the built-in WLST offline scripts in the wlst directory to create the WebLogic domain. It can also be set to the relative path wdt, and then the built-in WDT scripts will be used instead. An absolute path is also supported to point to an arbitrary directory in the file system. The built-in scripts can be replaced by the user-provided scripts or model files as long as those files are in the specified directory. Files in this directory are put into a Kubernetes config map, which in turn is mounted to the createDomainScriptsMountPath, so that the Kubernetes pod can use the scripts and supporting files to create a domain home. wlst createDomainScriptsMountPath Mount path where the create domain scripts are located inside a pod. The create-domain.sh script creates a Kubernetes job to run the script (specified in the createDomainScriptName property) in a Kubernetes pod to create a domain home. Files in the createDomainFilesDir directory are mounted to this location in the pod, so that the Kubernetes pod can use the scripts and supporting files to create a domain home. /u01/weblogic createDomainScriptName Script that the create domain script uses to create a WebLogic domain. The create-domain.sh script creates a Kubernetes job to run this script to create a domain home. 
The script is located in the in-pod directory that is specified in the createDomainScriptsMountPath property. If you need to provide your own scripts to create the domain home, instead of using the built-it scripts, you must use this property to set the name of the script that you want the create domain job to run. create-domain-job.sh domainHome Home directory of the OAM domain. If not specified, the value is derived from the domainUID as /shared/domains/\u0026lt;domainUID\u0026gt;. /u01/oracle/user_projects/domains/accessdomain domainPVMountPath Mount path of the domain persistent volume. /u01/oracle/user_projects/domains domainUID Unique ID that will be used to identify this particular domain. Used as the name of the generated WebLogic domain as well as the name of the Kubernetes domain resource. This ID must be unique across all domains in a Kubernetes cluster. This ID cannot contain any character that is not valid in a Kubernetes service name. accessdomain domainType Type of the domain. Mandatory input for OAM domains. You must provide one of the supported domain type value: oam (deploys an OAM domain) oam exposeAdminNodePort Boolean indicating if the Administration Server is exposed outside of the Kubernetes cluster. false exposeAdminT3Channel Boolean indicating if the T3 administrative channel is exposed outside the Kubernetes cluster. true image OAM container image. The operator requires OAM 12.2.1.4. Refer to Obtain the OAM container image for details on how to obtain or create the image. oracle/oam:12.2.1.4.0 imagePullPolicy WebLogic container image pull policy. Legal values are IfNotPresent, Always, or Never IfNotPresent imagePullSecretName Name of the Kubernetes secret to access the container registry to pull the OAM container image. The presence of the secret will be validated when this parameter is specified. includeServerOutInPodLog Boolean indicating whether to include the server .out to the pod\u0026rsquo;s stdout. true initialManagedServerReplicas Number of Managed Servers to initially start for the domain. 2 javaOptions Java options for starting the Administration Server and Managed Servers. A Java option can have references to one or more of the following pre-defined variables to obtain WebLogic domain information: $(DOMAIN_NAME), $(DOMAIN_HOME), $(ADMIN_NAME), $(ADMIN_PORT), and $(SERVER_NAME). -Dweblogic.StdoutDebugEnabled=false logHome The in-pod location for the domain log, server logs, server out, and Node Manager log files. If not specified, the value is derived from the domainUID as /shared/logs/\u0026lt;domainUID\u0026gt;. /u01/oracle/user_projects/domains/logs/accessdomain managedServerNameBase Base string used to generate Managed Server names. oam_server managedServerPort Port number for each Managed Server. 8001 namespace Kubernetes namespace in which to create the domain. accessns persistentVolumeClaimName Name of the persistent volume claim created to host the domain home. If not specified, the value is derived from the domainUID as \u0026lt;domainUID\u0026gt;-weblogic-sample-pvc. accessdomain-domain-pvc productionModeEnabled Boolean indicating if production mode is enabled for the domain. true serverStartPolicy Determines which WebLogic Server instances will be started. Legal values are Never, IfNeeded, AdminOnly. IfNeeded t3ChannelPort Port for the T3 channel of the NetworkAccessPoint. 30012 t3PublicAddress Public address for the T3 channel. This should be set to the public address of the Kubernetes cluster. 
This would typically be a load balancer address. For development environments only: In a single server (all-in-one) Kubernetes deployment, this may be set to the address of the master, or at the very least, it must be set to the address of one of the worker nodes. If not provided, the script will attempt to set it to the IP address of the Kubernetes cluster weblogicCredentialsSecretName Name of the Kubernetes secret for the Administration Server\u0026rsquo;s user name and password. If not specified, then the value is derived from the domainUID as \u0026lt;domainUID\u0026gt;-weblogic-credentials. accessdomain-domain-credentials weblogicImagePullSecretName Name of the Kubernetes secret for the container registry, used to pull the WebLogic Server image. serverPodCpuRequest, serverPodMemoryRequest, serverPodCpuCLimit, serverPodMemoryLimit The maximum amount of compute resources allowed, and minimum amount of compute resources required, for each server pod. Please refer to the Kubernetes documentation on Managing Compute Resources for Containers for details. Resource requests and resource limits are not specified. rcuSchemaPrefix The schema prefix to use in the database, for example OAM1. You may wish to make this the same as the domainUID in order to simplify matching domains to their RCU schemas. OAM1 rcuDatabaseURL The database URL. oracle-db.default.svc.cluster.local:1521/devpdb.k8s rcuCredentialsSecret The Kubernetes secret containing the database credentials. accessdomain-rcu-credentials datasourceType Type of JDBC datasource applicable for the OAM domain. Legal values are agl and generic. Choose agl for Active GridLink datasource and generic for Generic datasource. For enterprise deployments, Oracle recommends that you use GridLink data sources to connect to Oracle RAC databases. See the Enterprise Deployment Guide for further details. generic Note that the names of the Kubernetes resources in the generated YAML files may be formed with the value of some of the properties specified in the create-inputs.yaml file. Those properties include the adminServerName, clusterName and managedServerNameBase. If those values contain any characters that are invalid in a Kubernetes service name, those characters are converted to valid values in the generated YAML files. For example, an uppercase letter is converted to a lowercase letter and an underscore (\u0026quot;_\u0026quot;) is converted to a hyphen (\u0026quot;-\u0026quot;).\nThe sample demonstrates how to create an OAM domain home and associated Kubernetes resources for a domain that has one cluster only. In addition, the sample provides the capability for users to supply their own scripts to create the domain home for other use cases. 
The generated domain YAML file could also be modified to cover more use cases.\nRun the create domain script Run the create domain script, specifying your inputs file and an output directory to store the generated artifacts:\n$ cd $WORKDIR/kubernetes/create-access-domain/domain-home-on-pv $ ./create-domain.sh -i create-domain-inputs.yaml -o /\u0026lt;path to output-directory\u0026gt; For example:\n$ cd $WORKDIR/kubernetes/create-access-domain/domain-home-on-pv $ ./create-domain.sh -i create-domain-inputs.yaml -o output The output will look similar to the following:\nInput parameters being used export version=\u0026quot;create-weblogic-sample-domain-inputs-v1\u0026quot; export adminPort=\u0026quot;7001\u0026quot; export adminServerName=\u0026quot;AdminServer\u0026quot; export domainUID=\u0026quot;accessdomain\u0026quot; export domainType=\u0026quot;oam\u0026quot; export domainHome=\u0026quot;/u01/oracle/user_projects/domains/accessdomain\u0026quot; export serverStartPolicy=\u0026quot;IfNeeded\u0026quot; export clusterName=\u0026quot;oam_cluster\u0026quot; export configuredManagedServerCount=\u0026quot;5\u0026quot; export initialManagedServerReplicas=\u0026quot;2\u0026quot; export managedServerNameBase=\u0026quot;oam_server\u0026quot; export managedServerPort=\u0026quot;14100\u0026quot; export image=\u0026quot;container-registry.oracle.com/middleware/oam_cpu:12.2.1.4-jdk8-ol7-\u0026lt;October`23\u0026gt;\u0026quot; export imagePullPolicy=\u0026quot;IfNotPresent\u0026quot; export imagePullSecretName=\u0026quot;orclcred\u0026quot; export productionModeEnabled=\u0026quot;true\u0026quot; export weblogicCredentialsSecretName=\u0026quot;accessdomain-credentials\u0026quot; export includeServerOutInPodLog=\u0026quot;true\u0026quot; export logHome=\u0026quot;/u01/oracle/user_projects/domains/logs/accessdomain\u0026quot; export httpAccessLogInLogHome=\u0026quot;true\u0026quot; export t3ChannelPort=\u0026quot;30012\u0026quot; export exposeAdminT3Channel=\u0026quot;false\u0026quot; export adminNodePort=\u0026quot;30701\u0026quot; export exposeAdminNodePort=\u0026quot;false\u0026quot; export namespace=\u0026quot;oamns\u0026quot; javaOptions=-Dweblogic.StdoutDebugEnabled=false export persistentVolumeClaimName=\u0026quot;accessdomain-domain-pvc\u0026quot; export domainPVMountPath=\u0026quot;/u01/oracle/user_projects/domains\u0026quot; export createDomainScriptsMountPath=\u0026quot;/u01/weblogic\u0026quot; export createDomainScriptName=\u0026quot;create-domain-job.sh\u0026quot; export createDomainFilesDir=\u0026quot;wlst\u0026quot; export rcuSchemaPrefix=\u0026quot;OAMK8S\u0026quot; export rcuDatabaseURL=\u0026quot;mydatabasehost.example.com:1521/orcl.example.com\u0026quot; export rcuCredentialsSecret=\u0026quot;accessdomain-rcu-credentials\u0026quot; export datasourceType=\u0026quot;generic\u0026quot; validateWlsDomainName called with accessdomain createFiles - valuesInputFile is create-domain-inputs.yaml createDomainScriptName is create-domain-job.sh Generating output/weblogic-domains/accessdomain/create-domain-job.yaml Generating output/weblogic-domains/accessdomain/delete-domain-job.yaml Generating output/weblogic-domains/accessdomain/domain.yaml Checking to see if the secret accessdomain-credentials exists in namespace oamns configmap/accessdomain-create-oam-infra-domain-job-cm created Checking the configmap accessdomain-create-oam-infra-domain-job-cm was created configmap/accessdomain-create-oam-infra-domain-job-cm labeled Checking if object type job with name accessdomain-create-oam-infra-domain-job 
exists No resources found in oamns namespace. Creating the domain by creating the job output/weblogic-domains/accessdomain/create-domain-job.yaml job.batch/accessdomain-create-oam-infra-domain-job created Waiting for the job to complete... status on iteration 1 of 20 pod accessdomain-create-oam-infra-domain-job-6tgw4 status is Running status on iteration 2 of 20 pod accessdomain-create-oam-infra-domain-job-6tgw4 status is Running status on iteration 3 of 20 pod accessdomain-create-oam-infra-domain-job-6tgw4 status is Running status on iteration 4 of 20 pod accessdomain-create-oam-infra-domain-job-6tgw4 status is Running status on iteration 5 of 20 pod accessdomain-create-oam-infra-domain-job-6tgw4 status is Running status on iteration 6 of 20 pod accessdomain-create-oam-infra-domain-job-6tgw4 status is Completed Domain accessdomain was created and will be started by the WebLogic Kubernetes Operator The following files were generated: output/weblogic-domains/accessdomain/create-domain-inputs.yaml output/weblogic-domains/accessdomain/create-domain-job.yaml output/weblogic-domains/accessdomain/domain.yaml Note: If the domain creation fails, refer to the Troubleshooting section.\nThe command creates a domain.yaml file required for domain creation.\n Set the OAM server memory parameters By default, the java memory parameters assigned to the oam_server cluster are very small. The minimum recommended values are -Xms4096m -Xmx8192m. However, Oracle recommends you to set these to -Xms8192m -Xmx8192m in a production environment.\n Navigate to the /output/weblogic-domains/\u0026lt;domain_uid\u0026gt; directory:\n$ cd $WORKDIR/kubernetes/create-access-domain/domain-home-on-pv/output/weblogic-domains/\u0026lt;domain_uid\u0026gt; For example:\n$ cd $WORKDIR/kubernetes/create-access-domain/domain-home-on-pv/output/weblogic-domains/accessdomain Edit the domain.yaml file and inside name: accessdomain-oam-cluster, add the memory setting as below:\n serverPod: env: - name: USER_MEM_ARGS value: \u0026quot;-XX:+UseContainerSupport -Djava.security.egd=file:/dev/./urandom -Xms8192m -Xmx8192m\u0026quot; resources: limits: cpu: \u0026quot;2\u0026quot; memory: \u0026quot;8Gi\u0026quot; requests: cpu: \u0026quot;1000m\u0026quot; memory: \u0026quot;4Gi\u0026quot; For example:\napiVersion: weblogic.oracle/v1 kind: Cluster metadata: name: accessdomain-oam-cluster namespace: oamns spec: clusterName: oam_cluster serverService: precreateService: true serverPod: env: - name: USER_MEM_ARGS value: \u0026quot;-XX:+UseContainerSupport -Djava.security.egd=file:/dev/./urandom -Xms8192m -Xmx8192m\u0026quot; resources: limits: cpu: \u0026quot;2\u0026quot; memory: \u0026quot;8Gi\u0026quot; requests: cpu: \u0026quot;1000m\u0026quot; memory: \u0026quot;4Gi\u0026quot; replicas: 1 Note: The above CPU and memory values are for development environments only. For Enterprise Deployments, please review the performance recommendations and sizing requirements in Enterprise Deployment Guide for Oracle Identity and Access Management in a Kubernetes Cluster.\nNote: Limits and requests for CPU resources are measured in CPU units. One CPU in Kubernetes is equivalent to 1 vCPU/Core for cloud providers, and 1 hyperthread on bare-metal Intel processors. An \u0026ldquo;m\u0026rdquo; suffix in a CPU attribute indicates ‘milli-CPU’, so 500m is 50% of a CPU. Memory can be expressed in various units, where one Mi is one IEC unit mega-byte (1024^2), and one Gi is one IEC unit giga-byte (1024^3). 
For more information, see Resource Management for Pods and Containers, Assign Memory Resources to Containers and Pods, and Assign CPU Resources to Containers and Pods.\nNote: The parameters above are also utilized by the Kubernetes Horizontal Pod Autoscaler (HPA). For more details on HPA, see Kubernetes Horizontal Pod Autoscaler.\nNote: If required you can also set the same resources and limits for the accessdomain-policy-cluster.\n In the domain.yaml locate the section of the file starting with adminServer:. Under the env: tag add the following CLASSPATH entries. This is required for running the idmconfigtool from the Administration Server.\n- name: CLASSPATH value: \u0026quot;/u01/oracle/wlserver/server/lib/weblogic.jar\u0026quot; For example:\n# adminServer is used to configure the desired behavior for starting the administration server. adminServer: # adminService: # channels: # The Admin Server's NodePort # - channelName: default # nodePort: 30701 # Uncomment to export the T3Channel as a service # - channelName: T3Channel serverPod: # an (optional) list of environment variable to be set on the admin servers env: - name: USER_MEM_ARGS value: \u0026quot;-Djava.security.egd=file:/dev/./urandom -Xms512m -Xmx1024m \u0026quot; - name: CLASSPATH value: \u0026quot;/u01/oracle/wlserver/server/lib/weblogic.jar\u0026quot; If required, you can add the optional parameter maxClusterConcurrentStartup to the spec section of the domain.yaml. This parameter specifies the number of managed servers to be started in sequence per cluster. For example if you updated the initialManagedServerReplicas to 4 in create-domain-inputs.yaml and only had 2 nodes, then setting maxClusterConcurrentStartup: 1 will start one managed server at a time on each node, rather than starting them all at once. This can be useful to take the strain off individual nodes at startup. Below is an example with the parameter added:\napiVersion: \u0026quot;weblogic.oracle/v9\u0026quot; kind: Domain metadata: name: accessdomain namespace: oamns labels: weblogic.domainUID: accessdomain spec: # The WebLogic Domain Home domainHome: /u01/oracle/user_projects/domains/accessdomain maxClusterConcurrentStartup: 1 # The domain home source type # Set to PersistentVolume for domain-in-pv, Image for domain-in-image, or FromModel for model-in-image domainHomeSourceType: PersistentVolume .... 
Save the changes to domain.yaml\n Initializing the domain Create the Kubernetes resource using the following command:\n$ kubectl apply -f $WORKDIR/kubernetes/create-access-domain/domain-home-on-pv/output/weblogic-domains/\u0026lt;domain_uid\u0026gt;/domain.yaml For example:\n$ kubectl apply -f $WORKDIR/kubernetes/create-access-domain/domain-home-on-pv/output/weblogic-domains/accessdomain/domain.yaml The output will look similar to the following:\ndomain.weblogic.oracle/accessdomain created cluster.weblogic.oracle/accessdomain-oam-cluster created cluster.weblogic.oracle/accessdomain-policy-cluster created Verify the results Verify the domain, pods and services Verify the domain, servers pods and services are created and in the READY state with a status of 1/1, by running the following command:\n$ kubectl get all,domains -n \u0026lt;domain_namespace\u0026gt; For example:\n$ kubectl get all,domains -n oamns The output will look similar to the following:\nNAME READY STATUS RESTARTS AGE pod/accessdomain-adminserver 1/1 Running 0 11m pod/accessdomain-create-oam-infra-domain-job-7c9r9 0/1 Completed 0 18m pod/accessdomain-oam-policy-mgr1 1/1 Running 0 3m31s pod/accessdomain-oam-server1 1/1 Running 0 3m31s pod/helper 1/1 Running 0 33m NAME TYPE CLUSTER-IP EXTERNAL-IP PORT(S) AGE service/accessdomain-adminserver ClusterIP None \u0026lt;none\u0026gt; 7001/TCP 11m service/accessdomain-cluster-oam-cluster ClusterIP 10.101.59.154 \u0026lt;none\u0026gt; 14100/TCP 3m31s service/accessdomain-cluster-policy-cluster ClusterIP 10.98.236.51 \u0026lt;none\u0026gt; 15100/TCP 3m31s service/accessdomain-oam-policy-mgr1 ClusterIP None \u0026lt;none\u0026gt; 15100/TCP 3m31s service/accessdomain-oam-policy-mgr2 ClusterIP 10.104.92.12 \u0026lt;none\u0026gt; 15100/TCP 3m31s service/accessdomain-oam-policy-mgr3 ClusterIP 10.96.244.37 \u0026lt;none\u0026gt; 15100/TCP 3m31s service/accessdomain-oam-policy-mgr4 ClusterIP 10.105.201.23 \u0026lt;none\u0026gt; 15100/TCP 3m31s service/accessdomain-oam-policy-mgr5 ClusterIP 10.110.12.227 \u0026lt;none\u0026gt; 15100/TCP 3m31s service/accessdomain-oam-server1 ClusterIP None \u0026lt;none\u0026gt; 14100/TCP 3m31s service/accessdomain-oam-server2 ClusterIP 10.96.137.33 \u0026lt;none\u0026gt; 14100/TCP 3m31s service/accessdomain-oam-server3 ClusterIP 10.103.178.35 \u0026lt;none\u0026gt; 14100/TCP 3m31s service/accessdomain-oam-server4 ClusterIP 10.97.254.78 \u0026lt;none\u0026gt; 14100/TCP 3m31s service/accessdomain-oam-server5 ClusterIP 10.105.65.104 \u0026lt;none\u0026gt; 14100/TCP 3m31s NAME COMPLETIONS DURATION AGE job.batch/accessdomain-create-oam-infra-domain-job 1/1 2m6s 18m NAME AGE domain.weblogic.oracle/accessdomain 12m NAME AGE cluster.weblogic.oracle/accessdomain-oam-cluster 11m cluster.weblogic.oracle/accessdomain-policy-cluster 11m Note: It will take several minutes before all the services listed above show. When a pod has a STATUS of 0/1 the pod is started but the OAM server associated with it is currently starting. While the pods are starting you can check the startup status in the pod logs, by running the following command:\n$ kubectl logs accessdomain-adminserver -n oamns $ kubectl logs accessdomain-oam-policy-mgr1 -n oamns $ kubectl logs accessdomain-oam-server1 -n oamns etc.. The default domain created by the script has the following characteristics:\n An Administration Server named AdminServer listening on port 7001. A configured OAM cluster named oam_cluster of size 5. A configured Policy Manager cluster named policy_cluster of size 5. 
One started OAM Managed Server, named oam_server1, listening on port 14100. One started Policy Manager Managed Servers named oam-policy-mgr1, listening on port 15100. Log files that are located in \u0026lt;persistent_volume\u0026gt;/logs/\u0026lt;domainUID\u0026gt;. Verify the domain Run the following command to describe the domain:\n$ kubectl describe domain \u0026lt;domain_uid\u0026gt; -n \u0026lt;domain_namespace\u0026gt; For example:\n$ kubectl describe domain accessdomain -n oamns The output will look similar to the following:\nName: accessdomain Namespace: oamns Labels: weblogic.domainUID=accessdomain Annotations: \u0026lt;none\u0026gt; API Version: weblogic.oracle/v9 Kind: Domain Metadata: Creation Timestamp: \u0026lt;DATE\u0026gt; Generation: 1 Managed Fields: API Version: weblogic.oracle/v9 Fields Type: FieldsV1 fieldsV1: f:metadata: f:annotations: .: f:kubectl.kubernetes.io/last-applied-configuration: f:labels: .: f:weblogic.domainUID: f:spec: .: f:adminServer: .: f:adminChannelPortForwardingEnabled: f:serverPod: .: f:env: f:serverStartPolicy: f:clusters: f:dataHome: f:domainHome: f:domainHomeSourceType: f:failureRetryIntervalSeconds: f:failureRetryLimitMinutes: f:httpAccessLogInLogHome: f:image: f:imagePullPolicy: f:imagePullSecrets: f:includeServerOutInPodLog: f:logHome: f:logHomeEnabled: f:logHomeLayout: f:maxClusterConcurrentShutdown: f:maxClusterConcurrentStartup: f:maxClusterUnavailable: f:replicas: f:serverPod: .: f:env: f:volumeMounts: f:volumes: f:serverStartPolicy: f:webLogicCredentialsSecret: .: f:name: Manager: kubectl-client-side-apply Operation: Update Time: \u0026lt;DATE\u0026gt; API Version: weblogic.oracle/v9 Fields Type: FieldsV1 fieldsV1: f:status: .: f:clusters: f:conditions: f:observedGeneration: f:servers: f:startTime: Manager: Kubernetes Java Client Operation: Update Subresource: status Time: \u0026lt;DATE\u0026gt; Resource Version: 2074089 UID: e194d483-7383-4359-adb9-bf97de36518b Spec: Admin Server: Admin Channel Port Forwarding Enabled: true Server Pod: Env: Name: USER_MEM_ARGS Value: -Djava.security.egd=file:/dev/./urandom -Xms512m -Xmx1024m Name: CLASSPATH Value: /u01/oracle/wlserver/server/lib/weblogic.jar Server Start Policy: IfNeeded Clusters: Name: accessdomain-oam-cluster Name: accessdomain-policy-cluster Data Home: Domain Home: /u01/oracle/user_projects/domains/accessdomain Domain Home Source Type: PersistentVolume Failure Retry Interval Seconds: 120 Failure Retry Limit Minutes: 1440 Http Access Log In Log Home: true Image: container-registry.oracle.com/middleware/oam_cpu:12.2.1.4-jdk8-ol7-\u0026lt;October'23\u0026gt; Image Pull Policy: IfNotPresent Image Pull Secrets: Name: orclcred Include Server Out In Pod Log: true Log Home: /u01/oracle/user_projects/domains/logs/accessdomain Log Home Enabled: true Log Home Layout: ByServers Max Cluster Concurrent Shutdown: 1 Max Cluster Concurrent Startup: 0 Max Cluster Unavailable: 1 Replicas: 1 Server Pod: Env: Name: JAVA_OPTIONS Value: -Dweblogic.StdoutDebugEnabled=false Name: USER_MEM_ARGS Value: -Djava.security.egd=file:/dev/./urandom -Xms256m -Xmx1024m Volume Mounts: Mount Path: /u01/oracle/user_projects/domains Name: weblogic-domain-storage-volume Volumes: Name: weblogic-domain-storage-volume Persistent Volume Claim: Claim Name: accessdomain-domain-pvc Server Start Policy: IfNeeded Web Logic Credentials Secret: Name: accessdomain-credentials Status: Clusters: Cluster Name: oam_cluster Conditions: Last Transition Time: \u0026lt;DATE\u0026gt; Status: True Type: Available Last Transition Time: 
\u0026lt;DATE\u0026gt; Status: True Type: Completed Label Selector: weblogic.domainUID=accessdomain,weblogic.clusterName=oam_cluster Maximum Replicas: 5 Minimum Replicas: 0 Observed Generation: 1 Ready Replicas: 1 Replicas: 1 Replicas Goal: 1 Cluster Name: policy_cluster Conditions: Last Transition Time: \u0026lt;DATE\u0026gt; Status: True Type: Available Last Transition Time: \u0026lt;DATE\u0026gt; Status: True Type: Completed Label Selector: weblogic.domainUID=accessdomain,weblogic.clusterName=policy_cluster Maximum Replicas: 5 Minimum Replicas: 0 Observed Generation: 1 Ready Replicas: 1 Replicas: 1 Replicas Goal: 1 Conditions: Last Transition Time: \u0026lt;DATE\u0026gt; Status: True Type: Available Last Transition Time: \u0026lt;DATE\u0026gt; Status: True Type: Completed Observed Generation: 1 Servers: Health: Activation Time: \u0026lt;DATE\u0026gt; Overall Health: ok Subsystems: Subsystem Name: ServerRuntime Symptoms: Node Name: worker-node2 Pod Phase: Running Pod Ready: True Server Name: AdminServer State: RUNNING State Goal: RUNNING Cluster Name: oam_cluster Health: Activation Time: \u0026lt;DATE\u0026gt; Overall Health: ok Subsystems: Subsystem Name: ServerRuntime Symptoms: Node Name: worker-node1 Pod Phase: Running Pod Ready: True Server Name: oam_server1 State: RUNNING State Goal: RUNNING Cluster Name: oam_cluster Server Name: oam_server2 State: SHUTDOWN State Goal: SHUTDOWN Cluster Name: oam_cluster Server Name: oam_server3 State: SHUTDOWN State Goal: SHUTDOWN Cluster Name: oam_cluster Server Name: oam_server4 State: SHUTDOWN State Goal: SHUTDOWN Cluster Name: oam_cluster Server Name: oam_server5 State: SHUTDOWN State Goal: SHUTDOWN Cluster Name: policy_cluster Health: Activation Time: \u0026lt;DATE\u0026gt; Overall Health: ok Subsystems: Subsystem Name: ServerRuntime Symptoms: Node Name: worker-node1 Pod Phase: Running Pod Ready: True Server Name: oam_policy_mgr1 State: RUNNING State Goal: RUNNING Cluster Name: policy_cluster Server Name: oam_policy_mgr2 State: SHUTDOWN State Goal: SHUTDOWN Cluster Name: policy_cluster Server Name: oam_policy_mgr3 State: SHUTDOWN State Goal: SHUTDOWN Cluster Name: policy_cluster Server Name: oam_policy_mgr4 State: SHUTDOWN State Goal: SHUTDOWN Cluster Name: policy_cluster Server Name: oam_policy_mgr5 State: SHUTDOWN State Goal: SHUTDOWN Start Time: \u0026lt;DATE\u0026gt; Events: Type Reason Age From Message ---- ------ ---- ---- ------- Normal Created 15m weblogic.operator Domain accessdomain was created. Normal Available 2m56s weblogic.operator Domain accessdomain is available: a sufficient number of its servers have reached the ready state. Normal Completed 2m56s weblogic.operator Domain accessdomain is complete because all of the following are true: there is no failure detected, there are no pending server shutdowns, and all servers expected to be running are ready and at their target image, auxiliary images, restart version, and introspect version. 
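If you only need the high-level status conditions rather than the full description (an optional shortcut, not part of the documented steps), you can query them directly from the domain resource:\n$ kubectl get domain accessdomain -n oamns -o jsonpath=\u0026#39;{.status.conditions}\u0026#39;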
In the Status section of the output, the available servers and clusters are listed.\n Verify the pods Run the following command to see the pods running the servers and which nodes they are running on:\n$ kubectl get pods -n \u0026lt;domain_namespace\u0026gt; -o wide For example:\n$ kubectl get pods -n oamns -o wide The output will look similar to the following:\nNAME READY STATUS RESTARTS AGE IP NODE NOMINATED NODE READINESS GATES accessdomain-adminserver 1/1 Running 0 18m 10.244.6.63 10.250.42.252 \u0026lt;none\u0026gt; \u0026lt;none\u0026gt; accessdomain-create-oam-infra-domain-job-7c9r9 0/1 Completed 0 25m 10.244.6.61 10.250.42.252 \u0026lt;none\u0026gt; \u0026lt;none\u0026gt; accessdomain-oam-policy-mgr1 1/1 Running 0 10m 10.244.5.13 10.250.42.255 \u0026lt;none\u0026gt; \u0026lt;none\u0026gt; accessdomain-oam-server1 1/1 Running 0 10m 10.244.5.12 10.250.42.255 \u0026lt;none\u0026gt; \u0026lt;none\u0026gt; helper 1/1 Running 0 40m 10.244.6.60 10.250.42.252 \u0026lt;none\u0026gt; \u0026lt;none\u0026gt; You are now ready to configure an Ingress to direct traffic for your OAM domain as per Configure an Ingress for an OAM domain.\n " +}, +{ + "uri": "/fmw-kubernetes/23.4.1/idm-products/oig/create-oig-domains/", + "title": "Create OIG domains", + "tags": [], + "description": "Sample for creating an OIG domain home on an existing PV or PVC, and the domain resource YAML file for deploying the generated OIG domain.", + "content": " Introduction\n Prerequisites\n Prepare the create domain script\n Run the create domain script\na. Generate the create domain script\nb. Setting the OIM server memory parameters\nc. Run the create domain scripts\n Verify the results\na. Verify the domain, pods and services\nb. Verify the domain\nc. Verify the pods\n Introduction The OIG deployment scripts demonstrate the creation of an OIG domain home on an existing Kubernetes persistent volume (PV) and persistent volume claim (PVC). The scripts also generate the domain YAML file, which can then be used to start the Kubernetes artifacts of the corresponding domain.\nPrerequisites Before you begin, perform the following steps:\n Review the Domain resource documentation. Ensure that you have executed all the preliminary steps documented in Prepare your environment. Ensure that the database is up and running. Prepare the create domain script The sample scripts for Oracle Identity Governance domain deployment are available at $WORKDIR/kubernetes/create-oim-domain.\n Make a copy of the create-domain-inputs.yaml file:\n$ cd $WORKDIR/kubernetes/create-oim-domain/domain-home-on-pv $ cp create-domain-inputs.yaml create-domain-inputs.yaml.orig Edit the create-domain-inputs.yaml and modify the following parameters. 
Save the file when complete:\nNote: Do not edit any other parameters other than ones mentioned below.\ndomainUID: \u0026lt;domain_uid\u0026gt; domainHome: /u01/oracle/user_projects/domains/\u0026lt;domain_uid\u0026gt; image: \u0026lt;image_name\u0026gt; imagePullSecretName: \u0026lt;container_registry_secret\u0026gt; weblogicCredentialsSecretName: \u0026lt;kubernetes_domain_secret\u0026gt; logHome: /u01/oracle/user_projects/domains/logs/\u0026lt;domain_id\u0026gt; namespace: \u0026lt;domain_namespace\u0026gt; persistentVolumeClaimName: \u0026lt;pvc_name\u0026gt; rcuSchemaPrefix: \u0026lt;rcu_prefix\u0026gt; rcuDatabaseURL: \u0026lt;rcu_db_host\u0026gt;:\u0026lt;rcu_db_port\u0026gt;/\u0026lt;rcu_db_service_name\u0026gt; rcuCredentialsSecret: \u0026lt;kubernetes_rcu_secret\u0026gt; frontEndHost: \u0026lt;front_end_hostname\u0026gt; frontEndPort: \u0026lt;front_end_port\u0026gt; For example:\ndomainUID: governancedomain domainHome: /u01/oracle/user_projects/domains/governancedomain image: container-registry.oracle.com/middleware/oig_cpu:12.2.1.4-jdk8-ol7-\u0026lt;October'23\u0026gt; imagePullSecretName: orclcred weblogicCredentialsSecretName: oig-domain-credentials logHome: /u01/oracle/user_projects/domains/logs/governancedomain namespace: oigns persistentVolumeClaimName: governancedomain-domain-pvc rcuSchemaPrefix: OIGK8S rcuDatabaseURL: mydatabasehost.example.com:1521/orcl.example.com rcuCredentialsSecret: oig-rcu-credentials frontEndHost: example.com frontEndPort: 14100 Note: For now frontEndHost and front_end_port should be set to example.com and 14100 respectively. These values will be changed to the correct values in post installation tasks in Set OIMFrontendURL using MBeans.\n A full list of parameters in the create-domain-inputs.yaml file are shown below:\n Parameter Definition Default adminPort Port number for the Administration Server inside the Kubernetes cluster. 7001 adminNodePort Port number of the Administration Server outside the Kubernetes cluster. 30701 adminServerName Name of the Administration Server. AdminServer clusterName Name of the WebLogic cluster instance to generate for the domain. By default the cluster name is oimcluster for the OIG domain. oimcluster configuredManagedServerCount Number of Managed Server instances to generate for the domain. 5 createDomainFilesDir Directory on the host machine to locate all the files to create a WebLogic domain, including the script that is specified in the createDomainScriptName property. By default, this directory is set to the relative path wlst, and the create script will use the built-in WLST offline scripts in the wlst directory to create the WebLogic domain. It can also be set to the relative path wdt, and then the built-in WDT scripts will be used instead. An absolute path is also supported to point to an arbitrary directory in the file system. The built-in scripts can be replaced by the user-provided scripts or model files as long as those files are in the specified directory. Files in this directory are put into a Kubernetes config map, which in turn is mounted to the createDomainScriptsMountPath, so that the Kubernetes pod can use the scripts and supporting files to create a domain home. wlst createDomainScriptsMountPath Mount path where the create domain scripts are located inside a pod. The create-domain.sh script creates a Kubernetes job to run the script (specified in the createDomainScriptName property) in a Kubernetes pod to create a domain home. 
Files in the createDomainFilesDir directory are mounted to this location in the pod, so that the Kubernetes pod can use the scripts and supporting files to create a domain home. /u01/weblogic createDomainScriptName Script that the create domain script uses to create a WebLogic domain. The create-domain.sh script creates a Kubernetes job to run this script to create a domain home. The script is located in the in-pod directory that is specified in the createDomainScriptsMountPath property. If you need to provide your own scripts to create the domain home, instead of using the built-it scripts, you must use this property to set the name of the script that you want the create domain job to run. create-domain-job.sh domainHome Home directory of the OIG domain. If not specified, the value is derived from the domainUID as /shared/domains/\u0026lt;domainUID\u0026gt;. /u01/oracle/user_projects/domains/oimcluster domainPVMountPath Mount path of the domain persistent volume. /u01/oracle/user_projects/domains domainUID Unique ID that will be used to identify this particular domain. Used as the name of the generated WebLogic domain as well as the name of the Kubernetes domain resource. This ID must be unique across all domains in a Kubernetes cluster. This ID cannot contain any character that is not valid in a Kubernetes service name. oimcluster exposeAdminNodePort Boolean indicating if the Administration Server is exposed outside of the Kubernetes cluster. false exposeAdminT3Channel Boolean indicating if the T3 administrative channel is exposed outside the Kubernetes cluster. true image OIG container image. The operator requires OIG 12.2.1.4. Refer to OIG domains for details on how to obtain or create the image. oracle/oig:12.2.1.4.0 imagePullPolicy WebLogic container image pull policy. Legal values are IfNotPresent, Always, or Never IfNotPresent imagePullSecretName Name of the Kubernetes secret to access the container registry to pull the OIG container image. The presence of the secret will be validated when this parameter is specified. includeServerOutInPodLog Boolean indicating whether to include the server .out to the pod\u0026rsquo;s stdout. true initialManagedServerReplicas Number of Managed Servers to initially start for the domain. 2 javaOptions Java options for starting the Administration Server and Managed Servers. A Java option can have references to one or more of the following pre-defined variables to obtain WebLogic domain information: $(DOMAIN_NAME), $(DOMAIN_HOME), $(ADMIN_NAME), $(ADMIN_PORT), and $(SERVER_NAME). -Dweblogic.StdoutDebugEnabled=false logHome The in-pod location for the domain log, server logs, server out, and Node Manager log files. If not specified, the value is derived from the domainUID as /shared/logs/\u0026lt;domainUID\u0026gt;. /u01/oracle/user_projects/domains/logs/oimcluster managedServerNameBase Base string used to generate Managed Server names. oim_server managedServerPort Port number for each Managed Server. 8001 namespace Kubernetes namespace in which to create the domain. oimcluster persistentVolumeClaimName Name of the persistent volume claim created to host the domain home. If not specified, the value is derived from the domainUID as \u0026lt;domainUID\u0026gt;-weblogic-sample-pvc. oimcluster-domain-pvc productionModeEnabled Boolean indicating if production mode is enabled for the domain. true serverStartPolicy Determines which WebLogic Server instances will be started. Legal values are Never, IfNeeded, AdminOnly. 
IfNeeded t3ChannelPort Port for the T3 channel of the NetworkAccessPoint. 30012 t3PublicAddress Public address for the T3 channel. This should be set to the public address of the Kubernetes cluster. This would typically be a load balancer address. For development environments only: In a single server (all-in-one) Kubernetes deployment, this may be set to the address of the master, or at the very least, it must be set to the address of one of the worker nodes. If not provided, the script will attempt to set it to the IP address of the Kubernetes cluster weblogicCredentialsSecretName Name of the Kubernetes secret for the Administration Server\u0026rsquo;s user name and password. If not specified, then the value is derived from the domainUID as \u0026lt;domainUID\u0026gt;-weblogic-credentials. oimcluster-domain-credentials weblogicImagePullSecretName Name of the Kubernetes secret for the container registry, used to pull the WebLogic Server image. serverPodCpuRequest, serverPodMemoryRequest, serverPodCpuCLimit, serverPodMemoryLimit The maximum amount of compute resources allowed, and minimum amount of compute resources required, for each server pod. Please refer to the Kubernetes documentation on Managing Compute Resources for Containers for details. Resource requests and resource limits are not specified. rcuSchemaPrefix The schema prefix to use in the database, for example OIGK8S. You may wish to make this the same as the domainUID in order to simplify matching domains to their RCU schemas. OIGK8S rcuDatabaseURL The database URL. oracle-db.default.svc.cluster.local:1521/devpdb.k8s rcuCredentialsSecret The Kubernetes secret containing the database credentials. oimcluster-rcu-credentials frontEndHost The entry point URL for the OIM. Not set frontEndPort The entry point port for the OIM. Not set datasourceType Type of JDBC datasource applicable for the OIG domain. Legal values are agl and generic. Choose agl for Active GridLink datasource and generic for Generic datasource. For enterprise deployments, Oracle recommends that you use GridLink data sources to connect to Oracle RAC databases. See the Enterprise Deployment Guide for further details. generic Note that the names of the Kubernetes resources in the generated YAML files may be formed with the value of some of the properties specified in the create-inputs.yaml file. Those properties include the adminServerName, clusterName and managedServerNameBase. If those values contain any characters that are invalid in a Kubernetes service name, those characters are converted to valid values in the generated YAML files. For example, an uppercase letter is converted to a lowercase letter and an underscore (\u0026quot;_\u0026quot;) is converted to a hyphen (\u0026quot;-\u0026quot;).\nThe sample demonstrates how to create an OIG domain home and associated Kubernetes resources for a domain that has one cluster only. In addition, the sample provides the capability for users to supply their own scripts to create the domain home for other use cases. 
The generated domain YAML file could also be modified to cover more use cases.\nRun the create domain script Generate the create domain script Run the create domain script, specifying your inputs file and an output directory to store the generated artifacts:\n$ cd $WORKDIR/kubernetes/create-oim-domain/domain-home-on-pv $ mkdir output $ ./create-domain.sh -i create-domain-inputs.yaml -o /\u0026lt;path to output-directory\u0026gt; For example:\n$ cd $WORKDIR/kubernetes/create-oim-domain/domain-home-on-pv $ mkdir output $ ./create-domain.sh -i create-domain-inputs.yaml -o output The output will look similar to the following:\nInput parameters being used export version=\u0026quot;create-weblogic-sample-domain-inputs-v1\u0026quot; export adminPort=\u0026quot;7001\u0026quot; export adminServerName=\u0026quot;AdminServer\u0026quot; export domainUID=\u0026quot;governancedomain\u0026quot; export domainHome=\u0026quot;/u01/oracle/user_projects/domains/governancedomain\u0026quot; export serverStartPolicy=\u0026quot;IfNeeded\u0026quot; export clusterName=\u0026quot;oim_cluster\u0026quot; export configuredManagedServerCount=\u0026quot;5\u0026quot; export initialManagedServerReplicas=\u0026quot;1\u0026quot; export managedServerNameBase=\u0026quot;oim_server\u0026quot; export managedServerPort=\u0026quot;14000\u0026quot; export image=\u0026quot;container-registry.oracle.com/middleware/oig_cpu:12.2.1.4-jdk8-ol7-\u0026lt;October'23\u0026gt;\u0026quot; export imagePullPolicy=\u0026quot;IfNotPresent\u0026quot; export imagePullSecretName=\u0026quot;orclcred\u0026quot; export productionModeEnabled=\u0026quot;true\u0026quot; export weblogicCredentialsSecretName=\u0026quot;oig-domain-credentials\u0026quot; export includeServerOutInPodLog=\u0026quot;true\u0026quot; export logHome=\u0026quot;/u01/oracle/user_projects/domains/logs/governancedomain\u0026quot; export t3ChannelPort=\u0026quot;30012\u0026quot; export exposeAdminT3Channel=\u0026quot;false\u0026quot; export adminNodePort=\u0026quot;30701\u0026quot; export exposeAdminNodePort=\u0026quot;false\u0026quot; export namespace=\u0026quot;oigns\u0026quot; javaOptions=-Dweblogic.StdoutDebugEnabled=false export persistentVolumeClaimName=\u0026quot;governancedomain-domain-pvc\u0026quot; export domainPVMountPath=\u0026quot;/u01/oracle/user_projects/domains\u0026quot; export createDomainScriptsMountPath=\u0026quot;/u01/weblogic\u0026quot; export createDomainScriptName=\u0026quot;create-domain-job.sh\u0026quot; export createDomainFilesDir=\u0026quot;wlst\u0026quot; export rcuSchemaPrefix=\u0026quot;OIGK8S\u0026quot; export rcuDatabaseURL=\u0026quot;mydatabasehost.example.com:1521/orcl.example.com\u0026quot; export rcuCredentialsSecret=\u0026quot;oig-rcu-credentials\u0026quot; export frontEndHost=\u0026quot;example.com\u0026quot; export frontEndPort=\u0026quot;14100\u0026quot; export datasourceType=\u0026quot;generic\u0026quot; validateWlsDomainName called with governancedomain createFiles - valuesInputFile is create-domain-inputs.yaml createDomainScriptName is create-domain-job.sh Generating output/weblogic-domains/governancedomain/create-domain-job.yaml Generating output/weblogic-domains/governancedomain/delete-domain-job.yaml Generating output/weblogic-domains/governancedomain/domain.yaml Checking to see if the secret governancedomain-domain-credentials exists in namespace oigns configmap/governancedomain-create-fmw-infra-sample-domain-job-cm created Checking the configmap governancedomain-create-fmw-infra-sample-domain-job-cm was created 
configmap/governancedomain-create-fmw-infra-sample-domain-job-cm labeled Checking if object type job with name governancedomain-create-fmw-infra-sample-domain-job exists No resources found in oigns namespace. Creating the domain by creating the job output/weblogic-domains/governancedomain/create-domain-job.yaml job.batch/governancedomain-create-fmw-infra-sample-domain-job created Waiting for the job to complete... status on iteration 1 of 40 pod governancedomain-create-fmw-infra-sample-domain-job-8cww8 status is Running status on iteration 2 of 40 pod governancedomain-create-fmw-infra-sample-domain-job-8cww8 status is Running status on iteration 3 of 40 pod governancedomain-create-fmw-infra-sample-domain-job-8cww8 status is Running status on iteration 4 of 40 pod governancedomain-create-fmw-infra-sample-domain-job-8cww8 status is Running status on iteration 5 of 40 pod governancedomain-create-fmw-infra-sample-domain-job-8cww8 status is Running status on iteration 6 of 40 pod governancedomain-create-fmw-infra-sample-domain-job-8cww8 status is Running status on iteration 7 of 40 pod governancedomain-create-fmw-infra-sample-domain-job-8cww8 status is Running status on iteration 8 of 40 pod governancedomain-create-fmw-infra-sample-domain-job-8cww8 status is Running status on iteration 9 of 40 pod governancedomain-create-fmw-infra-sample-domain-job-8cww8 status is Running status on iteration 10 of 40 pod governancedomain-create-fmw-infra-sample-domain-job-8cww8 status is Running status on iteration 11 of 40 pod governancedomain-create-fmw-infra-sample-domain-job-8cww8 status is Completed Domain governancedomain was created and will be started by the WebLogic Kubernetes Operator The following files were generated: output/weblogic-domains/governancedomain/create-domain-inputs.yaml output/weblogic-domains/governancedomain/create-domain-job.yaml output/weblogic-domains/governancedomain/domain.yaml sed Completed $ Note: If the create domain script fails, refer to the Troubleshooting section.\n Setting the OIM server memory parameters Navigate to the /output/weblogic-domains/\u0026lt;domain_uid\u0026gt; directory:\n$ cd $WORKDIR/kubernetes/create-oim-domain/domain-home-on-pv/output/weblogic-domains/\u0026lt;domain_uid\u0026gt; For example:\n$ cd $WORKDIR/kubernetes/create-oim-domain/domain-home-on-pv/output/weblogic-domains/governancedomain Edit the domain.yaml and locate the section of the file starting with: - clusterName: oim_cluster under governancedomain-oim-cluster. Add the following lines:\nserverPod: env: - name: USER_MEM_ARGS value: \u0026quot;-XX:+UseContainerSupport -Djava.security.egd=file:/dev/./urandom -Xms8192m -Xmx8192m\u0026quot; resources: limits: cpu: \u0026quot;2\u0026quot; memory: \u0026quot;8Gi\u0026quot; requests: cpu: \u0026quot;1000m\u0026quot; memory: \u0026quot;4Gi\u0026quot;\tThe file should look as follows:\n... apiVersion: weblogic.oracle/v1 kind: Cluster metadata: name: governancedomain-oim-cluster namespace: oigns spec: clusterName: oim_cluster serverService: precreateService: true replicas: 0 serverPod: env: - name: USER_MEM_ARGS value: \u0026quot;-XX:+UseContainerSupport -Djava.security.egd=file:/dev/./urandom -Xms8192m -Xmx8192m\u0026quot; resources: limits: cpu: \u0026quot;2\u0026quot; memory: \u0026quot;8Gi\u0026quot; requests: cpu: \u0026quot;1000m\u0026quot; memory: \u0026quot;4Gi\u0026quot; ... Note: The above CPU and memory values are for development environments only.
For Enterprise Deployments, please review the performance recommendations and sizing requirements in Enterprise Deployment Guide for Oracle Identity and Access Management in a Kubernetes Cluster.\nNote: Limits and requests for CPU resources are measured in CPU units. One CPU in Kubernetes is equivalent to 1 vCPU/Core for cloud providers, and 1 hyperthread on bare-metal Intel processors. An \u0026ldquo;m\u0026rdquo; suffix in a CPU attribute indicates ‘milli-CPU’, so 500m is 50% of a CPU. Memory can be expressed in various units, where one Mi is one IEC unit mega-byte (1024^2), and one Gi is one IEC unit giga-byte (1024^3). For more information, see Resource Management for Pods and Containers, Assign Memory Resources to Containers and Pods, and Assign CPU Resources to Containers and Pods.\nNote: The parameters above are also utilized by the Kubernetes Horizontal Pod Autoscaler (HPA). For more details on HPA, see Kubernetes Horizontal Pod Autoscaler.\nNote: If required you can also set the same resources and limits for the governancedomain-soa-cluster.\n Run the create domain scripts Create the Kubernetes resource using the following command:\n$ cd $WORKDIR/kubernetes/create-oim-domain/domain-home-on-pv/output/weblogic-domains/\u0026lt;domain_uid\u0026gt; $ kubectl apply -f domain.yaml For example:\n$ cd $WORKDIR/kubernetes/create-oim-domain/domain-home-on-pv/output/weblogic-domains/governancedomain $ kubectl apply -f domain.yaml The output will look similar to the following:\ndomain.weblogic.oracle/governancedomain unchanged cluster.weblogic.oracle/governancedomain-oim-cluster created cluster.weblogic.oracle/governancedomain-soa-cluster created Run the following command to view the status of the OIG pods:\n$ kubectl get pods -n oigns The output will initially look similar to the following:\nNAME READY STATUS RESTARTS AGE governancedomain-create-fmw-infra-sample-domain-job-8cww8 0/1 Completed 0 27m governancedomain-introspect-domain-job-p4brt 1/1 Running 0 6s helper 1/1 Running 0 3h30m The introspect-domain-job pod will be displayed first. Run the command again after several minutes and check to see that the Administration Server and SOA Server are both started. When started they should have STATUS = Running and READY = 1/1.\nNAME READY STATUS RESTARTS AGE/ governancedomain-adminserver 1/1 Running 0 7m30s governancedomain-create-fmw-infra-sample-domain-job-8cww8 0/1 Completed 0 35m governancedomain-soa-server1 1/1 Running 0 4m helper 1/1 Running 0 3h38m Note: It will take several minutes before all the pods listed above show. When a pod has a STATUS of 0/1 the pod is started but the OIG server associated with it is currently starting. 
While the pods are starting you can check the startup status in the pod logs, by running the following command:\n$ kubectl logs governancedomain-adminserver -n oigns $ kubectl logs governancedomain-soa-server1 -n oigns Check the clusters using the following command:\n$ kubectl get cluster -n oigns The output will look similar to the following:\nNAME AGE governancedomain-oim-cluster 9m governancedomain-soa-cluster 9m Start the OIM server using the following command:\n$ kubectl patch cluster -n \u0026lt;namespace\u0026gt; \u0026lt;OIMClusterName\u0026gt; --type=merge -p '{\u0026quot;spec\u0026quot;:{\u0026quot;replicas\u0026quot;:\u0026lt;initialManagedServerReplicas\u0026gt;}}' For example:\n$ kubectl patch cluster -n oigns governancedomain-oim-cluster --type=merge -p '{\u0026quot;spec\u0026quot;:{\u0026quot;replicas\u0026quot;:1}}' The output will look similar to the following:\ncluster.weblogic.oracle/governancedomain-oim-cluster patched Run the following command to view the status of the OIG pods:\n$ kubectl get pods -n oigns The output will initially look similar to the following:\nNAME READY STATUS RESTARTS AGE governancedomain-adminserver 1/1 Running 0 7m30s governancedomain-create-fmw-infra-sample-domain-job-8cww8 0/1 Completed 0 35m governancedomain-oim-server1 1/1 Running 0 4m25s governancedomain-soa-server1 1/1 Running 0 4m helper 1/1 Running 0 3h38m Note: It will take several minutes before the governancedomain-oim-server1 pod has a STATUS of 1/1. While the pod is starting you can check the startup status in the pod log, by running the following command:\n$ kubectl logs governancedomain-oim-server1 -n oigns Verify the results Verify the domain, pods and services Verify the domain, servers pods and services are created and in the READY state with a STATUS of 1/1, by running the following command:\n$ kubectl get all,domains -n \u0026lt;domain_namespace\u0026gt; For example:\n$ kubectl get all,domains -n oigns The output will look similar to the following:\nNAME READY STATUS RESTARTS AGE pod/governancedomain-adminserver 1/1 Running 0 19m30s pod/governancedomain-create-fmw-infra-sample-domain-job-8cww8 0/1 Completed 0 47m pod/governancedomain-oim-server1 1/1 Running 0 16m25s pod/governancedomain-soa-server1 1/1 Running 0 16m pod/helper 1/1 Running 0 3h50m NAME TYPE CLUSTER-IP EXTERNAL-IP PORT(S) AGE service/governancedomain-adminserver ClusterIP None \u0026lt;none\u0026gt; 7001/TCP 28m service/governancedomain-cluster-oim-cluster ClusterIP 10.106.198.40 \u0026lt;none\u0026gt; 14002/TCP,14000/TCP 25m service/governancedomain-cluster-soa-cluster ClusterIP 10.102.218.11 \u0026lt;none\u0026gt; 8001/TCP 25m service/governancedomain-oim-server1 ClusterIP None \u0026lt;none\u0026gt; 14002/TCP,14000/TCP 16m24s service/governancedomain-oim-server2 ClusterIP 10.97.32.112 \u0026lt;none\u0026gt; 14002/TCP,14000/TCP 25m service/governancedomain-oim-server3 ClusterIP 10.100.233.109 \u0026lt;none\u0026gt; 14002/TCP,14000/TCP 25m service/governancedomain-oim-server4 ClusterIP 10.96.154.17 \u0026lt;none\u0026gt; 14002/TCP,14000/TCP 25m service/governancedomain-oim-server5 ClusterIP 10.103.222.213 \u0026lt;none\u0026gt; 14002/TCP,14000/TCP 25m service/governancedomain-soa-server1 ClusterIP None \u0026lt;none\u0026gt; 8001/TCP 25m service/governancedomain-soa-server2 ClusterIP 10.104.43.118 \u0026lt;none\u0026gt; 8001/TCP 25m service/governancedomain-soa-server3 ClusterIP 10.110.180.120 \u0026lt;none\u0026gt; 8001/TCP 25m service/governancedomain-soa-server4 ClusterIP 10.99.161.73 
\u0026lt;none\u0026gt; 8001/TCP 25m service/governancedomain-soa-server5 ClusterIP 10.97.67.196 \u0026lt;none\u0026gt; 8001/TCP 25m NAME COMPLETIONS DURATION AGE job.batch/governancedomain-create-fmw-infra-sample-domain-job 1/1 3m6s 125m NAME AGE domain.weblogic.oracle/governancedomain 24m NAME AGE cluster.weblogic.oracle/governancedomain-oim-cluster 23m cluster.weblogic.oracle/governancedomain-soa-cluster 23m The default domain created by the script has the following characteristics:\n An Administration Server named AdminServer listening on port 7001. A configured OIG cluster named oig_cluster of size 5. A configured SOA cluster named soa_cluster of size 5. One started OIG managed Server, named oim_server1, listening on port 14000. One started SOA managed Server, named soa_server1, listening on port 8001. Log files that are located in \u0026lt;persistent_volume\u0026gt;/logs/\u0026lt;domainUID\u0026gt; Verify the domain Run the following command to describe the domain:\n$ kubectl describe domain \u0026lt;domain_uid\u0026gt; -n \u0026lt;namespace\u0026gt; For example:\n$ kubectl describe domain governancedomain -n oigns The output will look similar to the following:\nName: governancedomain Namespace: oigns Labels: weblogic.domainUID=governancedomain Annotations: \u0026lt;none\u0026gt; API Version: weblogic.oracle/v9 Kind: Domain Metadata: Creation Timestamp: \u0026lt;DATE\u0026gt; Generation: 1 Managed Fields: API Version: weblogic.oracle/v9 Fields Type: FieldsV1 fieldsV1: f:metadata: f:annotations: .: f:kubectl.kubernetes.io/last-applied-configuration: f:labels: .: f:weblogic.domainUID: f:spec: .: f:adminServer: .: f:adminChannelPortForwardingEnabled: f:serverPod: .: f:env: f:serverStartPolicy: f:clusters: f:dataHome: f:domainHome: f:domainHomeSourceType: f:failureRetryIntervalSeconds: f:failureRetryLimitMinutes: f:httpAccessLogInLogHome: f:image: f:imagePullPolicy: f:imagePullSecrets: f:includeServerOutInPodLog: f:logHome: f:logHomeEnabled: f:logHomeLayout: f:maxClusterConcurrentShutdown: f:maxClusterConcurrentStartup: f:maxClusterUnavailable: f:replicas: f:serverPod: .: f:env: f:volumeMounts: f:volumes: f:serverStartPolicy: f:webLogicCredentialsSecret: .: f:name: Manager: kubectl-client-side-apply Operation: Update Time: \u0026lt;DATE\u0026gt; API Version: weblogic.oracle/v9 Fields Type: FieldsV1 fieldsV1: f:status: .: f:clusters: f:conditions: f:observedGeneration: f:servers: f:startTime: Manager: Kubernetes Java Client Operation: Update Subresource: status Time: \u0026lt;DATE\u0026gt; Resource Version: 1247307 UID: 4933be73-df97-493f-a20c-bf1e24f6b3f2 Spec: Admin Server: Admin Channel Port Forwarding Enabled: true Server Pod: Env: Name: USER_MEM_ARGS Value: -Djava.security.egd=file:/dev/./urandom -Xms512m -Xmx1024m Server Start Policy: IfNeeded Clusters: Name: governancedomain-oim-cluster Name: governancedomain-soa-cluster Data Home: Domain Home: /u01/oracle/user_projects/domains/governancedomain Domain Home Source Type: PersistentVolume Failure Retry Interval Seconds: 120 Failure Retry Limit Minutes: 1440 Http Access Log In Log Home: true Image: container-registry.oracle.com/middleware/oig_cpu:12.2.1.4-jdk8-ol7-\u0026lt;October'23\u0026gt; Image Pull Policy: IfNotPresent Image Pull Secrets: Name: orclcred Include Server Out In Pod Log: true Log Home: /u01/oracle/user_projects/domains/logs/governancedomain Log Home Enabled: true Log Home Layout: ByServers Max Cluster Concurrent Shutdown: 1 Max Cluster Concurrent Startup: 0 Max Cluster Unavailable: 1 Replicas: 1 Server Pod: Env: Name: 
JAVA_OPTIONS Value: -Dweblogic.StdoutDebugEnabled=false Name: USER_MEM_ARGS Value: -Djava.security.egd=file:/dev/./urandom -Xms256m -Xmx1024m Volume Mounts: Mount Path: /u01/oracle/user_projects/domains Name: weblogic-domain-storage-volume Volumes: Name: weblogic-domain-storage-volume Persistent Volume Claim: Claim Name: governancedomain-domain-pvc Server Start Policy: IfNeeded Web Logic Credentials Secret: Name: oig-domain-credentials Status: Clusters: Cluster Name: oim_cluster Conditions: Last Transition Time: \u0026lt;DATE\u0026gt; Status: True Type: Available Last Transition Time: \u0026lt;DATE\u0026gt; Status: True Type: Completed Label Selector: weblogic.domainUID=governancedomain,weblogic.clusterName=oim_cluster Maximum Replicas: 5 Minimum Replicas: 0 Observed Generation: 2 Ready Replicas: 1 Replicas: 1 Replicas Goal: 1 Cluster Name: soa_cluster Conditions: Last Transition Time: \u0026lt;DATE\u0026gt; Status: True Type: Available Last Transition Time: \u0026lt;DATE\u0026gt; Status: True Type: Completed Label Selector: weblogic.domainUID=governancedomain,weblogic.clusterName=soa_cluster Maximum Replicas: 5 Minimum Replicas: 0 Observed Generation: 1 Ready Replicas: 1 Replicas: 1 Replicas Goal: 1 Conditions: Last Transition Time: \u0026lt;DATE\u0026gt; Status: True Type: Available Last Transition Time: \u0026lt;DATE\u0026gt; Status: True Type: Completed Observed Generation: 1 Servers: Health: Activation Time: \u0026lt;DATE\u0026gt; Overall Health: ok Subsystems: Subsystem Name: ServerRuntime Symptoms: Node Name: worker-node2 Pod Phase: Running Pod Ready: True Server Name: AdminServer State: RUNNING State Goal: RUNNING Cluster Name: oim_cluster Health: Activation Time: \u0026lt;DATE\u0026gt; Overall Health: ok Subsystems: Subsystem Name: ServerRuntime Symptoms: Node Name: worker-node1 Pod Phase: Running Pod Ready: True Server Name: oim_server1 State: RUNNING State Goal: RUNNING Cluster Name: oim_cluster Server Name: oim_server2 State: SHUTDOWN State Goal: SHUTDOWN Cluster Name: oim_cluster Server Name: oim_server3 State: SHUTDOWN State Goal: SHUTDOWN Cluster Name: oim_cluster Server Name: oim_server4 State: SHUTDOWN State Goal: SHUTDOWN Cluster Name: oim_cluster Server Name: oim_server5 State: SHUTDOWN State Goal: SHUTDOWN Cluster Name: soa_cluster Health: Activation Time: \u0026lt;DATE\u0026gt; Overall Health: ok Subsystems: Subsystem Name: ServerRuntime Symptoms: Node Name: worker-node1 Pod Phase: Running Pod Ready: True Server Name: soa_server1 State: RUNNING State Goal: RUNNING Cluster Name: soa_cluster Server Name: soa_server2 State: SHUTDOWN State Goal: SHUTDOWN Cluster Name: soa_cluster Server Name: soa_server3 State: SHUTDOWN State Goal: SHUTDOWN Cluster Name: soa_cluster Server Name: soa_server4 State: SHUTDOWN State Goal: SHUTDOWN Cluster Name: soa_cluster Server Name: soa_server5 State: SHUTDOWN State Goal: SHUTDOWN Start Time: \u0026lt;DATE\u0026gt; Events: Type Reason Age From Message ---- ------ ---- ---- ------- Normal Created 35m weblogic.operator Domain governancedomain was created. Normal Changed 34m (x1127 over 35m) weblogic.operator Domain governancedomain was changed. Warning Failed 34m (x227 over 35m) weblogic.operator Domain governancedomain failed due to 'Domain validation error': Cluster resource 'governancedomain-oim-cluster' not found in namespace 'oigns' Cluster resource 'governancedomain-soa-cluster' not found in namespace 'oigns'. Update the domain resource to correct the validation error. 
Warning Unavailable 17m weblogic.operator Domain governancedomain is unavailable: an insufficient number of its servers that are expected to be running are ready.\u0026quot;; Warning Incomplete 17m weblogic.operator Domain governancedomain is incomplete for one or more of the following reasons: there are failures detected, there are pending server shutdowns, or not all servers expected to be running are ready and at their target image, auxiliary images, restart version, and introspect version. Normal Completed 13m (x2 over 26m) weblogic.operator Domain governancedomain is complete because all of the following are true: there is no failure detected, there are no pending server shutdowns, and all servers expected to be running are ready and at their target image, auxiliary images, restart version, and introspect version. Normal Available 13m (x2 over 26m) weblogic.operator Domain governancedomain is available: a sufficient number of its servers have reached the ready state. In the Status section of the output, the available servers and clusters are listed.\n Verify the pods Run the following command to see the pods running the servers and which nodes they are running on:\n$ kubectl get pods -n \u0026lt;namespace\u0026gt; -o wide For example:\n$ kubectl get pods -n oigns -o wide The output will look similar to the following:\nNAME READY STATUS RESTARTS AGE IP NODE NOMINATED NODE READINESS GATES governancedomain-adminserver 1/1 Running 0 24m 10.244.1.42 worker-node2 \u0026lt;none\u0026gt; \u0026lt;none\u0026gt; governancedomain-create-fmw-infra-sample-domain-job-8cww8 0/1 Completed 0 52m 10.244.1.40 worker-node2 \u0026lt;none\u0026gt; \u0026lt;none\u0026gt; governancedomain-oim-server1 1/1 Running 0 52m 10.244.1.44 worker-node2 \u0026lt;none\u0026gt; \u0026lt;none\u0026gt; governancedomain-soa-server1 1/1 Running 0 21m 10.244.1.43 worker-node2 \u0026lt;none\u0026gt; \u0026lt;none\u0026gt; helper 1/1 Running 0 3h55m 10.244.1.39 worker-node2 \u0026lt;none\u0026gt; \u0026lt;none\u0026gt; You are now ready to configure an Ingress to direct traffic for your OIG domain as per Configure an ingress for an OIG domain.\n " +}, +{ + "uri": "/fmw-kubernetes/23.4.1/idm-products/oud/create-oud-instances/", + "title": "Create Oracle Unified Directory Instances", + "tags": [], + "description": "This document provides details of the oud-ds-rs Helm chart.", + "content": " Introduction Create a Kubernetes namespace Create a Kubernetes secret for the container registry Create a Kubernetes secret for cronjob images The oud-ds-rs Helm chart Create OUD instances Helm command output Verify the OUD deployment Verify the OUD replication Verify the cronjob Undeploy an OUD deployment Appendix A: Configuration parameters Appendix B: Environment Variables Introduction This chapter demonstrates how to deploy Oracle Unified Directory (OUD) 12c instance(s) and replicated instances using the Helm package manager for Kubernetes.\nThe helm chart can be used to deploy an Oracle Unified Directory instance as a base, with configured sample entries, and multiple replicated Oracle Unified Directory instances/pods/services based on the specified replicaCount.\nBased on the configuration, this chart deploys the following objects in the specified namespace of a Kubernetes cluster.\n Service Account Secret Persistent Volume and Persistent Volume Claim Pod(s)/Container(s) for Oracle Unified Directory Instances Services for interfaces exposed through Oracle Unified Directory Instances Ingress configuration Note: From July 22 (22.3.1) 
onwards OUD deployment is performed using StatefulSets.\nCreate a Kubernetes namespace Create a Kubernetes namespace for the OUD deployment by running the following command:\n$ kubectl create namespace \u0026lt;namespace\u0026gt; For example:\n$ kubectl create namespace oudns The output will look similar to the following:\nnamespace/oudns created Create a Kubernetes secret for the container registry Create a Kubernetes secret to store the credentials for the container registry where the OUD image is stored. This step must be followed if using Oracle Container Registry or your own private container registry. If you are not using a container registry and have loaded the images on each of the master and worker nodes, you can skip this step.\n Run the following command to create the secret:\nkubectl create secret docker-registry \u0026#34;orclcred\u0026#34; --docker-server=\u0026lt;CONTAINER_REGISTRY\u0026gt; \\ --docker-username=\u0026#34;\u0026lt;USER_NAME\u0026gt;\u0026#34; \\ --docker-password=\u0026lt;PASSWORD\u0026gt; --docker-email=\u0026lt;EMAIL_ID\u0026gt; \\ --namespace=\u0026lt;domain_namespace\u0026gt; For example, if using Oracle Container Registry:\n$ kubectl create secret docker-registry \u0026#34;orclcred\u0026#34; --docker-server=container-registry.oracle.com \\ --docker-username=\u0026#34;user@example.com\u0026#34; \\ --docker-password=password --docker-email=user@example.com \\ --namespace=oudns Replace \u0026lt;USER_NAME\u0026gt; and \u0026lt;PASSWORD\u0026gt; with the credentials for the registry with the following caveats:\n If using Oracle Container Registry to pull the OUD container image, this is the username and password used to log in to Oracle Container Registry. Before you can use this image you must log in to Oracle Container Registry, navigate to Middleware \u0026gt; oud_cpu and accept the license agreement.\n If using your own container registry to store the OUD container image, this is the username and password (or token) for your container registry.\n The output will look similar to the following:\nsecret/orclcred created Create a Kubernetes secret for cronjob images Once OUD is deployed, if the Kubernetes node where the OUD pod(s) is/are running goes down after the pod eviction time-out, the pod(s) don\u0026rsquo;t get evicted but move to a Terminating state. The pod(s) will then remain in that state forever. To avoid this problem, a cron job is created during OUD deployment that checks for any pods in Terminating state. If there are any pods in Terminating state, the cron job will delete them. The pods will then start again automatically. This cron job requires access to images on hub.docker.com.
A Kubernetes secret must therefore be created to enable access to these images.\n Create a Kubernetes secret to access the required images on hub.docker.com:\nNote: You must first have a user account on hub.docker.com:\n$ kubectl create secret docker-registry \u0026#34;dockercred\u0026#34; --docker-server=\u0026#34;https://index.docker.io/v1/\u0026#34; --docker-username=\u0026#34;\u0026lt;docker_username\u0026gt;\u0026#34; --docker-password=\u0026lt;password\u0026gt; --docker-email=\u0026lt;docker_email_credentials\u0026gt; --namespace=\u0026lt;domain_namespace\u0026gt; For example:\n$ kubectl create secret docker-registry \u0026quot;dockercred\u0026quot; --docker-server=\u0026quot;https://index.docker.io/v1/\u0026quot; --docker-username=\u0026quot;username\u0026quot; --docker-password=\u0026lt;password\u0026gt; --docker-email=user@example.com --namespace=oudns The output will look similar to the following:\nsecret/dockercred created The oud-ds-rs Helm chart The oud-ds-rs Helm chart allows you to create or deploy a group of replicated Oracle Unified Directory instances along with Kubernetes objects in a specified namespace.\nThe deployment can be initiated by running the following Helm command with reference to the oud-ds-rs Helm chart, along with configuration parameters according to your environment.\n$ cd $WORKDIR/kubernetes/helm $ helm install --namespace \u0026lt;namespace\u0026gt; \\ \u0026lt;Configuration Parameters\u0026gt; \\ \u0026lt;deployment/release name\u0026gt; \\ \u0026lt;Helm Chart Path/Name\u0026gt; Configuration Parameters (override values in chart) can be passed on with --set arguments on the command line and/or with -f / --values arguments when referring to files.\nNote: The examples in Create OUD instances below provide values which allow the user to override the default values provided by the Helm chart. A full list of configuration parameters and their default values is shown in Appendix A: Configuration parameters.\nFor more details about the helm command and parameters, please execute helm --help and helm install --help.\nCreate OUD instances You can create OUD instances using one of the following methods:\n Using a YAML file Using --set argument Note: While it is possible to install sample data during the OUD deployment, it is not possible to load your own data via an ldif file. In order to load data in OUD, create the OUD deployment and then use ldapmodify once the ingress has been deployed.
See Using LDAP utilities.\nUsing a YAML file Navigate to the $WORKDIR/kubernetes/helm directory:\n$ cd $WORKDIR/kubernetes/helm Create an oud-ds-rs-values-override.yaml as follows:\nimage: repository: \u0026lt;image_location\u0026gt; tag: \u0026lt;image_tag\u0026gt; pullPolicy: IfNotPresent imagePullSecrets: - name: orclcred oudConfig: # memory, cpu parameters for both requests and limits for oud instances resources: limits: cpu: \u0026#34;1\u0026#34; memory: \u0026#34;4Gi\u0026#34; requests: cpu: \u0026#34;500m\u0026#34; memory: \u0026#34;4Gi\u0026#34; rootUserPassword: \u0026lt;password\u0026gt; sampleData: \u0026#34;200\u0026#34; persistence: type: filesystem filesystem: hostPath: path: \u0026lt;persistent_volume\u0026gt;/oud_user_projects cronJob: kubectlImage: repository: bitnami/kubectl tag: \u0026lt;version\u0026gt; pullPolicy: IfNotPresent imagePullSecrets: - name: dockercred For example:\nimage: repository: container-registry.oracle.com/middleware/oud_cpu tag: 12.2.1.4-jdk8-ol7-\u0026lt;October`23\u0026gt; pullPolicy: IfNotPresent imagePullSecrets: - name: orclcred oudConfig: # memory, cpu parameters for both requests and limits for oud instances resources: limits: cpu: \u0026#34;1\u0026#34; memory: \u0026#34;8Gi\u0026#34; requests: cpu: \u0026#34;500m\u0026#34; memory: \u0026#34;4Gi\u0026#34; rootUserPassword: \u0026lt;password\u0026gt; sampleData: \u0026#34;200\u0026#34; persistence: type: filesystem filesystem: hostPath: path: /scratch/shared/oud_user_projects cronJob: kubectlImage: repository: bitnami/kubectl tag: 1.26.6 pullPolicy: IfNotPresent imagePullSecrets: - name: dockercred The following caveats exist:\n Replace \u0026lt;password\u0026gt; with the relevant password.\n sampleData: \u0026quot;200\u0026quot; will load 200 sample users into the default baseDN dc=example,dc=com. If you do not want sample data, remove this entry. If sampleData is set to 1,000,000 users or greater, then you must add the following entries to the yaml file to prevent inconsistencies in dsreplication:\ndeploymentConfig: startupTime: 720 period: 120 timeout: 60 The \u0026lt;version\u0026gt; in kubectlImage tag: should be set to the same version as your Kubernetes version (kubectl version). For example if your Kubernetes version is 1.26.6 set to 1.26.6.\n If you are not using Oracle Container Registry or your own container registry for your OUD container image, then you can remove the following:\nimagePullSecrets: - name: orclcred If your cluster does not have access to the internet to pull external images, such as bitnami/kubectl or busybox, you must load the images in a local container registry. You must then set the following:\ncronJob: kubectlImage: repository: container-registry.example.com/bitnami/kubectl tag: 1.26.6 pullPolicy: IfNotPresent busybox: image: container-registry.example.com/busybox If using NFS for your persistent volume then change the persistence section as follows:\nNote: If you want to use NFS you should ensure that you have a default Kubernetes storage class defined for your environment that allows network storage.\nFor more information on storage classes, see Storage Classes.\npersistence: type: networkstorage networkstorage: nfs: path: \u0026lt;persistent_volume\u0026gt;/oud_user_projects server: \u0026lt;NFS IP address\u0026gt; # if true, it will create the storageclass. if value is false, please provide existing storage class (storageClass) to be used. 
storageClassCreate: true storageClass: oud-sc # if storageClassCreate is true, please provide the custom provisioner if any to use. If you do not have a custom provisioner, delete this line, and it will use the default class kubernetes.io/is-default-class. provisioner: kubernetes.io/is-default-class The following caveats exist:\n If you want to create your own storage class, set storageClassCreate: true. If storageClassCreate: true it is recommended to set storageClass to a value of your choice, and provisioner to the provisioner supported by your cloud vendor. If you have an existing storageClass that supports network storage, set storageClassCreate: false and storageClass to the NAME value returned in \u0026ldquo;kubectl get storageclass\u0026rdquo;. The provisioner can be ignored. If using Block Device storage for your persistent volume then change the persistence section as follows:\nNote: If you want to use block devices you should ensure that you have a default Kubernetes storage class defined for your environment that allows dynamic storage. Each vendor has its own storage provider but it may not be configured to provide dynamic storage allocation.\nFor more information on storage classes, see Storage Classes.\npersistence: type: blockstorage # Specify Accessmode ReadWriteMany for NFS and for block ReadWriteOnce accessMode: ReadWriteOnce # if true, it will create the storageclass. if value is false, please provide existing storage class (storageClass) to be used. storageClassCreate: true storageClass: oud-sc # if storageClassCreate is true, please provide the custom provisioner if any to use or else it will use default. provisioner: oracle.com/oci The following caveats exist:\n If you want to create your own storage class, set storageClassCreate: true. If storageClassCreate: true it is recommended to set storageClass to a value of your choice, and provisioner to the provisioner supported by your cloud vendor. If you have an existing storageClass that supports dynamic storage, set storageClassCreate: false and storageClass to the NAME value returned in \u0026ldquo;kubectl get storageclass\u0026rdquo;. The provisioner can be ignored. For resources, limits and requests, the example CPU and memory values shown are for development environments only. For Enterprise Deployments, please review the performance recommendations and sizing requirements in Enterprise Deployment Guide for Oracle Identity and Access Management in a Kubernetes Cluster.\nNote: Limits and requests for CPU resources are measured in CPU units. One CPU in Kubernetes is equivalent to 1 vCPU/Core for cloud providers, and 1 hyperthread on bare-metal Intel processors. An \u0026ldquo;m\u0026rdquo; suffix in a CPU attribute indicates ‘milli-CPU’, so 500m is 50% of a CPU. Memory can be expressed in various units, where one Mi is one IEC unit mega-byte (1024^2), and one Gi is one IEC unit giga-byte (1024^3). For more information, see Resource Management for Pods and Containers, Assign Memory Resources to Containers and Pods, and Assign CPU Resources to Containers and Pods.\nNote: The parameters above are also utilized by the Kubernetes Horizontal Pod Autoscaler (HPA). For more details on HPA, see Kubernetes Horizontal Pod Autoscaler.\n If you plan on integrating OUD with other Oracle components then you must specify the following under the oudConfig: section:\n integration: \u0026lt;Integration option\u0026gt; For example: oudConfig: etc... 
integration: \u0026lt;Integration option\u0026gt; It is recommended to choose the option covering your minimal requirements. Allowed values include: `no-integration` (no integration), `basic` (Directory Integration Platform), `generic` (Directory Integration Platform, Database Net Services and E-Business Suite integration), `eus` (Directory Integration Platform, Database Net Services, E-Business Suite and Enterprise User Security integration). The default value is `no-integration`. **Note**: This will enable the integration type only. To integrate OUD with the Oracle component referenced, refer to the relevant product component documentation. If you want to enable Assured Replication, see Enabling Assured Replication (Optional).\n Run the following command to deploy OUD:\n$ helm install --namespace \u0026lt;namespace\u0026gt; \\ --values oud-ds-rs-values-override.yaml \\ \u0026lt;release_name\u0026gt; oud-ds-rs For example:\n$ helm install --namespace oudns \\ --values oud-ds-rs-values-override.yaml \\ oud-ds-rs oud-ds-rs Check the OUD deployment as per Verify the OUD deployment and Verify the OUD replication.\n Using --set argument Navigate to the $WORKDIR/kubernetes/helm directory:\n$ cd $WORKDIR/kubernetes/helm Run the following command to create OUD instances:\n$ helm install --namespace \u0026lt;namespace\u0026gt; \\ --set oudConfig.rootUserPassword=\u0026lt;password\u0026gt; \\ --set persistence.filesystem.hostPath.path=\u0026lt;persistent_volume\u0026gt;/oud_user_projects \\ --set image.repository=\u0026lt;image_location\u0026gt;,image.tag=\u0026lt;image_tag\u0026gt; \\ --set oudConfig.sampleData=\u0026#34;200\u0026#34; \\ --set oudConfig.resources.limits.cpu=\u0026#34;1\u0026#34;,oudConfig.resources.limits.memory=\u0026#34;8Gi\u0026#34;,oudConfig.resources.requests.cpu=\u0026#34;500m\u0026#34;,oudConfig.resources.requests.memory=\u0026#34;4Gi\u0026#34; \\ --set cronJob.kubectlImage.repository=bitnami/kubectl,cronJob.kubectlImage.tag=\u0026lt;version\u0026gt; \\ --set cronJob.imagePullSecrets[0].name=\u0026#34;dockercred\u0026#34; \\ --set imagePullSecrets[0].name=\u0026#34;orclcred\u0026#34; \\ \u0026lt;release_name\u0026gt; oud-ds-rs For example:\n$ helm install --namespace oudns \\ --set oudConfig.rootUserPassword=\u0026lt;password\u0026gt; \\ --set persistence.filesystem.hostPath.path=/scratch/shared/oud_user_projects \\ --set image.repository=container-registry.oracle.com/middleware/oud_cpu,image.tag=12.2.1.4-jdk8-ol7-\u0026lt;October`23\u0026gt; \\ --set oudConfig.sampleData=\u0026#34;200\u0026#34; \\ --set oudConfig.resources.limits.cpu=\u0026#34;1\u0026#34;,oudConfig.resources.limits.memory=\u0026#34;8Gi\u0026#34;,oudConfig.resources.requests.cpu=\u0026#34;500m\u0026#34;,oudConfig.resources.requests.memory=\u0026#34;4Gi\u0026#34; \\ --set cronJob.kubectlImage.repository=bitnami/kubectl,cronJob.kubectlImage.tag=1.26.6 \\ --set cronJob.imagePullSecrets[0].name=\u0026#34;dockercred\u0026#34; \\ --set imagePullSecrets[0].name=\u0026#34;orclcred\u0026#34; \\ oud-ds-rs oud-ds-rs The following caveats exist:\n Replace \u0026lt;password\u0026gt; with the relevant password.\n sampleData: \u0026quot;200\u0026quot; will load 200 sample users into the default baseDN dc=example,dc=com. If you do not want sample data, remove this entry.
If sampleData is set to 1,000,000 users or greater, then you must add the following entries to the yaml file to prevent inconsistencies in dsreplication: --set deploymentConfig.startupTime=720,deploymentConfig.period=120,deploymentConfig.timeout=60.\n The \u0026lt;version\u0026gt; in kubectlImage tag: should be set to the same version as your Kubernetes version (kubectl version). For example if your Kubernetes version is 1.26.6 set to 1.26.6.\n If using NFS for your persistent volume then use:\n--set persistence.networkstorage.nfs.path=\u0026lt;persistent_volume\u0026gt;/oud_user_projects,persistence.networkstorage.nfs.server=\u0026lt;NFS IP address\u0026gt; \\ --set persistence.storageClassCreate=\u0026quot;true\u0026quot;,persistence.storageClass=\u0026quot;oud-sc\u0026quot;,persistence.provisioner=\u0026quot;kubernetes.io/is-default-class\u0026quot; \\ * If you want to create your own storage class, set `storageClassCreate: true`. If `storageClassCreate: true` it is recommended to set `storageClass` to a value of your choice, and `provisioner` to the provisioner supported by your cloud vendor. * If you have an existing storageClass that supports dynamic storage, set `storageClassCreate: false` and `storageClass` to the NAME value returned in \u0026quot;`kubectl get storageclass`\u0026quot;. The `provisioner` can be ignored. If using block storage for your persistent volume then use:\n--set persistence.type=\u0026quot;blockstorage\u0026quot;,persistence.accessMode=\u0026quot;ReadWriteOnce\u0026quot; \\ --set persistence.storageClassCreate=\u0026quot;true\u0026quot;,persistence.storageClass=\u0026quot;oud-sc\u0026quot;,persistence.provisioner=\u0026quot;oracle.com/oci\u0026quot; \\ * If you want to create your own storage class, set `storageClassCreate: true`. If `storageClassCreate: true` it is recommended to set `storageClass` to a value of your choice, and `provisioner` to the provisioner supported by your cloud vendor. * If you have an existing storageClass that supports dynamic storage, set `storageClassCreate: false` and `storageClass` to the NAME value returned in \u0026quot;`kubectl get storageclass`\u0026quot;. The `provisioner` can be ignored. If you are not using Oracle Container Registry or your own container registry for your OUD container image, then you can remove the following: --set imagePullSecrets[0].name=\u0026quot;orclcred\u0026quot;.\n For resources, limits and requests, the example CPU and memory values shown are for development environments only. For Enterprise Deployments, please review the performance recommendations and sizing requirements in Enterprise Deployment Guide for Oracle Identity and Access Management in a Kubernetes Cluster.\nNote: Limits and requests for CPU resources are measured in CPU units. One CPU in Kubernetes is equivalent to 1 vCPU/Core for cloud providers, and 1 hyperthread on bare-metal Intel processors. An \u0026ldquo;m\u0026rdquo; suffix in a CPU attribute indicates ‘milli-CPU’, so 500m is 50% of a CPU. Memory can be expressed in various units, where one Mi is one IEC unit mega-byte (1024^2), and one Gi is one IEC unit giga-byte (1024^3). For more information, see Resource Management for Pods and Containers, Assign Memory Resources to Containers and Pods, and Assign CPU Resources to Containers and Pods.\nNote: The parameters above are also utilized by the Kubernetes Horizontal Pod Autoscaler (HPA).
For more details on HPA, see Kubernetes Horizontal Pod Autoscaler.\n If you plan on integrating OUD with other Oracle components then you must specify the following:\n--set oudConfig.integration=\u0026lt;Integration option\u0026gt; It is recommended to choose the option covering your minimal requirements. Allowed values include: `no-integration` (no integration), `basic` (Directory Integration Platform), `generic` (Directory Integration Platform, Database Net Services and E-Business Suite integration), `eus` (Directory Integration Platform, Database Net Services, E-Business Suite and Enterprise User Security integration). The default value is `no-integration` **Note**: This will enable the integration type only. To integrate OUD with the Oracle component referenced, refer to the relevant product component documentation. If you want to enable Assured Replication, see Enabling Assured Replication (Optional).\n Check the OUD deployment as per Verify the OUD deployment and Verify the OUD replication.\n Enabling Assured Replication (Optional) If you want to enable assured replication, perform the following steps:\n Create a directory on the persistent volume as follows:\n$ cd \u0026lt;persistent_volume\u0026gt; $ mkdir oud-repl-config $ sudo chown -R 1000:0 oud-repl-config For example:\n$ cd /scratch/shared $ mkdir oud-repl-config $ sudo chown -R 1000:0 oud-repl-config Add the following section in the oud-ds-rs-values-override.yaml:\nreplOUD: envVars: - name: post_dsreplication_dsconfig_3 value: set-replication-domain-prop --domain-name ${baseDN} --advanced --set assured-type:safe-read --set assured-sd-level:2 --set assured-timeout:5s - name: execCmd_1 value: /u01/oracle/user_projects/${OUD_INSTANCE_NAME}/OUD/bin/dsconfig --no-prompt --hostname ${sourceHost} --port ${adminConnectorPort} --bindDN \u0026quot;${rootUserDN}\u0026quot; --bindPasswordFile /u01/oracle/user_projects/${OUD_INSTANCE_NAME}/admin/rootPwdFile.txt --trustAll set-replication-domain-prop --domain-name ${baseDN} --advanced --set assured-type:safe-read --set assured-sd-level:2 --set assured-timeout:5s --provider-name \u0026quot;Multimaster Synchronization\u0026quot; configVolume: enabled: true type: networkstorage storageClassCreate: true storageClass: oud-config provisioner: kubernetes.io/is-default-class networkstorage: nfs: server: \u0026lt;IP_address\u0026gt; path: \u0026lt;persistent_volume\u0026gt;/oud-repl-config mountPath: /u01/oracle/config-input For more information on OUD Assured Replication, and other options and levels, see, Understanding the Oracle Unified Directory Replication Model.\nThe following caveats exist:\n post_dsreplication_dsconfig_N and execCmd_N should be a unique key - change the suffix accordingly. For more information on the environment variable and respective keys, see, Appendix B: Environment Variables.\n For configVolume the storage can be networkstorage(nfs) or filesystem(hostPath) as the config volume path has to be accessible from all the Kuberenetes nodes. Please note that block storage is not supported for configVolume.\n If you want to create your own storage class, set storageClassCreate: true. If storageClassCreate: true it is recommended to set storageClass to a value of your choice, and provisioner to the provisioner supported by your cloud vendor.\n If you have an existing storageClass that supports network storage, set storageClassCreate: false and storageClass to the NAME value returned in \u0026ldquo;kubectl get storageclass\u0026rdquo;. 
Please note that the storage-class should not be the one you used for the persistent volume earlier. The provisioner can be ignored.\n Helm command output In all the examples above, the following output is shown following a successful execution of the helm install command.\nNAME: oud-ds-rs LAST DEPLOYED: \u0026lt;DATE\u0026gt; NAMESPACE: oudns STATUS: deployed REVISION: 4 NOTES: # # Copyright (c) 2020, Oracle and/or its affiliates. # # Licensed under the Universal Permissive License v 1.0 as shown at # https://oss.oracle.com/licenses/upl # # Since \u0026#34;nginx\u0026#34; has been chosen, follow the steps below to configure nginx ingress controller. Add Repo reference to helm for retriving/installing Chart for nginx-ingress implementation. command-# helm repo add ingress-nginx https://kubernetes.github.io/ingress-nginx Command helm install to install nginx-ingress related objects like pod, service, deployment, etc. # helm install --namespace \u0026lt;namespace for ingress\u0026gt; --values nginx-ingress-values-override.yaml lbr-nginx ingress-nginx/ingress-nginx For details of content of nginx-ingress-values-override.yaml refer README.md file of this chart. Run these commands to check port mapping and services: # kubectl --namespace \u0026lt;namespace for ingress\u0026gt; get services -o wide -w lbr-nginx-ingress-controller # kubectl describe --namespace \u0026lt;namespace for oud-ds-rs chart\u0026gt; ingress.extensions/oud-ds-rs-http-ingress-nginx # kubectl describe --namespace \u0026lt;namespace for oud-ds-rs chart\u0026gt; ingress.extensions/oud-ds-rs-admin-ingress-nginx Accessible interfaces through ingress: (External IP Address for LoadBalancer NGINX Controller can be determined through details associated with lbr-nginx-ingress-controller) 1. OUD Admin REST: Port: http/https 2. OUD Data REST: Port: http/https 3. OUD Data SCIM: Port: http/https 4. OUD LDAP/LDAPS: Port: ldap/ldaps 5. OUD Admin LDAPS: Port: ldaps Please refer to README.md from Helm Chart to find more details about accessing interfaces and configuration parameters. 
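Note: As an optional quick check (this is a suggested additional step, not part of the chart output above), you can confirm that the Helm release itself is recorded as deployed before verifying the individual Kubernetes objects:\n$ helm list --namespace \u0026lt;namespace\u0026gt; $ helm status \u0026lt;release_name\u0026gt; --namespace \u0026lt;namespace\u0026gt; For example:\n$ helm list --namespace oudns $ helm status oud-ds-rs --namespace oudns The release should be listed with a STATUS of deployed.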
Verify the OUD deployment Run the following command to verify the OUD deployment:\n$ kubectl --namespace \u0026lt;namespace\u0026gt; get pod,service,secret,pv,pvc,ingress -o wide For example:\n$ kubectl --namespace oudns get pod,service,secret,pv,pvc,ingress -o wide The output will look similar to the following:\nNAME READY STATUS RESTARTS AGE IP NODE NOMINATED NODE READINESS GATES pod/oud-ds-rs-0 1/1 Running 0 14m 10.244.1.180 \u0026lt;Worker Node\u0026gt; \u0026lt;none\u0026gt; \u0026lt;none\u0026gt; pod/oud-ds-rs-1 1/1 Running 0 8m26s 10.244.1.181 \u0026lt;Worker Node\u0026gt; \u0026lt;none\u0026gt; \u0026lt;none\u0026gt; pod/oud-ds-rs-2 0/1 Running 0 2m24s 10.244.1.182 \u0026lt;Worker Node\u0026gt; \u0026lt;none\u0026gt; \u0026lt;none\u0026gt; pod/oud-pod-cron-job-27586680-p5d8q 0/1 Completed 0 50s 10.244.1.183 \u0026lt;Worker Node\u0026gt; \u0026lt;none\u0026gt; \u0026lt;none\u0026gt; NAME TYPE CLUSTER-IP EXTERNAL-IP PORT(S) AGE SELECTOR service/oud-ds-rs ClusterIP None \u0026lt;none\u0026gt; 1444/TCP,1888/TCP,1389/TCP,1636/TCP,1080/TCP,1081/TCP,1898/TCP 14m app.kubernetes.io/instance=oud-ds-rs,app.kubernetes.io/name=oud-ds-rs service/oud-ds-rs-0 ClusterIP None \u0026lt;none\u0026gt; 1444/TCP,1888/TCP,1898/TCP 14m app.kubernetes.io/instance=oud-ds-rs,app.kubernetes.io/name=oud-ds-rs,statefulset.kubernetes.io/pod-name=oud-ds-rs-0 service/oud-ds-rs-1 ClusterIP None \u0026lt;none\u0026gt; 1444/TCP,1888/TCP,1898/TCP 14m app.kubernetes.io/instance=oud-ds-rs,app.kubernetes.io/name=oud-ds-rs,statefulset.kubernetes.io/pod-name=oud-ds-rs-1 service/oud-ds-rs-2 ClusterIP None \u0026lt;none\u0026gt; 1444/TCP,1888/TCP,1898/TCP 14m app.kubernetes.io/instance=oud-ds-rs,app.kubernetes.io/name=oud-ds-rs,statefulset.kubernetes.io/pod-name=oud-ds-rs-2 service/oud-ds-rs-http-0 ClusterIP 10.104.112.93 \u0026lt;none\u0026gt; 1080/TCP,1081/TCP 14m app.kubernetes.io/instance=oud-ds-rs,app.kubernetes.io/name=oud-ds-rs,statefulset.kubernetes.io/pod-name=oud-ds-rs-0 service/oud-ds-rs-http-1 ClusterIP 10.103.105.70 \u0026lt;none\u0026gt; 1080/TCP,1081/TCP 14m app.kubernetes.io/instance=oud-ds-rs,app.kubernetes.io/name=oud-ds-rs,statefulset.kubernetes.io/pod-name=oud-ds-rs-1 service/oud-ds-rs-http-2 ClusterIP 10.110.160.107 \u0026lt;none\u0026gt; 1080/TCP,1081/TCP 14m app.kubernetes.io/instance=oud-ds-rs,app.kubernetes.io/name=oud-ds-rs,statefulset.kubernetes.io/pod-name=oud-ds-rs-2 service/oud-ds-rs-lbr-admin ClusterIP 10.99.238.222 \u0026lt;none\u0026gt; 1888/TCP,1444/TCP 14m app.kubernetes.io/instance=oud-ds-rs,app.kubernetes.io/name=oud-ds-rs service/oud-ds-rs-lbr-http ClusterIP 10.101.250.196 \u0026lt;none\u0026gt; 1080/TCP,1081/TCP 14m app.kubernetes.io/instance=oud-ds-rs,app.kubernetes.io/name=oud-ds-rs service/oud-ds-rs-lbr-ldap ClusterIP 10.104.149.90 \u0026lt;none\u0026gt; 1389/TCP,1636/TCP 14m app.kubernetes.io/instance=oud-ds-rs,app.kubernetes.io/name=oud-ds-rs service/oud-ds-rs-ldap-0 ClusterIP 10.109.255.221 \u0026lt;none\u0026gt; 1389/TCP,1636/TCP 14m app.kubernetes.io/instance=oud-ds-rs,app.kubernetes.io/name=oud-ds-rs,statefulset.kubernetes.io/pod-name=oud-ds-rs-0 service/oud-ds-rs-ldap-1 ClusterIP 10.111.135.142 \u0026lt;none\u0026gt; 1389/TCP,1636/TCP 14m app.kubernetes.io/instance=oud-ds-rs,app.kubernetes.io/name=oud-ds-rs,statefulset.kubernetes.io/pod-name=oud-ds-rs-1 service/oud-ds-rs-ldap-2 ClusterIP 10.100.8.145 \u0026lt;none\u0026gt; 1389/TCP,1636/TCP 14m app.kubernetes.io/instance=oud-ds-rs,app.kubernetes.io/name=oud-ds-rs,statefulset.kubernetes.io/pod-name=oud-ds-rs-2 NAME TYPE DATA AGE 
secret/dockercred kubernetes.io/dockerconfigjson 1 4h24m secret/orclcred kubernetes.io/dockerconfigjson 1 14m secret/oud-ds-rs-creds opaque 8 14m secret/oud-ds-rs-tls-cert kubernetes.io/tls 2 14m secret/sh.helm.release.v1.oud-ds-rs.v1 helm.sh/release.v1 1 14m NAME CAPACITY ACCESS MODES RECLAIM POLICY STATUS CLAIM STORAGECLASS REASON AGE VOLUMEMODE persistentvolume/oud-ds-rs-pv 20Gi RWX Delete Bound oudns/oud-ds-rs-pvc manual 14m Filesystem NAME STATUS VOLUME CAPACITY ACCESS MODES STORAGECLASS AGE VOLUMEMODE persistentvolumeclaim/oud-ds-rs-pvc Bound oud-ds-rs-pv 20Gi RWX manual 14m Filesystem NAME CLASS HOSTS ADDRESS PORTS AGE ingress.networking.k8s.io/oud-ds-rs-admin-ingress-nginx \u0026lt;none\u0026gt; oud-ds-rs-admin-0,oud-ds-rs-admin-0,oud-ds-rs-admin-1 + 3 more... 80, 443 14m ingress.networking.k8s.io/oud-ds-rs-http-ingress-nginx \u0026lt;none\u0026gt; oud-ds-rs-http-0,oud-ds-rs-http-1,oud-ds-rs-http-2 + 3 more... 80, 443 14m Note: If you are using block storage you will see slightly different entries for PV and PVC, for example:\nNAME CAPACITY ACCESS MODES RECLAIM POLICY STATUS CLAIM STORAGECLASS REASON AGE VOLUMEMODE persistentvolume/ocid1.volume.oc1.iad.\u0026lt;unique_ID\u0026gt; 50Gi RWO Delete Bound oudns/oud-ds-rs-pv-oud-ds-rs-2 oud-sc 60m Filesystem persistentvolume/ocid1.volume.oc1.iad.\u0026lt;unique_ID\u0026gt; 50Gi RWO Delete Bound oudns/oud-ds-rs-pv-oud-ds-rs-1 oud-sc 67m Filesystem persistentvolume/ocid1.volume.oc1.iad.\u0026lt;unique_ID\u0026gt; 50Gi RWO Delete Bound oudns/oud-ds-rs-pv-oud-ds-rs-3 oud-sc 45m Filesystem NAME STATUS VOLUME CAPACITY ACCESS MODES STORAGECLASS AGE VOLUMEMODE persistentvolumeclaim/oud-ds-rs-pv-oud-ds-rs-1 Bound ocid1.volume.oc1.iad.\u0026lt;unique_ID\u0026gt; 50Gi RWO oud-sc 67m Filesystem persistentvolumeclaim/oud-ds-rs-pv-oud-ds-rs-2 Bound ocid1.volume.oc1.iad.\u0026lt;unique_ID\u0026gt; 50Gi RWO oud-sc 60m Filesystem persistentvolumeclaim/oud-ds-rs-pv-oud-ds-rs-3 Bound ocid1.volume.oc1.iad.\u0026lt;unique_ID\u0026gt; 50Gi RWO oud-sc 45m Filesystem Note: Initially pod/oud-ds-rs-0 will appear with a STATUS of 0/1 and it will take approximately 5 minutes before OUD is started (1/1). Once pod/oud-ds-rs-0 has a STATUS of 1/1, pod/oud-ds-rs-1 will appear with a STATUS of 0/1. Once pod/oud-ds-rs-1 is started (1/1), pod/oud-ds-rs-2 will appear. It will take around 15 minutes for all the pods to fully start.\nWhile the oud-ds-rs pods have a STATUS of 0/1, the pod is running but the OUD server associated with it is currently starting. While the pod is starting you can check the startup status in the pod logs, by running the following command:\n$ kubectl logs \u0026lt;pod\u0026gt; -n \u0026lt;namespace\u0026gt; For example:\n$ kubectl logs oud-ds-rs-0 -n oudns
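If you would rather block until a pod reports Ready than keep polling, kubectl wait can also be used. This is a minimal sketch only (not one of the documented steps); adjust the pod name, namespace and timeout to your environment:\n$ kubectl wait --for=condition=Ready pod/oud-ds-rs-0 -n oudns --timeout=900s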
Note: If the OUD deployment fails, additionally refer to Troubleshooting for instructions on how to describe the failing pod(s). Once the problem is identified follow Undeploy an OUD deployment to clean down the deployment before deploying again.\nKubernetes Objects Kubernetes objects created by the Helm chart are detailed in the table below:\n Type Name Example Name Purpose Service Account \u0026lt;deployment/release name\u0026gt; oud-ds-rs Kubernetes Service Account for the Helm Chart deployment Secret \u0026lt;deployment/release name\u0026gt;-creds oud-ds-rs-creds Secret object for Oracle Unified Directory related critical values like passwords Persistent Volume \u0026lt;deployment/release name\u0026gt;-pv oud-ds-rs-pv Persistent Volume for user_projects mount. Persistent Volume Claim \u0026lt;deployment/release name\u0026gt;-pvc oud-ds-rs-pvc Persistent Volume Claim for user_projects mount. Persistent Volume \u0026lt;deployment/release name\u0026gt;-pv-config oud-ds-rs-pv-config Persistent Volume for mounting volume in containers for configuration files like ldif, schema, jks, java.security, etc. Persistent Volume Claim \u0026lt;deployment/release name\u0026gt;-pvc-config oud-ds-rs-pvc-config Persistent Volume Claim for mounting volume in containers for configuration files like ldif, schema, jks, java.security, etc. Pod \u0026lt;deployment/release name\u0026gt;-0 oud-ds-rs-0 Pod/Container for base Oracle Unified Directory Instance which would be populated first with base configuration (like number of sample entries) Pod \u0026lt;deployment/release name\u0026gt;-N oud-ds-rs-1, oud-ds-rs-2, \u0026hellip; Pod(s)/Container(s) for Oracle Unified Directory Instances - each would have replication enabled against base Oracle Unified Directory instance \u0026lt;deployment/release name\u0026gt;-0 Service \u0026lt;deployment/release name\u0026gt;-0 oud-ds-rs-0 Service for LDAPS Admin, REST Admin and Replication interfaces from base Oracle Unified Directory instance \u0026lt;deployment/release name\u0026gt;-0 Service \u0026lt;deployment/release name\u0026gt;-http-0 oud-ds-rs-http-0 Service for HTTP and HTTPS interfaces from base Oracle Unified Directory instance \u0026lt;deployment/release name\u0026gt;-0 Service \u0026lt;deployment/release name\u0026gt;-ldap-0 oud-ds-rs-ldap-0 Service for LDAP and LDAPS interfaces from base Oracle Unified Directory instance \u0026lt;deployment/release name\u0026gt;-0 Service \u0026lt;deployment/release name\u0026gt;-N oud-ds-rs-1, oud-ds-rs-2, \u0026hellip; Service(s) for LDAPS Admin, REST Admin and Replication interfaces from base Oracle Unified Directory instance \u0026lt;deployment/release name\u0026gt;-N Service \u0026lt;deployment/release name\u0026gt;-http-N oud-ds-rs-http-1, oud-ds-rs-http-2, \u0026hellip; Service(s) for HTTP and HTTPS interfaces from base Oracle Unified Directory instance \u0026lt;deployment/release name\u0026gt;-N Service \u0026lt;deployment/release name\u0026gt;-ldap-N oud-ds-rs-ldap-1, oud-ds-rs-ldap-2, \u0026hellip; Service(s) for LDAP and LDAPS interfaces from base Oracle Unified Directory instance \u0026lt;deployment/release name\u0026gt;-N Service \u0026lt;deployment/release name\u0026gt;-lbr-admin oud-ds-rs-lbr-admin Service for LDAPS Admin, REST Admin and Replication interfaces from all Oracle Unified Directory instances Service \u0026lt;deployment/release name\u0026gt;-lbr-http oud-ds-rs-lbr-http Service for HTTP and HTTPS interfaces from all Oracle Unified Directory instances Service \u0026lt;deployment/release name\u0026gt;-lbr-ldap oud-ds-rs-lbr-ldap Service for LDAP and LDAPS interfaces from all Oracle Unified Directory instances Ingress \u0026lt;deployment/release name\u0026gt;-admin-ingress-nginx oud-ds-rs-admin-ingress-nginx Ingress Rules for HTTP Admin interfaces. Ingress \u0026lt;deployment/release name\u0026gt;-http-ingress-nginx oud-ds-rs-http-ingress-nginx Ingress Rules for HTTP (Data/REST) interfaces. In the table above the \u0026lsquo;Example Name\u0026rsquo; for each Object is based on the value \u0026lsquo;oud-ds-rs\u0026rsquo; as the deployment/release name for the Helm chart installation. Verify the OUD replication Once all the PODs created are visible as READY (i.e.
1/1), you can verify your replication across multiple Oracle Unified Directory instances.\n To verify the replication group, connect to the container and issue an OUD administration command to show the details. The name of the container can be found by issuing the following:\n$ kubectl get pods -n \u0026lt;namespace\u0026gt; -o jsonpath=\u0026#39;{.items[*].spec.containers[*].name}\u0026#39; For example:\n$ kubectl get pods -n oudns -o jsonpath=\u0026#39;{.items[*].spec.containers[*].name}\u0026#39; The output will look similar to the following:\noud-ds-rs oud-ds-rs oud-ds-rs Once you have the container name you can verify the replication status in the following ways:\n Run dsreplication inside the pod Using kubectl commands Run dsreplication inside the pod Run the following command to create a bash shell in the pod:\n$ kubectl --namespace \u0026lt;namespace\u0026gt; exec -it -c \u0026lt;containername\u0026gt; \u0026lt;podname\u0026gt; -- bash For example:\n$ kubectl --namespace oudns exec -it -c oud-ds-rs oud-ds-rs-0 -- bash This will take you into the pod:\n[oracle@oud-ds-rs-0 oracle]$ From the prompt, use the dsreplication command to check the status of your replication group:\n$ cd /u01/oracle/user_projects/oud-ds-rs-0/OUD/bin $ ./dsreplication status --trustAll \\ --hostname oud-ds-rs-0 --port 1444 --adminUID admin \\ --dataToDisplay compat-view --dataToDisplay rs-connections The output will look similar to the following. Enter credentials where prompted:\n\u0026gt;\u0026gt;\u0026gt;\u0026gt; Specify Oracle Unified Directory LDAP connection parameters Password for user \u0026#39;admin\u0026#39;: Establishing connections and reading configuration ..... Done. dc=example,dc=com - Replication Enabled ======================================= Server : Entries : M.C. [1] : A.O.M.C. [2] : Port [3] : Encryption [4] : Trust [5] : U.C. [6] : Status [7] : ChangeLog [8] : Group ID [9] : Connected To [10] ---------------------:---------:----------:--------------:----------:----------------:-----------:----------:------------:---------------:--------------:------------------------------- oud-ds-rs-0:1444 : 202 : 0 : 0 : 1898 : Disabled : Trusted : -- : Normal : Enabled : 1 : oud-ds-rs-0:1898 : : : : : : : : : : : (GID=1) oud-ds-rs-1:1444 : 202 : 0 : 0 : 1898 : Disabled : Trusted : -- : Normal : Enabled : 1 : oud-ds-rs-1:1898 : : : : : : : : : : : (GID=1) oud-ds-rs-2:1444 : 202 : 0 : 0 : 1898 : Disabled : Trusted : -- : Normal : Enabled : 1 : oud-ds-rs-2:1898 : : : : : : : : : : : (GID=1) Replication Server [11] : RS #1 : RS #2 : RS #3 -------------------------------:-------:-------:------ oud-ds-rs-0:1898 : -- : Yes : Yes (#1) : : : oud-ds-rs-1:1898 : Yes : -- : Yes (#2) : : : oud-ds-rs-2:1898 : Yes : Yes : -- (#3) : : : [1] The number of changes that are still missing on this element (and that have been applied to at least one other server). [2] Age of oldest missing change: the age (in seconds) of the oldest change that has not yet arrived on this element. [3] The replication port used to communicate between the servers whose contents are being replicated. [4] Whether the replication communication initiated by this element is encrypted or not. [5] Whether the directory server is trusted or not. Updates coming from an untrusted server are discarded and not propagated. [6] The number of untrusted changes. These are changes generated on this server while it is untrusted. Those changes are not propagated to the rest of the topology but are effective on the untrusted server. [7] The status of the replication on this element. [8] Whether the external change log is enabled for the base DN on this server or not. [9] The ID of the replication group to which the server belongs. [10] The replication server this server is connected to with its group ID between brackets. [11] This table represents the connections between the replication servers. The headers of the columns use a number as identifier for each replication server. See the values of the first column to identify the corresponding replication server for each number. Type exit to exit the pod.\n Using kubectl commands The dsreplication status command can be invoked using the following kubectl command:\n$ kubectl --namespace \u0026lt;namespace\u0026gt; exec -it -c \u0026lt;containername\u0026gt; \u0026lt;podname\u0026gt; -- \\ /u01/oracle/user_projects/\u0026lt;OUD Instance/Pod Name\u0026gt;/OUD/bin/dsreplication status \\ --trustAll --hostname \u0026lt;OUD Instance/Pod Name\u0026gt; --port 1444 --adminUID admin \\ --dataToDisplay compat-view --dataToDisplay rs-connections For example:\n$ kubectl --namespace oudns exec -it -c oud-ds-rs oud-ds-rs-0 -- \\ /u01/oracle/user_projects/oud-ds-rs-0/OUD/bin/dsreplication status \\ --trustAll --hostname oud-ds-rs-0 --port 1444 --adminUID admin \\ --dataToDisplay compat-view --dataToDisplay rs-connections The output will be the same as per Run dsreplication inside the pod.
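As an additional, optional sanity check you can also confirm that LDAP requests are served through the load balancer service that fronts all replicated instances. This is a sketch only and is not part of the dsreplication verification; it assumes the default base DN dc=example,dc=com, the default root user cn=Directory Manager, and the oud-ds-rs-lbr-ldap service shown earlier:\n$ kubectl --namespace oudns exec -it -c oud-ds-rs oud-ds-rs-0 -- \\ /u01/oracle/user_projects/oud-ds-rs-0/OUD/bin/ldapsearch \\ -h oud-ds-rs-lbr-ldap -p 1389 -D \u0026#34;cn=Directory Manager\u0026#34; -w \u0026lt;password\u0026gt; \\ -b \u0026#34;dc=example,dc=com\u0026#34; -s base \u0026#34;(objectClass=*)\u0026#34; If the base entry is returned, the replicated instances behind the service are answering LDAP requests.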
\n Verify OUD assured replication status Note: This section only needs to be followed if you enabled assured replication as per Enabling Assured Replication (Optional).\n Run the following command to create a bash shell in the pod:\n$ kubectl --namespace \u0026lt;namespace\u0026gt; exec -it -c \u0026lt;containername\u0026gt; \u0026lt;podname\u0026gt; -- bash For example:\n$ kubectl --namespace oudns exec -it -c oud-ds-rs oud-ds-rs-0 -- bash This will take you into the pod:\n[oracle@oud-ds-rs-0 oracle]$ At the prompt, enter the following commands:\n$ echo $bindPassword1 \u0026gt; /tmp/pwd.txt $ /u01/oracle/user_projects/${OUD_INSTANCE_NAME}/OUD/bin/dsconfig --no-prompt --hostname ${OUD_INSTANCE_NAME} --port ${adminConnectorPort} --bindDN \u0026#34;${rootUserDN}\u0026#34; --bindPasswordFile /tmp/pwd.txt --trustAll get-replication-domain-prop --domain-name ${baseDN} --advanced --property assured-type --property assured-sd-level --property assured-timeout --provider-name \u0026#34;Multimaster Synchronization\u0026#34; The output will look similar to the following:\nProperty : Value(s) -----------------:---------- assured-sd-level : 2 assured-timeout : 5 s assured-type : safe-read Verify the cronjob Run the following command to make sure the cronjob is created:\n$ kubectl get cronjob -n \u0026lt;namespace\u0026gt; For example:\n$ kubectl get cronjob -n oudns The output will look similar to the following:\nNAME SCHEDULE SUSPEND ACTIVE LAST SCHEDULE AGE oud-pod-cron-job */30 * * * * False 0 5m18s 19m Run the following command to make sure the job(s) are created:\n$ kubectl get job -n \u0026lt;namespace\u0026gt; -o wide For example:\n$ kubectl get job -n oudns -o wide The output will look similar to the following:\nNAME COMPLETIONS DURATION AGE CONTAINERS IMAGES SELECTOR oud-pod-cron-job-27586680 1/1 1s 5m36s cron-kubectl bitnami/kubectl:1.26.6 controller-uid=700ab9f7-6094-488a-854d-f1b914de5f61 Disabling the cronjob If you need to disable the job, for example if maintenance needs to be performed on the node, you can disable the job as follows:\n Run the following command to edit the cronjob:\n$ kubectl edit cronjob pod-cron-job -n \u0026lt;namespace\u0026gt; For example:\n$ kubectl edit cronjob oud-pod-cron-job -n oudns Note: This opens an edit session for the cronjob where parameters can be changed using standard vi commands.\n In the edit session search for suspend and change the value from false to true:\n... schedule: '*/30 * * * *' successfulJobsHistoryLimit: 3 suspend: true ... Save the file and exit (wq!).\n Run the following to make sure the cronjob is suspended:\n$ kubectl get cronjob -n \u0026lt;namespace\u0026gt; For example:\n$ kubectl get cronjob -n oudns The output will look similar to the following:\nNAME SCHEDULE SUSPEND ACTIVE LAST SCHEDULE AGE oud-pod-cron-job */30 * * * * True 0 7m47s 21m To enable the cronjob again, repeat the above steps and set suspend to false.
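Alternatively, the same change can be made without an interactive edit session by patching the cronjob. This is a minimal sketch only, using the example cronjob name and namespace from this guide:\n$ kubectl patch cronjob oud-pod-cron-job -n oudns -p \u0026#39;{\u0026#34;spec\u0026#34;: {\u0026#34;suspend\u0026#34;: true}}\u0026#39; Patching suspend back to false in the same way re-enables it.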
\n Ingress Configuration With an OUD instance now deployed you are now ready to configure an ingress controller to direct traffic to OUD as per Configure an ingress for an OUD.\nUndeploy an OUD deployment Delete the OUD deployment Find the deployment release name:\n$ helm --namespace \u0026lt;namespace\u0026gt; list For example:\n$ helm --namespace oudns list The output will look similar to the following:\nNAME NAMESPACE REVISION UPDATED STATUS CHART APP VERSION oud-ds-rs oudns 1 \u0026lt;DATE\u0026gt; deployed oud-ds-rs-0.2 12.2.1.4.0 Delete the deployment using the following command:\n$ helm uninstall --namespace \u0026lt;namespace\u0026gt; \u0026lt;release\u0026gt; For example:\n$ helm uninstall --namespace oudns oud-ds-rs release \u0026#34;oud-ds-rs\u0026#34; uninstalled Run the following command to view the status:\n$ kubectl --namespace oudns get pod,service,secret,pv,pvc,ingress -o wide Initially the pods, persistent volume (PV) and persistent volume claim (PVC) will move to a Terminating status:\nNAME READY STATUS RESTARTS AGE IP NODE NOMINATED NODE READINESS GATES pod/oud-ds-rs-0 1/1 Terminating 0 24m 10.244.1.180 \u0026lt;Worker Node\u0026gt; \u0026lt;none\u0026gt; \u0026lt;none\u0026gt; pod/oud-ds-rs-1 1/1 Terminating 0 18m 10.244.1.181 \u0026lt;Worker Node\u0026gt; \u0026lt;none\u0026gt; \u0026lt;none\u0026gt; pod/oud-ds-rs-2 1/1 Terminating 0 12m 10.244.1.182 \u0026lt;Worker Node\u0026gt; \u0026lt;none\u0026gt; \u0026lt;none\u0026gt; NAME TYPE DATA AGE secret/default-token-msmmd kubernetes.io/service-account-token 3 3d20h secret/dockercred kubernetes.io/dockerconfigjson 1 3d20h secret/orclcred kubernetes.io/dockerconfigjson 1 3d20h NAME CAPACITY ACCESS MODES RECLAIM POLICY STATUS CLAIM STORAGECLASS REASON AGE VOLUMEMODE persistentvolume/oud-ds-rs-pv 20Gi RWX Delete Terminating oudns/oud-ds-rs-pvc manual 24m Filesystem NAME STATUS VOLUME CAPACITY ACCESS MODES STORAGECLASS AGE VOLUMEMODE persistentvolumeclaim/oud-ds-rs-pvc Terminating oud-ds-rs-pv 20Gi RWX manual 24m Filesystem Run the command again until the pods, PV and PVC disappear.\n If the PV or PVC\u0026rsquo;s don\u0026rsquo;t delete, remove them manually:\n$ kubectl delete pvc oud-ds-rs-pvc -n oudns $ kubectl delete pv oud-ds-rs-pv -n oudns Note: If using block storage, you will see a PV and PVC for each pod.
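To confirm the exact names before removing them (a quick optional check; the names shown in the earlier block storage example output follow the pattern oud-ds-rs-pv-oud-ds-rs-N), list them first:\n$ kubectl get pvc -n oudns $ kubectl get pv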
Delete all of the PVC\u0026rsquo;s and PV\u0026rsquo;s using the above commands.\n Delete the persistent volume contents Note: The steps below are not relevant for block storage.\n Delete the contents of the oud_user_projects directory in the persistent volume:\n$ cd \u0026lt;persistent_volume\u0026gt;/oud_user_projects $ rm -rf * For example:\n$ cd /scratch/shared/oud_user_projects $ rm -rf * Appendix A: Configuration Parameters The following table lists the configurable parameters of the oud-ds-rs chart and their default values.\n Parameter Description Default Value replicaCount Number of DS+RS instances/pods/services to be created with replication enabled against a base Oracle Unified Directory instance/pod. 3 restartPolicyName restartPolicy to be configured for each POD containing Oracle Unified Directory instance OnFailure image.repository Oracle Unified Directory Image Registry/Repository and name. Based on this, image parameter would be configured for Oracle Unified Directory pods/containers oracle/oud image.tag Oracle Unified Directory Image Tag. Based on this, image parameter would be configured for Oracle Unified Directory pods/containers 12.2.1.4.0 image.pullPolicy policy to pull the image IfnotPresent imagePullSecrets.name name of Secret resource containing private registry credentials regcred nameOverride override the fullname with this name fullnameOverride Overrides the fullname with the provided string serviceAccount.create Specifies whether a service account should be created true serviceAccount.name If not set and create is true, a name is generated using the fullname template oud-ds-rs-\u0026lt; fullname \u0026gt;-token-\u0026lt; randomalphanum \u0026gt; podSecurityContext Security context policies to add to the controller pod securityContext Security context policies to add by default service.type type of controller service to create ClusterIP nodeSelector node labels for pod assignment tolerations node taints to tolerate affinity node/pod affinities ingress.enabled true ingress.type Supported value: nginx nginx ingress.nginx.http.host Hostname to be used with Ingress Rules. If not set, hostname would be configured according to fullname. Hosts would be configured as \u0026lt; fullname \u0026gt;-http.\u0026lt; domain \u0026gt;, \u0026lt; fullname \u0026gt;-http-0.\u0026lt; domain \u0026gt;, \u0026lt; fullname \u0026gt;-http-1.\u0026lt; domain \u0026gt;, etc. ingress.nginx.http.domain Domain name to be used with Ingress Rules. In ingress rules, hosts would be configured as \u0026lt; host \u0026gt;.\u0026lt; domain \u0026gt;, \u0026lt; host \u0026gt;-0.\u0026lt; domain \u0026gt;, \u0026lt; host \u0026gt;-1.\u0026lt; domain \u0026gt;, etc. ingress.nginx.http.backendPort http ingress.nginx.http.nginxAnnotations { kubernetes.io/ingress.class: \u0026ldquo;nginx\u0026quot;} ingress.nginx.admin.host Hostname to be used with Ingress Rules. If not set, hostname would be configured according to fullname. Hosts would be configured as \u0026lt; fullname \u0026gt;-admin.\u0026lt; domain \u0026gt;, \u0026lt; fullname \u0026gt;-admin-0.\u0026lt; domain \u0026gt;, \u0026lt; fullname \u0026gt;-admin-1.\u0026lt; domain \u0026gt;, etc. ingress.nginx.admin.domain Domain name to be used with Ingress Rules. In ingress rules, hosts would be configured as \u0026lt; host \u0026gt;.\u0026lt; domain \u0026gt;, \u0026lt; host \u0026gt;-0.\u0026lt; domain \u0026gt;, \u0026lt; host \u0026gt;-1.\u0026lt; domain \u0026gt;, etc. 
ingress.nginx.admin.nginxAnnotations { kubernetes.io/ingress.class: \u0026ldquo;nginx\u0026rdquo; nginx.ingress.kubernetes.io/backend-protocol: \u0026ldquo;https\u0026quot;} ingress.ingress.tlsSecret Secret name to use an already created TLS Secret. If such secret is not provided, one would be created with name \u0026lt; fullname \u0026gt;-tls-cert. If the TLS Secret is in different namespace, name can be mentioned as \u0026lt; namespace \u0026gt;/\u0026lt; tlsSecretName \u0026gt; ingress.certCN Subject\u0026rsquo;s common name (cn) for SelfSigned Cert. \u0026lt; fullname \u0026gt; ingress.certValidityDays Validity of Self-Signed Cert in days 365 secret.enabled If enabled it will use the secret created with base64 encoding. if value is false, secret would not be used and input values (through \u0026ndash;set, \u0026ndash;values, etc.) would be used while creation of pods. true secret.name secret name to use an already created Secret oud-ds-rs-\u0026lt; fullname \u0026gt;-creds secret.type Specifies the type of the secret Opaque persistence.enabled If enabled, it will use the persistent volume. if value is false, PV and PVC would not be used and pods would be using the default emptyDir mount volume. true persistence.pvname pvname to use an already created Persistent Volume , If blank will use the default name oud-ds-rs-\u0026lt; fullname \u0026gt;-pv persistence.pvcname pvcname to use an already created Persistent Volume Claim , If blank will use default name oud-ds-rs-\u0026lt; fullname \u0026gt;-pvc persistence.type supported values: either filesystem or networkstorage or blockstorage or custom filesystem persistence.filesystem.hostPath.path The path location mentioned should be created and accessible from the local host provided with necessary privileges for the user. /scratch/shared/oud_user_projects persistence.networkstorage.nfs.path Path of NFS Share location /scratch/shared/oud_user_projects persistence.networkstorage.nfs.server IP or hostname of NFS Server 0.0.0.0 persistence.custom.* Based on values/data, YAML content would be included in PersistenceVolume Object persistence.accessMode Specifies the access mode of the location provided. ReadWriteMany for Filesystem/NFS, ReadWriteOnce for block storage. ReadWriteMany persistence.size Specifies the size of the storage 10Gi persistence.storageClassCreate if true, it will create the storageclass. if value is false, please provide existing storage class (storageClass) to be used. empty persistence.storageClass Specifies the storageclass of the persistence volume. empty persistence.provisioner If storageClassCreate is true, provide the custom provisioner if any . kubernetes.io/is-default-class persistence.annotations specifies any annotations that will be used { } configVolume.enabled If enabled, it will use the persistent volume. If value is false, PV and PVC would not be used and pods would be using the default emptyDir mount volume. true configVolume.mountPath If enabled, it will use the persistent volume. 
If value is false, PV and PVC would not be used and there would not be any mount point available for config false configVolume.pvname pvname to use an already created Persistent Volume , If blank will use the default name oud-ds-rs-\u0026lt; fullname \u0026gt;-pv-config configVolume.pvcname pvcname to use an already created Persistent Volume Claim , If blank will use default name oud-ds-rs-\u0026lt; fullname \u0026gt;-pvc-config configVolume.type supported values: either filesystem or networkstorage or custom filesystem configVolume.filesystem.hostPath.path The path location mentioned should be created and accessible from the local host provided with necessary privileges for the user. /scratch/shared/oud_user_projects configVolume.networkstorage.nfs.path Path of NFS Share location /scratch/shared/oud_config configVolume.networkstorage.nfs.server IP or hostname of NFS Server 0.0.0.0 configVolume.custom.* Based on values/data, YAML content would be included in PersistenceVolume Object configVolume.accessMode Specifies the access mode of the location provided ReadWriteMany configVolume.size Specifies the size of the storage 10Gi configVolume.storageClass Specifies the storageclass of the persistence volume. empty configVolume.annotations Specifies any annotations that will be used { } configVolume.storageClassCreate If true, it will create the storageclass. if value is false, provide existing storage class (storageClass) to be used. true configVolume.provisioner If configVolume.storageClassCreate is true, please provide the custom provisioner if any. kubernetes.io/is-default-class oudPorts.adminldaps Port on which Oracle Unified Directory Instance in the container should listen for Administration Communication over LDAPS Protocol 1444 oudPorts.adminhttps Port on which Oracle Unified Directory Instance in the container should listen for Administration Communication over HTTPS Protocol. 1888 oudPorts.ldap Port on which Oracle Unified Directory Instance in the container should listen for LDAP Communication. 1389 oudPorts.ldaps Port on which Oracle Unified Directory Instance in the container should listen for LDAPS Communication. 1636 oudPorts.http Port on which Oracle Unified Directory Instance in the container should listen for HTTP Communication. 1080 oudPorts.https Port on which Oracle Unified Directory Instance in the container should listen for HTTPS Communication. 1081 oudPorts.replication Port value to be used while setting up replication server. 1898 oudConfig.baseDN BaseDN for Oracle Unified Directory Instances dc=example,dc=com oudConfig.rootUserDN Root User DN for Oracle Unified Directory Instances cn=Directory Manager oudConfig.rootUserPassword Password for Root User DN RandomAlphanum oudConfig.sampleData To specify that the database should be populated with the specified number of sample entries. 0 oudConfig.sleepBeforeConfig Based on the value for this parameter, initialization/configuration of each Oracle Unified Directory replica would be delayed. 120 oudConfig.adminUID AdminUID to be configured with each replicated Oracle Unified Directory instance admin oudConfig.adminPassword Password for AdminUID. If the value is not passed, value of rootUserPassword would be used as password for AdminUID. rootUserPassword baseOUD.envVarsConfigMap Reference to ConfigMap which can contain additional environment variables to be passed on to POD for Base Oracle Unified Directory Instance. Following are the environment variables which would not be honored from the ConfigMap. 
instanceType, sleepBeforeConfig, OUD_INSTANCE_NAME, hostname, baseDN, rootUserDN, rootUserPassword, adminConnectorPort, httpAdminConnectorPort, ldapPort, ldapsPort, httpPort, httpsPort, replicationPort, sampleData. - baseOUD.envVars Environment variables in Yaml Map format. This is helpful when it is required to pass environment variables through \u0026ndash;values file. The list of env variables which would not be honored from the envVars map is the same as the list of env var names mentioned for envVarsConfigMap. For a full list of environment variables, see Appendix B: Environment Variables. - replOUD.envVarsConfigMap Reference to ConfigMap which can contain additional environment variables to be passed on to PODs for Replicated Oracle Unified Directory Instances. Following are the environment variables which would not be honored from the ConfigMap. instanceType, sleepBeforeConfig, OUD_INSTANCE_NAME, hostname, baseDN, rootUserDN, rootUserPassword, adminConnectorPort, httpAdminConnectorPort, ldapPort, ldapsPort, httpPort, httpsPort, replicationPort, sampleData, sourceHost, sourceServerPorts, sourceAdminConnectorPort, sourceReplicationPort, dsreplication_1, dsreplication_2, dsreplication_3, dsreplication_4, post_dsreplication_dsconfig_1, post_dsreplication_dsconfig_2 - replOUD.envVars Environment variables in Yaml Map format. This is helpful when it is required to pass environment variables through \u0026ndash;values file. The list of env variables which would not be honored from the envVars map is the same as the list of env var names mentioned for envVarsConfigMap. For a full list of environment variables, see Appendix B: Environment Variables. - podManagementPolicy Defines the policy for pod management within the statefulset. Typical values are OrderedReady/Parallel OrderedReady updateStrategy Allows you to configure and disable automated rolling updates for containers, labels, resource request/limits, and annotations for the Pods in a StatefulSet. Typical values are OnDelete/RollingUpdate RollingUpdate busybox.image busybox image name. Used for initcontainers busybox oudConfig.cleanupbeforeStart Used to remove the individual pod directories during restart. Recommended value is false. Note: Do not change the default value (false) as it will delete the existing data and clone it from base pod again. false oudConfig.disablereplicationbeforeStop This parameter is used to disable replication when a pod is restarted. Recommended value is false. Note: Do not change the default value (false), as changing the value will result in an issue where the pod won\u0026rsquo;t join the replication topology after a restart. false oudConfig.resources.requests.memory This parameter is used to set the memory request for the OUD pod 4Gi oudConfig.resources.requests.cpu This parameter is used to set the cpu request for the OUD pod 0.5 oudConfig.resources.limits.memory This parameter is used to set the memory limit for the OUD pod 4Gi oudConfig.resources.limits.cpu This parameter is used to set the cpu limit for the OUD pod 1 replOUD.groupId Group ID to be used/configured with each Oracle Unified Directory instance in replicated topology. 1 service.lbrtype Type of load balancer Service to be created for admin, http, ldap services. Values allowed: ClusterIP/NodePort ClusterIP oudPorts.nodePorts.adminldaps Public port on which the OUD instance in the container should listen for administration communication over LDAPS Protocol. The port number should be between 30000-32767. No duplicate values are allowed.
Note: Set only if service.lbrtype is set as NodePort. If left blank then k8s will assign random ports in between 30000 and 32767. oudPorts.nodePorts.adminhttps Public port on which the OUD instance in the container should listen for administration communication over HTTPS Protocol. The port number should be between 30000-32767. No duplicate values are allowed. Note: Set only if service.lbrtype is set as NodePort. If left blank then k8s will assign random ports in between 30000 and 32767. oudPorts.nodePorts.ldap Public port on which the OUD instance in the container should listen for LDAP communication. The port number should be between 30000-32767. No duplicate values are allowed. Note: Set only if service.lbrtype is set as NodePort. If left blank then k8s will assign random ports in between 30000 and 32767. oudPorts.nodePorts.ldaps Public port on which the OUD instance in the container should listen for LDAPS communication. The port number should be between 30000-32767. No duplicate values are allowed. Note: Set only if service.lbrtype is set as NodePort. If left blank then k8s will assign random ports in between 30000 and 32767. oudPorts.nodePorts.http Public port on which the OUD instance in the container should listen for HTTP communication. The port number should be between 30000-32767. No duplicate values are allowed. Note: Set only if service.lbrtype is set as NodePort. If left blank then k8s will assign random ports in between 30000 and 32767. oudPorts.nodePorts.https Public port on which the OUD instance in the container should listen for HTTPS communication. The port number should be between 30000-32767. No duplicate values are allowed. Note: Set only if service.lbrtype is set as NodePort. If left blank then k8s will assign random ports in between 30000 and 32767. oudConfig.integration Specifies which Oracle components the server can be integrated with. It is recommended to choose the option covering your minimal requirements. Allowed values: no-integration (no integration), basic (Directory Integration Platform), generic (Directory Integration Platform, Database Net Services and E-Business Suite integration), eus (Directory Integration Platform, Database Net Services, E-Business Suite and Enterprise User Security integration) no-integration elk.logStashImage The version of logstash you want to install logstash:8.3.1 elk.sslenabled If SSL is enabled for ELK set the value to true, or if NON-SSL set to false. This value must be lowercase TRUE elk.eshosts The URL for sending logs to Elasticsearch. HTTP if NON-SSL is used https://elasticsearch.example.com:9200 elk.esuser The name of the user for logstash to access Elasticsearch logstash_internal elk.espassword The password for ELK_USER password elk.esapikey The API key details apikey elk.esindex The log name oudlogs-00001 elk.imagePullSecrets secret to be used for pulling logstash image dockercred Appendix B: Environment Variables Environment Variable Description Default Value ldapPort Port on which the Oracle Unified Directory instance in the container should listen for LDAP communication. Use \u0026lsquo;disabled\u0026rsquo; if you do not want to enable it. 1389 ldapsPort Port on which the Oracle Unified Directory instance in the container should listen for LDAPS communication. Use \u0026lsquo;disabled\u0026rsquo; if you do not want to enable it. 1636 rootUserDN DN for the Oracle Unified Directory instance root user. \u0026mdash;\u0026mdash; rootUserPassword Password for the Oracle Unified Directory instance root user. 
\u0026mdash;\u0026mdash; adminConnectorPort Port on which the Oracle Unified Directory instance in the container should listen for administration communication over LDAPS. Use \u0026lsquo;disabled\u0026rsquo; if you do not want to enable it. Note that at least one of the LDAP or the HTTP administration ports must be enabled. 1444 httpAdminConnectorPort Port on which the Oracle Unified Directory Instance in the container should listen for Administration Communication over HTTPS Protocol. Use \u0026lsquo;disabled\u0026rsquo; if you do not want to enable it. Note that at least one of the LDAP or the HTTP administration ports must be enabled. 1888 httpPort Port on which the Oracle Unified Directory Instance in the container should listen for HTTP Communication. Use \u0026lsquo;disabled\u0026rsquo; if you do not want to enable it. 1080 httpsPort Port on which the Oracle Unified Directory Instance in the container should listen for HTTPS Communication. Use \u0026lsquo;disabled\u0026rsquo; if you do not want to enable it. 1081 sampleData Specifies the number of sample entries to populate the Oracle Unified Directory instance with on creation. If this parameter has a non-numeric value, the parameter addBaseEntry is added to the command instead of sampleData. Similarly, when the ldifFile_n parameter is specified sampleData will not be considered and ldifFile entries will be populated. 0 adminUID User ID of the Global Administrator to use to bind to the server. This parameter is primarily used with the dsreplication command. \u0026mdash;\u0026mdash; adminPassword Password for adminUID \u0026mdash;\u0026mdash; bindDN1 BindDN to be used while setting up replication using dsreplication to connect to First Directory/Replication Instance. \u0026mdash;\u0026mdash; bindPassword1 Password for bindDN1 \u0026mdash;\u0026mdash; bindDN2 BindDN to be used while setting up replication using dsreplication to connect to Second Directory/Replication Instance. \u0026mdash;\u0026mdash; bindPassword2 Password for bindDN2 \u0026mdash;\u0026mdash; replicationPort Port value to be used while setting up a replication server. This variable is used to substitute values in dsreplication parameters. 1898 sourceHost Value for the hostname to be used while setting up a replication server. This variable is used to substitute values in dsreplication parameters. \u0026mdash;\u0026mdash; initializeFromHost Value for the hostname to be used while initializing data on a new Oracle Unified Directory instance replicated from an existing instance. This variable is used to substitute values in dsreplication parameters. It is possible to have a different value for sourceHost and initializeFromHost while setting up replication with Replication Server, sourceHost can be used for the Replication Server and initializeFromHost can be used for an existing Directory instance from which data will be initialized. $sourceHost serverTuning Values to be used to tune JVM settings. The default value is jvm-default. If specific tuning parameters are required, they can be added using this variable. jvm-default offlineToolsTuning Values to be used to specify the tuning for offline tools. This variable if not specified will consider jvm-default as the default or specify the complete set of values with options if wanted to set to specific tuning jvm-default generateSelfSignedCertificate Set to \u0026ldquo;true\u0026rdquo; if the requirement is to generate a self signed certificate when creating an Oracle Unified Directory instance. 
If no value is provided, this value takes the default, \u0026ldquo;true\u0026rdquo;. If using a certificate generated separately this value should be set to \u0026ldquo;false\u0026rdquo;. true usePkcs11Keystore Use a certificate in a PKCS#11 token that the replication gateway will use as the server certificate when accepting encrypted connections from the Oracle Directory Server Enterprise Edition server. Set to \u0026ldquo;true\u0026rdquo; if the requirement is to use the usePkcs11Keystore parameter when creating an Oracle Unified Directory instance. By default this parameter is not set. To use this option generateSelfSignedCertificate should be set to \u0026ldquo;false\u0026rdquo;. \u0026mdash;\u0026mdash; enableStartTLS Enable StartTLS to allow secure communication with the directory server by using the LDAP port. By default this parameter is not set. To use this option generateSelfSignedCertificate should be set to \u0026ldquo;false\u0026rdquo;. \u0026mdash;\u0026mdash; useJCEKS Specifies the path of a JCEKS that contains a certificate that the replication gateway will use as server certificate when accepting encrypted connections from the Oracle Directory Server Enterprise Edition server. If required this should specify the keyStorePath, for example, /u01/oracle/config/keystore. \u0026mdash;\u0026mdash; useJavaKeystore Specify the path to the Java Keystore (JKS) that contains the server certificate. If required this should specify the path to the JKS, for example, /u01/oracle/config/keystore. By default this parameter is not set. To use this option generateSelfSignedCertificate should be set to \u0026ldquo;false\u0026rdquo;. \u0026mdash;\u0026mdash; usePkcs12keyStore Specify the path to the PKCS#12 keystore that contains the server certificate. If required this should specify the path, for example, /u01/oracle/config/keystore.p12. By default this parameter is not set. \u0026mdash;\u0026mdash; keyStorePasswordFile Use the password in the specified file to access the certificate keystore. A password is required when you specify an existing certificate (JKS, JCEKS, PKCS#11, or PKCS#12) as a server certificate. If required this should specify the path of the password file, for example, /u01/oracle/config/keystorepassword.txt. By default this parameter is not set. \u0026mdash;\u0026mdash; eusPasswordScheme Set password storage scheme, if configuring Oracle Unified Directory for Enterprise User Security. Set this to a value of either \u0026ldquo;sha1\u0026rdquo; or \u0026ldquo;sha2\u0026rdquo;. By default this parameter is not set. \u0026mdash;\u0026mdash; jmxPort Port on which the Directory Server should listen for JMX communication. Use \u0026lsquo;disabled\u0026rsquo; if you do not want to enable it. disabled javaSecurityFile Specify the path to the Java security file. If required this should specify the path, for example, /u01/oracle/config/new_security_file. By default this parameter is not set. \u0026mdash;\u0026mdash; schemaConfigFile_n \u0026lsquo;n\u0026rsquo; in the variable name represents a numeric value between 1 and 50. This variable is used to set the full path of LDIF files that need to be passed to the Oracle Unified Directory instance for schema configuration/extension. If required this should specify the path, for example, schemaConfigFile_1=/u01/oracle/config/00_test.ldif. \u0026mdash;\u0026mdash; ldifFile_n \u0026lsquo;n\u0026rsquo; in the variable name represents a numeric value between 1 and 50.
This variable is used to set the full path of LDIF files that need to be passed to the Oracle Unified Directory instance for initial data population. If required this should specify the path, for example, ldifFile_1=/u01/oracle/config/test1.ldif. \u0026mdash;\u0026mdash; dsconfigBatchFile_n \u0026lsquo;n\u0026rsquo; in the variable name represents a numeric value between 1 and 50. This variable is used to set the full path of LDIF files that need to be passed to the Oracle Unified Directory instance for batch processing by the dsconfig command. If required this should specify the path, for example, dsconfigBatchFile_1=/u01/oracle/config/dsconfig_1.txt. When executing the dsconfig command the following values are added implicitly to the arguments contained in the batch file : ${hostname}, ${adminConnectorPort}, ${bindDN} and ${bindPasswordFile} \u0026mdash;\u0026mdash; dstune_n \u0026lsquo;n\u0026rsquo; in the variable name represents a numeric value between 1 and 50. Allows commands and options to be passed to the dstune utility as a full command. \u0026mdash;\u0026mdash; dsconfig_n \u0026lsquo;n\u0026rsquo; in the variable name represents a numeric value between 1 and 300. Each file represents a set of execution parameters for the dsconfig command. For each dsconfig execution, the following variables are added implicitly : ${hostname}, ${adminConnectorPort}, ${bindDN}, ${bindPasswordFile}. \u0026mdash;\u0026mdash; dsreplication_n \u0026lsquo;n\u0026rsquo; in the variable name represents a numeric value between 1 and 50. Each file represents a set of execution parameters for the dsreplication command. For each dsreplication execution, the following variables are added implicitly : ${hostname}, ${ldapPort}, ${ldapsPort}, ${adminConnectorPort}, ${replicationPort}, ${sourceHost}, ${initializeFromHost}, and ${baseDN}. Depending on the dsreplication sub-command, the following variables are added implicitly : ${bindDN1}, ${bindPasswordFile1}, ${bindDN2}, ${bindPasswordFile2}, ${adminUID}, and ${adminPasswordFile}. \u0026mdash;\u0026mdash; post_dsreplication_dsconfig_n \u0026lsquo;n\u0026rsquo; in the variable name represents a numeric value between 1 and 300. Each file represents a set of execution parameters for the dsconfig command to be run following execution of the dsreplication command. For each dsconfig execution, the following variables/values are added implicitly : \u0026ndash;provider-name \u0026ldquo;Multimaster Synchronization\u0026rdquo;, ${hostname}, ${adminConnectorPort}, ${bindDN}, ${bindPasswordFile}. \u0026mdash;\u0026mdash; rebuildIndex_n \u0026lsquo;n\u0026rsquo; in the variable name represents a numeric value between 1 and 50. Each file represents a set of execution parameters for the rebuild-index command. For each rebuild-index execution, the following variables are added implicitly : ${hostname}, ${adminConnectorPort}, ${bindDN}, ${bindPasswordFile}, and ${baseDN}. \u0026mdash;\u0026mdash; manageSuffix_n \u0026lsquo;n\u0026rsquo; in the variable name represents a numeric value between 1 and 50. Each file represents a set of execution parameters for the manage-suffix command. For each manage-suffix execution, the following variables are added implicitly : ${hostname}, ${adminConnectorPort}, ${bindDN}, ${bindPasswordFile}. \u0026mdash;\u0026mdash; importLdif_n \u0026lsquo;n\u0026rsquo; in the variable name represents a numeric value between 1 and 50. Each file represents a set of execution parameters for the import-ldif command. 
For each import-ldif execution, the following variables are added implicitly : ${hostname}, ${adminConnectorPort}, ${bindDN}, ${bindPasswordFile}. \u0026mdash;\u0026mdash; execCmd_n \u0026lsquo;n\u0026rsquo; in the variable name represents a numeric value between 1 and 300. Each file represents a command to be executed in the container. For each command execution, the following variables are replaced, if present in the command : ${hostname}, ${ldapPort}, ${ldapsPort}, ${adminConnectorPort}. \u0026mdash;\u0026mdash; restartAfterRebuildIndex Specifies whether to restart the server after building the index. false restartAfterSchemaConfig Specifies whether to restart the server after configuring the schema. false Note For the following parameters above, the following statement applies:\n dsconfig_n dsreplication_n post_dsreplication_dsconfig_n rebuildIndex_n manageSuffix_n importLdif_n execCmd_n If values are provided the following variables will be substituted with their values: ${hostname},${ldapPort},${ldapsPort},${adminConnectorPort},${replicationPort},${sourceHost},${initializeFromHost},${sourceAdminConnectorPort},${sourceReplicationPort},${baseDN},${rootUserDN},${adminUID},${rootPwdFile},${bindPasswordFile},${adminPwdFile},${bindPwdFile1},${bindPwdFile2}\n" +}, +{ + "uri": "/fmw-kubernetes/23.4.1/idm-products/oudsm/create-oudsm-instances/", + "title": "Create Oracle Unified Directory Services Manager Instances", + "tags": [], + "description": "This document provides details of the oudsm Helm chart.", + "content": " Introduction Create a Kubernetes namespace Create a Kubernetes secret for the container registry Create a persistent volume directory The oudsm Helm chart Create OUDSM instances Helm command output Verify the OUDSM deployment Undeploy an OUDSM deployment Appendix: Configuration parameters Introduction This chapter demonstrates how to deploy Oracle Unified Directory Services Manager (OUDSM) 12c instance(s) using the Helm package manager for Kubernetes.\nBased on the configuration, this chart deploys the following objects in the specified namespace of a Kubernetes cluster.\n Service Account Secret Persistent Volume and Persistent Volume Claim Pod(s)/Container(s) for Oracle Unified Directory Services Manager Instances Services for interfaces exposed through Oracle Unified Directory Services Manager Instances Ingress configuration Create a Kubernetes namespace Create a Kubernetes namespace for the OUDSM deployment by running the following command:\n$ kubectl create namespace \u0026lt;namespace\u0026gt; For example:\n$ kubectl create namespace oudsmns The output will look similar to the following:\nnamespace/oudsmns created Create a Kubernetes secret for the container registry Create a Kubernetes secret that stores the credentials for the container registry where the OUDSM image is stored. This step must be followed if using Oracle Container Registry or your own private container registry. 
If you are not using a container registry and have loaded the images on each of the master and worker nodes, you can skip this step.\n Run the following command to create the secret:\nkubectl create secret docker-registry \u0026#34;orclcred\u0026#34; --docker-server=\u0026lt;CONTAINER_REGISTRY\u0026gt; \\ --docker-username=\u0026#34;\u0026lt;USER_NAME\u0026gt;\u0026#34; \\ --docker-password=\u0026lt;PASSWORD\u0026gt; --docker-email=\u0026lt;EMAIL_ID\u0026gt; \\ --namespace=\u0026lt;domain_namespace\u0026gt; For example, if using Oracle Container Registry:\nkubectl create secret docker-registry \u0026#34;orclcred\u0026#34; --docker-server=container-registry.oracle.com \\ --docker-username=\u0026#34;user@example.com\u0026#34; \\ --docker-password=password --docker-email=user@example.com \\ --namespace=oudsmns Replace \u0026lt;USER_NAME\u0026gt; and \u0026lt;PASSWORD\u0026gt; with the credentials for the registry with the following caveats:\n If using Oracle Container Registry to pull the OUDSM container image, this is the username and password used to login to Oracle Container Registry. Before you can use this image you must login to Oracle Container Registry, navigate to Middleware \u0026gt; oudsm_cpu and accept the license agreement.\n If using your own container registry to store the OUDSM container image, this is the username and password (or token) for your container registry.\n The output will look similar to the following:\nsecret/orclcred created Create a persistent volume directory As referenced in Prerequisites the nodes in the Kubernetes cluster must have access to a persistent volume such as a Network File System (NFS) mount or a shared file system.\nIn this example /scratch/shared/ is a shared directory accessible from all nodes.\n On the master node run the following command to create an oudsm_user_projects directory:\n$ cd \u0026lt;persistent_volume\u0026gt; $ mkdir oudsm_user_projects $ sudo chown -R 1000:0 oudsm_user_projects For example:\n$ cd /scratch/shared $ mkdir oudsm_user_projects $ sudo chown -R 1000:0 oudsm_user_projects On the master node run the following to ensure it is possible to read and write to the persistent volume:\n$ cd \u0026lt;persistent_volume\u0026gt;/oudsm_user_projects $ touch filemaster.txt $ ls filemaster.txt For example:\n$ cd /scratch/shared/oudsm_user_projects $ touch filemaster.txt $ ls filemaster.txt On the first worker node run the following to ensure it is possible to read and write to the persistent volume:\n$ cd /scratch/shared/oudsm_user_projects $ ls filemaster.txt $ touch fileworker1.txt $ ls fileworker1.txt Repeat the above for any other worker nodes e.g. fileworker2.txt etc. Once proven that it\u0026rsquo;s possible to read and write from each node to the persistent volume, delete the files created.
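For example, using the hypothetical test file names above (remove whichever files you actually created):\n$ cd /scratch/shared/oudsm_user_projects $ rm filemaster.txt fileworker1.txt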
\n The oudsm Helm chart The oudsm Helm chart allows you to create or deploy Oracle Unified Directory Services Manager instances along with Kubernetes objects in a specified namespace.\nThe deployment can be initiated by running the following Helm command with reference to the oudsm Helm chart, along with configuration parameters according to your environment.\ncd $WORKDIR/kubernetes/helm $ helm install --namespace \u0026lt;namespace\u0026gt; \\ \u0026lt;Configuration Parameters\u0026gt; \\ \u0026lt;deployment/release name\u0026gt; \\ \u0026lt;Helm Chart Path/Name\u0026gt; Configuration Parameters (override values in chart) can be passed on with --set arguments on the command line and/or with -f / --values arguments when referring to files.\nNote: The examples in Create OUDSM instances below provide values which allow the user to override the default values provided by the Helm chart. A full list of configuration parameters and their default values is shown in Appendix: Configuration parameters.\nFor more details about the helm command and parameters, please execute helm --help and helm install --help.\nCreate OUDSM instances You can create OUDSM instances using one of the following methods:\n Using a YAML file Using --set argument Using a YAML file Navigate to the $WORKDIR/kubernetes/helm directory:\n$ cd $WORKDIR/kubernetes/helm Create an oudsm-values-override.yaml as follows:\nimage: repository: \u0026lt;image_location\u0026gt; tag: \u0026lt;image_tag\u0026gt; pullPolicy: IfNotPresent imagePullSecrets: - name: orclcred oudsm: adminUser: weblogic adminPass: \u0026lt;password\u0026gt; persistence: type: filesystem filesystem: hostPath: path: \u0026lt;persistent_volume\u0026gt;/oudsm_user_projects For example:\nimage: repository: container-registry.oracle.com/middleware/oudsm_cpu tag: 12.2.1.4-jdk8-ol7-\u0026lt;October\u0026#39;23\u0026gt; pullPolicy: IfNotPresent imagePullSecrets: - name: orclcred oudsm: adminUser: weblogic adminPass: \u0026lt;password\u0026gt; persistence: type: filesystem filesystem: hostPath: path: /scratch/shared/oudsm_user_projects The following caveats exist:\n Replace \u0026lt;password\u0026gt; with the relevant password.\n If you are not using Oracle Container Registry or your own container registry for your OUDSM container image, then you can remove the following:\nimagePullSecrets: - name: orclcred If using NFS for your persistent volume then change the persistence section as follows:\n persistence: type: networkstorage networkstorage: nfs: path: \u0026lt;persistent_volume\u0026gt;/oudsm_user_projects server: \u0026lt;NFS IP address\u0026gt; Run the following command to deploy OUDSM:\n$ helm install --namespace \u0026lt;namespace\u0026gt; \\ --values oudsm-values-override.yaml \\ \u0026lt;release_name\u0026gt; oudsm For example:\n$ helm install --namespace oudsmns \\ --values oudsm-values-override.yaml \\ oudsm oudsm Check the OUDSM deployment as per Verify the OUDSM deployment\n Using --set argument Navigate to the $WORKDIR/kubernetes/helm directory:\n$ cd $WORKDIR/kubernetes/helm Run the following command to create the OUDSM instance:\n$ helm install --namespace oudsmns \\ --set oudsm.adminUser=weblogic,oudsm.adminPass=\u0026lt;password\u0026gt;,persistence.filesystem.hostPath.path=\u0026lt;persistent_volume\u0026gt;/oudsm_user_projects,image.repository=\u0026lt;image_location\u0026gt;,image.tag=\u0026lt;image_tag\u0026gt; \\ --set imagePullSecrets[0].name=\u0026#34;orclcred\u0026#34; \\ \u0026lt;release_name\u0026gt; oudsm For example:\n$ helm install --namespace oudsmns \\ --set oudsm.adminUser=weblogic,oudsm.adminPass=\u0026lt;password\u0026gt;,persistence.filesystem.hostPath.path=/scratch/shared/oudsm_user_projects,image.repository=container-registry.oracle.com/middleware/oudsm_cpu,image.tag=12.2.1.4-jdk8-ol7-\u0026lt;October\u0026#39;23\u0026gt; \\ --set imagePullSecrets[0].name=\u0026#34;orclcred\u0026#34; \\ oudsm oudsm The following caveats exist:\n Replace \u0026lt;password\u0026gt; with the relevant password. If you are not using Oracle Container Registry or your own container registry for your OUDSM container image, then you can remove the following: --set imagePullSecrets[0].name=\u0026quot;orclcred\u0026quot; If using NFS for your persistent volume then use persistence.networkstorage.nfs.path=\u0026lt;persistent_volume\u0026gt;/oudsm_user_projects,persistence.networkstorage.nfs.server=\u0026lt;NFS IP address\u0026gt;. Check the OUDSM deployment as per Verify the OUDSM deployment
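Optionally, whichever method you used, you can confirm which overrides Helm recorded for the release. This is an optional check only (a sketch using the example release name and namespace):\n$ helm --namespace oudsmns get values oudsm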
\n Helm command output In all the examples above, the following output is shown following a successful execution of the helm install command.\nNAME: oudsm LAST DEPLOYED: \u0026lt;DATE\u0026gt; NAMESPACE: oudsmns STATUS: deployed REVISION: 1 TEST SUITE: None Verify the OUDSM deployment Run the following command to verify the OUDSM deployment:\n$ kubectl --namespace \u0026lt;namespace\u0026gt; get pod,service,secret,pv,pvc,ingress -o wide For example:\n$ kubectl --namespace oudsmns get pod,service,secret,pv,pvc,ingress -o wide The output will look similar to the following:\nNAME READY STATUS RESTARTS AGE IP NODE NOMINATED NODE READINESS GATES pod/oudsm-1 1/1 Running 0 73m 10.244.0.19 \u0026lt;worker-node\u0026gt; \u0026lt;none\u0026gt; \u0026lt;none\u0026gt; NAME TYPE CLUSTER-IP EXTERNAL-IP PORT(S) AGE SELECTOR service/oudsm-1 ClusterIP 10.96.108.200 \u0026lt;none\u0026gt; 7001/TCP,7002/TCP 73m app.kubernetes.io/instance=oudsm,app.kubernetes.io/name=oudsm,oudsm/instance=oudsm-1 service/oudsm-lbr ClusterIP 10.96.41.201 \u0026lt;none\u0026gt; 7001/TCP,7002/TCP 73m app.kubernetes.io/instance=oudsm,app.kubernetes.io/name=oudsm NAME TYPE DATA AGE secret/orclcred kubernetes.io/dockerconfigjson 1 3h13m secret/oudsm-creds opaque 2 73m secret/oudsm-token-ksr4g kubernetes.io/service-account-token 3 73m secret/sh.helm.release.v1.oudsm.v1 helm.sh/release.v1 1 73m NAME CAPACITY ACCESS MODES RECLAIM POLICY STATUS CLAIM STORAGECLASS REASON AGE VOLUMEMODE persistentvolume/oudsm-pv 30Gi RWX Retain Bound myoudsmns/oudsm-pvc manual 73m Filesystem NAME STATUS VOLUME CAPACITY ACCESS MODES STORAGECLASS AGE VOLUMEMODE persistentvolumeclaim/oudsm-pvc Bound oudsm-pv 30Gi RWX manual 73m Filesystem NAME HOSTS ADDRESS PORTS AGE ingress.extensions/oudsm-ingress-nginx oudsm-1,oudsm-2,oudsm + 1 more... 100.102.51.230 80 73m Note: It will take several minutes before all the services listed above show. While the oudsm pods have a STATUS of 0/1, the pod is started but the OUDSM server associated with it is currently starting. While the pod is starting you can check the startup status in the pod logs, by running the following command:\n$ kubectl logs oudsm-1 -n oudsmns Note: If the OUDSM deployment fails, additionally refer to Troubleshooting for instructions on how to describe the failing pod(s).
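As a first step this typically means describing the pod and reviewing the Events section at the end of the output, for example (a sketch; substitute your pod name and namespace):\n$ kubectl describe pod oudsm-1 -n oudsmns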
Once the problem is identified follow Undeploy an OUDSM deployment to clean down the deployment before deploying again.\nKubernetes Objects Kubernetes objects created by the Helm chart are detailed in the table below:\n Type Name Example Name Purpose Service Account \u0026lt;deployment/release name\u0026gt; oudsm Kubernetes Service Account for the Helm Chart deployment Secret \u0026lt;deployment/release name\u0026gt;-creds oudsm-creds Secret object for Oracle Unified Directory Services Manager related critical values like passwords Persistent Volume \u0026lt;deployment/release name\u0026gt;-pv oudsm-pv Persistent Volume for user_projects mount. Persistent Volume Claim \u0026lt;deployment/release name\u0026gt;-pvc oudsm-pvc Persistent Volume Claim for user_projects mount. Pod \u0026lt;deployment/release name\u0026gt;-N oudsm-1, oudsm-2, \u0026hellip; Pod(s)/Container(s) for Oracle Unified Directory Services Manager Instances Service \u0026lt;deployment/release name\u0026gt;-N oudsm-1, oudsm-2, \u0026hellip; Service(s) for HTTP and HTTPS interfaces from Oracle Unified Directory Services Manager instance \u0026lt;deployment/release name\u0026gt;-N Ingress \u0026lt;deployment/release name\u0026gt;-ingress-nginx oudsm-ingress-nginx Ingress Rules for HTTP and HTTPS interfaces. In the table above, the Example Name for each Object is based on the value \u0026lsquo;oudsm\u0026rsquo; as the deployment/release name for the Helm chart installation. Ingress Configuration With an OUDSM instance now deployed you are now ready to configure an ingress controller to direct traffic to OUDSM as per Configure an ingress for an OUDSM.\nUndeploy an OUDSM deployment Delete the OUDSM deployment Find the deployment release name:\n$ helm --namespace \u0026lt;namespace\u0026gt; list For example:\n$ helm --namespace oudsmns list The output will look similar to the following:\nNAME NAMESPACE REVISION UPDATED STATUS CHART APP VERSION oudsm oudsmns 2 \u0026lt;DATE\u0026gt; deployed oudsm-0.1 12.2.1.4.0 Delete the deployment using the following command:\n$ helm uninstall --namespace \u0026lt;namespace\u0026gt; \u0026lt;release\u0026gt; For example:\n$ helm uninstall --namespace oudsmns oudsm release \u0026#34;oudsm\u0026#34; uninstalled Delete the persistent volume contents Delete the contents of the oudsm_user_projects directory in the persistent volume:\n$ cd \u0026lt;persistent_volume\u0026gt;/oudsm_user_projects $ rm -rf * For example:\n$ cd /scratch/shared/oudsm_user_projects $ rm -rf * Appendix: Configuration Parameters The following table lists the configurable parameters of the \u0026lsquo;oudsm\u0026rsquo; chart and their default values.\n Parameter Description Default Value replicaCount Number of Oracle Unified Directory Services Manager instances/pods/services to be created 1 restartPolicyName restartPolicy to be configured for each POD containing Oracle Unified Directory Services Manager instance OnFailure image.repository Oracle Unified Directory Services Manager Image Registry/Repository and name. Based on this, image parameter would be configured for Oracle Unified Directory Services Manager pods/containers oracle/oudsm image.tag Oracle Unified Directory Services Manager Image Tag. 
Based on this, image parameter would be configured for Oracle Unified Directory Services Manager pods/containers 12.2.1.4.0 image.pullPolicy policy to pull the image IfNotPresent imagePullSecrets.name name of Secret resource containing private registry credentials regcred nameOverride override the fullname with this name fullnameOverride Overrides the fullname with the provided string serviceAccount.create Specifies whether a service account should be created true serviceAccount.name If not set and create is true, a name is generated using the fullname template oudsm-\u0026lt; fullname \u0026gt;-token-\u0026lt; randomalphanum \u0026gt; podSecurityContext Security context policies to add to the controller pod securityContext Security context policies to add by default service.type type of controller service to create ClusterIP nodeSelector node labels for pod assignment tolerations node taints to tolerate affinity node/pod affinities ingress.enabled true ingress.type Supported value: nginx nginx ingress.host Hostname to be used with Ingress Rules. If not set, hostname would be configured according to fullname. Hosts would be configured as \u0026lt; fullname \u0026gt;-http.\u0026lt; domain \u0026gt;, \u0026lt; fullname \u0026gt;-http-0.\u0026lt; domain \u0026gt;, \u0026lt; fullname \u0026gt;-http-1.\u0026lt; domain \u0026gt;, etc. ingress.domain Domain name to be used with Ingress Rules. In ingress rules, hosts would be configured as \u0026lt; host \u0026gt;.\u0026lt; domain \u0026gt;, \u0026lt; host \u0026gt;-0.\u0026lt; domain \u0026gt;, \u0026lt; host \u0026gt;-1.\u0026lt; domain \u0026gt;, etc. ingress.backendPort http ingress.nginxAnnotations { kubernetes.io/ingress.class: \u0026ldquo;nginx\u0026rdquo; nginx.ingress.kubernetes.io/affinity-mode: \u0026ldquo;persistent\u0026rdquo; nginx.ingress.kubernetes.io/affinity: \u0026ldquo;cookie\u0026rdquo; } ingress.tlsSecret Secret name to use an already created TLS Secret. If such secret is not provided, one would be created with name \u0026lt; fullname \u0026gt;-tls-cert. If the TLS Secret is in different namespace, name can be mentioned as \u0026lt; namespace \u0026gt;/\u0026lt; tlsSecretName \u0026gt; ingress.certCN Subject\u0026rsquo;s common name (cn) for Self-Signed Cert. \u0026lt; fullname \u0026gt; ingress.certValidityDays Validity of Self-Signed Cert in days 365 secret.enabled If enabled it will use the secret created with base64 encoding. If value is false, secret would not be used and input values (through --set, --values, etc.) would be used while creation of pods. true secret.name secret name to use an already created Secret oudsm-\u0026lt; fullname \u0026gt;-creds secret.type Specifies the type of the secret Opaque persistence.enabled If enabled, it will use the persistent volume. If value is false, PV and PVC would not be used and pods would be using the default emptyDir mount volume. true persistence.pvname pvname to use an already created Persistent Volume. If blank, will use the default name oudsm-\u0026lt; fullname \u0026gt;-pv persistence.pvcname pvcname to use an already created Persistent Volume Claim. If blank, will use the default name oudsm-\u0026lt; fullname \u0026gt;-pvc persistence.type supported values: either filesystem or networkstorage or custom filesystem persistence.filesystem.hostPath.path The path location mentioned should be created and accessible from the local host provided with necessary privileges for the user. 
/scratch/shared/oudsm_user_projects persistence.networkstorage.nfs.path Path of NFS Share location /scratch/shared/oudsm_user_projects persistence.networkstorage.nfs.server IP or hostname of NFS Server 0.0.0.0 persistence.custom.* Based on values/data, YAML content would be included in PersistenceVolume Object persistence.accessMode Specifies the access mode of the location provided ReadWriteMany persistence.size Specifies the size of the storage 10Gi persistence.storageClass Specifies the storageclass of the persistence volume. empty persistence.annotations specifies any annotations that will be used { } oudsm.adminUser Weblogic Administration User weblogic oudsm.adminPass Password for Weblogic Administration User oudsm.startupTime Expected startup time. After specified seconds readinessProbe would start 900 oudsm.livenessProbeInitialDelay Paramter to decide livenessProbe initialDelaySeconds 1200 elk.logStashImage The version of logstash you want to install logstash:8.3.1 elk.sslenabled If SSL is enabled for ELK set the value to true, or if NON-SSL set to false. This value must be lowercase TRUE elk.eshosts The URL for sending logs to Elasticsearch. HTTP if NON-SSL is used https://elasticsearch.example.com:9200 elk.esuser The name of the user for logstash to access Elasticsearch logstash_internal elk.espassword The password for ELK_USER password elk.esapikey The API key details apikey elk.esindex The log name oudsmlogs-00001 elk.imagePullSecrets secret to be used for pulling logstash image dockercred " +}, +{ + "uri": "/fmw-kubernetes/23.4.1/idm-products/oam/configure-ingress/", + "title": "Configure an Ingress for an OAM domain", + "tags": [], + "description": "This document provides steps to configure an Ingress to direct traffic to the OAM domain.", + "content": "Setting up an ingress for NGINX for the OAM Domain The instructions below explain how to set up NGINX as an ingress for the OAM domain with SSL termination.\nNote: All the steps below should be performed on the master node.\n Generate a SSL Certificate Install NGINX Create an Ingress for the Domain Verify that you can access the domain URL Generate a SSL Certificate Generate a private key and certificate signing request (CSR) using a tool of your choice. 
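For example, a private key and CSR could be generated with openssl as follows (shown only as an illustration; the file names and subject are placeholders to adapt to your environment):\n$ openssl req -new -newkey rsa:2048 -nodes -keyout tls.key -out tls.csr -subj \u0026#34;/CN=\u0026lt;hostname\u0026gt;\u0026#34; 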
Send the CSR to your certificate authority (CA) to generate the certificate.\nIf you want to use a certificate for testing purposes you can generate a self signed certificate using openssl:\n$ mkdir \u0026lt;workdir\u0026gt;/ssl $ cd \u0026lt;workdir\u0026gt;/ssl $ openssl req -x509 -nodes -days 365 -newkey rsa:2048 -keyout tls.key -out tls.crt -subj \u0026#34;/CN=\u0026lt;nginx-hostname\u0026gt;\u0026#34; For example:\n$ mkdir /scratch/OAMK8S/ssl $ cd /scratch/OAMK8S/ssl $ openssl req -x509 -nodes -days 365 -newkey rsa:2048 -keyout tls.key -out tls.crt -subj \u0026#34;/CN=masternode.example.com\u0026#34; Note: The CN should match the host.domain of the master node in order to prevent hostname problems during certificate verification.\nThe output will look similar to the following:\nGenerating a 2048 bit RSA private key ..........................................+++ .......................................................................................................+++ writing new private key to 'tls.key' ----- Create a secret for SSL by running the following command:\n$ kubectl -n oamns create secret tls \u0026lt;domain_uid\u0026gt;-tls-cert --key \u0026lt;workdir\u0026gt;/tls.key --cert \u0026lt;workdir\u0026gt;/tls.crt For example:\n$ kubectl -n oamns create secret tls accessdomain-tls-cert --key /scratch/OAMK8S/ssl/tls.key --cert /scratch/OAMK8S/ssl/tls.crt The output will look similar to the following:\nsecret/accessdomain-tls-cert created Install NGINX Use helm to install NGINX.\n Add the helm chart repository for NGINX using the following command:\n$ helm repo add stable https://kubernetes.github.io/ingress-nginx The output will look similar to the following:\n\u0026quot;stable\u0026quot; has been added to your repositories Update the repository using the following command:\n$ helm repo update The output will look similar to the following:\nHang tight while we grab the latest from your chart repositories... ...Successfully got an update from the \u0026quot;stable\u0026quot; chart repository Update Complete. ⎈ Happy Helming!⎈ Install NGINX using helm If you can connect directly to the master node IP address from a browser, then install NGINX with the --set controller.service.type=NodePort parameter.\nIf you are using a Managed Service for your Kubernetes cluster, for example Oracle Kubernetes Engine (OKE) on Oracle Cloud Infrastructure (OCI), and connect from a browser to the Load Balancer IP address, then use the --set controller.service.type=LoadBalancer parameter. This instructs the Managed Service to setup a Load Balancer to direct traffic to the NGINX ingress.\n To install NGINX use the following helm command depending on if you are using NodePort or LoadBalancer:\na) Using NodePort\n$ helm install nginx-ingress -n \u0026lt;domain_namespace\u0026gt; --set controller.extraArgs.default-ssl-certificate=\u0026lt;domain_namespace\u0026gt;/\u0026lt;ssl_secret\u0026gt; --set controller.service.type=NodePort --set controller.admissionWebhooks.enabled=false stable/ingress-nginx For example:\n$ helm install nginx-ingress -n oamns --set controller.extraArgs.default-ssl-certificate=oamns/accessdomain-tls-cert --set controller.service.type=NodePort --set controller.admissionWebhooks.enabled=false stable/ingress-nginx The output will look similar to the following:\nNAME: nginx-ingress LAST DEPLOYED: \u0026lt;DATE\u0026gt; NAMESPACE: oamns STATUS: deployed REVISION: 1 TEST SUITE: None NOTES: The nginx-ingress controller has been installed. 
Get the application URL by running these commands: export HTTP_NODE_PORT=$(kubectl --namespace oamns get services -o jsonpath=\u0026quot;{.spec.ports[0].nodePort}\u0026quot; nginx-ingress-controller) export HTTPS_NODE_PORT=$(kubectl --namespace oamns get services -o jsonpath=\u0026quot;{.spec.ports[1].nodePort}\u0026quot; nginx-ingress-controller) export NODE_IP=$(kubectl --namespace oamns get nodes -o jsonpath=\u0026quot;{.items[0].status.addresses[1].address}\u0026quot;) echo \u0026quot;Visit http://$NODE_IP:$HTTP_NODE_PORT to access your application via HTTP.\u0026quot; echo \u0026quot;Visit https://$NODE_IP:$HTTPS_NODE_PORT to access your application via HTTPS.\u0026quot; An example Ingress that makes use of the controller: apiVersion: networking.k8s.io/v1 kind: Ingress metadata: annotations: kubernetes.io/ingress.class: nginx name: example namespace: foo spec: ingressClassName: example-class rules: - host: www.example.com http: paths: - path: / pathType: Prefix backend: service: name: exampleService port: 80 # This section is only required if TLS is to be enabled for the Ingress tls: - hosts: - www.example.com secretName: example-tls If TLS is enabled for the Ingress, a Secret containing the certificate and key must also be provided: apiVersion: v1 kind: Secret metadata: name: example-tls namespace: foo data: tls.crt: \u0026lt;base64 encoded cert\u0026gt; tls.key: \u0026lt;base64 encoded key\u0026gt; type: kubernetes.io/tls b) Using LoadBalancer\n$ helm install nginx-ingress -n oamns --set controller.extraArgs.default-ssl-certificate=oamns/accessdomain-tls-cert --set controller.service.type=LoadBalancer --set controller.admissionWebhooks.enabled=false stable/ingress-nginx The output will look similar to the following:\n$ helm install nginx-ingress -n oamns --set controller.extraArgs.default-ssl-certificate=oamns/accessdomain-tls-cert --set controller.service.type=LoadBalancer --set controller.admissionWebhooks.enabled=false stable/ingress-nginx NAME: nginx-ingress LAST DEPLOYED: \u0026lt;DATE\u0026gt; NAMESPACE: nginxssl STATUS: deployed REVISION: 1 TEST SUITE: None NOTES: The ingress-nginx controller has been installed. It may take a few minutes for the LoadBalancer IP to be available. You can watch the status by running 'kubectl --namespace oamns get services -o wide -w nginx-ingress-ingress-nginx-controller' An example Ingress that makes use of the controller: apiVersion: networking.k8s.io/v1 kind: Ingress metadata: annotations: kubernetes.io/ingress.class: nginx name: example namespace: foo spec: ingressClassName: example-class rules: - host: www.example.com http: paths: - path: / pathType: Prefix backend: service: name: exampleService port: 80 # This section is only required if TLS is to be enabled for the Ingress tls: - hosts: - www.example.com secretName: example-tls If TLS is enabled for the Ingress, a Secret containing the certificate and key must also be provided: apiVersion: v1 kind: Secret metadata: name: example-tls namespace: foo data: tls.crt: \u0026lt;base64 encoded cert\u0026gt; tls.key: \u0026lt;base64 encoded key\u0026gt; type: kubernetes.io/tls Create an Ingress for the Domain Navigate to the following directory:\n$ cd $WORKDIR/kubernetes/charts/ingress-per-domain Edit the values.yaml and change the domainUID: parameter to match your domainUID, for example domainUID: accessdomain. The file should look as follows:\n# Load balancer type. Supported values are: NGINX type: NGINX # Type of Configuration Supported Values are : SSL and NONSSL sslType: SSL # domainType. 
Supported values are: oam domainType: oam #WLS domain as backend to the load balancer wlsDomain: domainUID: accessdomain adminServerName: AdminServer adminServerPort: 7001 adminServerSSLPort: oamClusterName: oam_cluster oamManagedServerPort: 14100 oamManagedServerSSLPort: policyClusterName: policy_cluster policyManagedServerPort: 15100 policyManagedServerSSLPort: # Host specific values hostName: enabled: false admin: runtime: Run the following helm command to install the ingress:\n$ cd $WORKDIR $ helm install oam-nginx kubernetes/charts/ingress-per-domain --namespace \u0026lt;domain_namespace\u0026gt; --values kubernetes/charts/ingress-per-domain/values.yaml For example:\n$ cd $WORKDIR $ helm install oam-nginx kubernetes/charts/ingress-per-domain --namespace oamns --values kubernetes/charts/ingress-per-domain/values.yaml The output will look similar to the following:\nNAME: oam-nginx LAST DEPLOYED: \u0026lt;DATE\u0026gt; NAMESPACE: oamns STATUS: deployed REVISION: 1 TEST SUITE: None Run the following command to show the ingress is created successfully:\n$ kubectl get ing -n \u0026lt;domain_namespace\u0026gt; For example:\n$ kubectl get ing -n oamns The output will look similar to the following:\nNAME CLASS HOSTS ADDRESS PORTS AGE accessdomain-nginx \u0026lt;none\u0026gt; * 80 5s Find the node port of NGINX using the following command:\n$ kubectl --namespace \u0026lt;domain_namespace\u0026gt; get services -o jsonpath=\u0026#34;{.spec.ports[1].nodePort}\u0026#34; nginx-ingress-ingress-nginx-controller For example:\n$ kubectl --namespace oamns get services -o jsonpath=\u0026#34;{.spec.ports[1].nodePort}\u0026#34; nginx-ingress-ingress-nginx-controller The output will look similar to the following:\n31051 Run the following command to check the ingress:\n$ kubectl describe ing \u0026lt;domainUID\u0026gt;-nginx -n \u0026lt;domain_namespace\u0026gt; For example:\n$ kubectl describe ing accessdomain-nginx -n oamns The output will look similar to the following:\nName: accessdomain-nginx Namespace: oamns Address: 10.106.70.55 Ingress Class: \u0026lt;none\u0026gt; Default backend: default-http-backend:80 (\u0026lt;error: endpoints \u0026quot;default-http-backend\u0026quot; not found\u0026gt;) Rules: Host Path Backends ---- ---- -------- * /console accessdomain-adminserver:7001 (10.244.1.18:7001) /consolehelp accessdomain-adminserver:7001 (10.244.1.18:7001) /rreg/rreg accessdomain-adminserver:7001 (10.244.1.18:7001) /em accessdomain-adminserver:7001 (10.244.1.18:7001) /oamconsole accessdomain-adminserver:7001 (10.244.1.18:7001) /dms accessdomain-adminserver:7001 (10.244.1.18:7001) /oam/services/rest accessdomain-adminserver:7001 (10.244.1.18:7001) /iam/admin/config accessdomain-adminserver:7001 (10.244.1.18:7001) /iam/admin/diag accessdomain-adminserver:7001 (10.244.1.18:7001) /iam/access accessdomain-cluster-oam-cluster:14100 (10.244.1.20:14100,10.244.2.13:14100) /oam/admin/api accessdomain-adminserver:7001 (10.244.1.18:7001) /oam/services/rest/access/api accessdomain-cluster-oam-cluster:14100 (10.244.1.20:14100,10.244.2.13:14100) /access accessdomain-cluster-policy-cluster:15100 (10.244.1.19:15100,10.244.2.12:15100) / accessdomain-cluster-oam-cluster:14100 (10.244.1.20:14100,10.244.2.13:14100) Annotations: kubernetes.io/ingress.class: nginx meta.helm.sh/release-name: oam-nginx meta.helm.sh/release-namespace: oamns nginx.ingress.kubernetes.io/configuration-snippet: more_clear_input_headers \u0026quot;WL-Proxy-Client-IP\u0026quot; \u0026quot;WL-Proxy-SSL\u0026quot;; more_set_input_headers 
\u0026quot;X-Forwarded-Proto: https\u0026quot;; more_set_input_headers \u0026quot;WL-Proxy-SSL: true\u0026quot;; nginx.ingress.kubernetes.io/enable-access-log: false nginx.ingress.kubernetes.io/ingress.allow-http: false nginx.ingress.kubernetes.io/proxy-buffer-size: 2000k Events: Type Reason Age From Message ---- ------ ---- ---- ------- Normal Sync 14m (x2 over 15m) nginx-ingress-controller Scheduled for sync To confirm that the new ingress is successfully routing to the domain\u0026rsquo;s server pods, run the following command to send a request to the URL for the \u0026lsquo;WebLogic ReadyApp framework\u0026rsquo;:\n$ curl -v -k https://${MASTERNODE-HOSTNAME}:${MASTERNODE-PORT}/weblogic/ready For example:\na) For NodePort\n$ curl -v -k https://masternode.example.com:31051/weblogic/ready b) For LoadBalancer:\n$ curl -v -k https://loadbalancer.example.com/weblogic/ready The output will look similar to the following:\n* Trying 12.345.67.89... * Connected to 12.345.67.89 (12.345.67.89) port 31051 (#0) * Initializing NSS with certpath: sql:/etc/pki/nssdb * skipping SSL peer certificate verification * SSL connection using TLS_ECDHE_RSA_WITH_AES_128_GCM_SHA256 * Server certificate: * subject: CN=masternode.example.com * start date: \u0026lt;DATE\u0026gt; * expire date: \u0026lt;DATE\u0026gt; * common name: masternode.example.com * issuer: CN=masternode.example.com \u0026gt; GET /weblogic/ready HTTP/1.1 \u0026gt; User-Agent: curl/7.29.0 \u0026gt; Host: masternode.example.com:31051 \u0026gt; Accept: */* \u0026gt; \u0026lt; HTTP/1.1 200 OK \u0026lt; Date: Mon, 12 Jul 2021 15:06:12 GMT \u0026lt; Content-Length: 0 \u0026lt; Connection: keep-alive \u0026lt; Strict-Transport-Security: max-age=15724800; includeSubDomains \u0026lt; * Connection #0 to host 12.345.67.89 left intact Verify that you can access the domain URL After setting up the NGINX ingress, verify that the domain applications are accessible through the NGINX ingress port (for example 31051) as per Validate Domain URLs \n" +}, +{ + "uri": "/fmw-kubernetes/23.4.1/idm-products/oig/configure-ingress/", + "title": "Configure an ingress for an OIG domain", + "tags": [], + "description": "This document provides steps to configure an Ingress to direct traffic to the OIG domain.", + "content": "Choose one of the following supported methods to configure an Ingress to direct traffic for your OIG domain.\n a. Using an Ingress with NGINX (non-SSL) Steps to set up an Ingress for NGINX to direct traffic to the OIG domain (non-SSL).\n b. Using an Ingress with NGINX (SSL) Steps to set up an Ingress for NGINX to direct traffic to the OIG domain using SSL.\n " +}, +{ + "uri": "/fmw-kubernetes/23.4.1/idm-products/oud/configure-ingress/", + "title": "Configure an Ingress for OUD", + "tags": [], + "description": "This document provides steps to configure an ingress controller to direct traffic to OUD.", + "content": " Introduction\n Install NGINX\na. Configure the repository\nb. Create a namespace\nc. Install NGINX using helm\n Access to interfaces through ingress\na. Changes in /etc/hosts to validate hostname based ingress rules\nb. Using LDAP utilities\nc. Validate access using LDAP\nd. Validate access using HTTPS\n Introduction The instructions below explain how to set up NGINX as an ingress for OUD.\nBy default the ingress configuration only supports HTTP and HTTPS ports. 
To allow LDAP and LDAPS communication over TCP, configuration is required at the ingress controller/implementation level.\nInstall NGINX Use Helm to install NGINX.\nConfigure the repository Add the Helm chart repository for installing NGINX using the following command:\n$ helm repo add stable https://kubernetes.github.io/ingress-nginx The output will look similar to the following:\n\u0026quot;stable\u0026quot; has been added to your repositories Update the repository using the following command:\n$ helm repo update The output will look similar to the following:\nHang tight while we grab the latest from your chart repositories... ...Successfully got an update from the \u0026quot;stable\u0026quot; chart repository Update Complete. Happy Helming! Create a namespace Create a Kubernetes namespace for NGINX:\n$ kubectl create namespace \u0026lt;namespace\u0026gt; For example:\n$ kubectl create namespace mynginx The output will look similar to the following:\nnamespace/mynginx created Install NGINX using helm Create a $WORKDIR/kubernetes/helm/nginx-ingress-values-override.yaml that contains the following:\nNote: The configuration below:\n Assumes that you have oud-ds-rs installed with value oud-ds-rs as a deployment/release name in the namespace oudns. If using a different deployment name and/or namespace change appropriately. Deploys an ingress using LoadBalancer. If you prefer to use NodePort, change the configuration accordingly. For more details about NGINX configuration see: NGINX Ingress Controller. # Configuration for additional TCP ports to be exposed through Ingress # Format for each port would be like: # \u0026lt;PortNumber\u0026gt;: \u0026lt;Namespace\u0026gt;/\u0026lt;Service\u0026gt; tcp: # Map 1389 TCP port to LBR LDAP service to get requests handled through any available POD/Endpoint serving LDAP Port 1389: oudns/oud-ds-rs-lbr-ldap:ldap # Map 1636 TCP port to LBR LDAP service to get requests handled through any available POD/Endpoint serving LDAPS Port 1636: oudns/oud-ds-rs-lbr-ldap:ldaps controller: admissionWebhooks: enabled: false extraArgs: # The secret referred to by this flag contains the default certificate to be used when accessing the catch-all server. # If this flag is not provided NGINX will use a self-signed certificate. # If the TLS Secret is in different namespace, name can be mentioned as \u0026lt;namespace\u0026gt;/\u0026lt;tlsSecretName\u0026gt; default-ssl-certificate: oudns/oud-ds-rs-tls-cert service: # controller service external IP addresses # externalIPs: # - \u0026lt; External IP Address \u0026gt; # To configure Ingress Controller Service as LoadBalancer type of Service # Based on the Kubernetes configuration, External LoadBalancer would be linked to the Ingress Controller Service type: LoadBalancer # Configuration for NodePort to be used for Ports exposed through Ingress # If NodePorts are not defied/configured, Node Port would be assigend automatically by Kubernetes # These NodePorts are helpful while accessing services directly through Ingress and without having External Load Balancer. 
nodePorts: # For HTTP Interface exposed through LoadBalancer/Ingress http: 30080 # For HTTPS Interface exposed through LoadBalancer/Ingress https: 30443 tcp: # For LDAP Interface 1389: 31389 # For LDAPS Interface 1636: 31636 To install and configure NGINX Ingress issue the following command:\n$ helm install --namespace \u0026lt;namespace\u0026gt; \\ --values nginx-ingress-values-override.yaml \\ lbr-nginx stable/ingress-nginx Where:\n lbr-nginx is your deployment name stable/ingress-nginx is the chart reference For example:\n$ helm install --namespace mynginx \\ --values nginx-ingress-values-override.yaml \\ lbr-nginx stable/ingress-nginx The output will look similar to the following:\nNAME: lbr-nginx LAST DEPLOYED: \u0026lt;DATE\u0026gt; NAMESPACE: mynginx STATUS: deployed REVISION: 1 TEST SUITE: None NOTES: The ingress-nginx controller has been installed. It may take a few minutes for the LoadBalancer IP to be available. You can watch the status by running 'kubectl --namespace mynginx get services -o wide -w lbr-nginx-ingress-nginx-controller' An example Ingress that makes use of the controller: apiVersion: networking.k8s.io/v1beta1 kind: Ingress metadata: annotations: kubernetes.io/ingress.class: nginx name: example namespace: foo spec: rules: - host: www.example.com http: paths: - backend: serviceName: exampleService servicePort: 80 path: / # This section is only required if TLS is to be enabled for the Ingress tls: - hosts: - www.example.com secretName: example-tls If TLS is enabled for the Ingress, a Secret containing the certificate and key must also be provided: apiVersion: v1 kind: Secret metadata: name: example-tls namespace: foo data: tls.crt: \u0026lt;base64 encoded cert\u0026gt; tls.key: \u0026lt;base64 encoded key\u0026gt; type: kubernetes.io/tls Optional: Command helm upgrade to update nginx-ingress If required, an nginx-ingress deployment can be updated/upgraded with following command. 
In this example, nginx-ingress configuration is updated with an additional TCP port and Node Port for accessing the LDAP/LDAPS port of a specific POD:\n Create a nginx-ingress-values-override.yaml that contains the following:\n# Configuration for additional TCP ports to be exposed through Ingress # Format for each port would be like: # \u0026lt;PortNumber\u0026gt;: \u0026lt;Namespace\u0026gt;/\u0026lt;Service\u0026gt; tcp: # Map 1389 TCP port to LBR LDAP service to get requests handled through any available POD/Endpoint serving LDAP Port 1389: oudns/oud-ds-rs-lbr-ldap:ldap # Map 1636 TCP port to LBR LDAP service to get requests handled through any available POD/Endpoint serving LDAPS Port 1636: oudns/oud-ds-rs-lbr-ldap:ldaps # Map specific ports for LDAP and LDAPS communication from individual Services/Pods # To redirect requests on 3890 port to oudns/oud-ds-rs-ldap-0:ldap 3890: oudns/oud-ds-rs-ldap-0:ldap # To redirect requests on 6360 port to oudns/oud-ds-rs-ldaps-0:ldap 6360: oudns/oud-ds-rs-ldap-0:ldaps # To redirect requests on 3891 port to oudns/oud-ds-rs-ldap-1:ldap 3891: oudns/oud-ds-rs-ldap-1:ldap # To redirect requests on 6361 port to oudns/oud-ds-rs-ldaps-1:ldap 6361: oudns/oud-ds-rs-ldap-1:ldaps # To redirect requests on 3892 port to oudns/oud-ds-rs-ldap-2:ldap 3892: oudns/oud-ds-rs-ldap-2:ldap # To redirect requests on 6362 port to oudns/oud-ds-rs-ldaps-2:ldap 6362: oudns/oud-ds-rs-ldap-2:ldaps # Map 1444 TCP port to LBR Admin service to get requests handled through any available POD/Endpoint serving Admin LDAPS Port 1444: oudns/oud-ds-rs-lbr-admin:adminldaps # To redirect requests on 4440 port to oudns/oud-ds-rs-0:adminldaps 4440: oudns/oud-ds-rs-0:adminldaps # To redirect requests on 4441 port to oudns/oud-ds-rs-1:adminldaps 4441: oudns/oud-ds-rs-1:adminldaps # To redirect requests on 4442 port to oudns/oud-ds-rs-2:adminldaps 4442: oudns/oud-ds-rs-2:adminldaps controller: admissionWebhooks: enabled: false extraArgs: # The secret referred to by this flag contains the default certificate to be used when accessing the catch-all server. # If this flag is not provided NGINX will use a self-signed certificate. # If the TLS Secret is in different namespace, name can be mentioned as \u0026lt;namespace\u0026gt;/\u0026lt;tlsSecretName\u0026gt; default-ssl-certificate: oudns/oud-ds-rs-tls-cert service: # controller service external IP addresses # externalIPs: # - \u0026lt; External IP Address \u0026gt; # To configure Ingress Controller Service as LoadBalancer type of Service # Based on the Kubernetes configuration, External LoadBalancer would be linked to the Ingress Controller Service type: LoadBalancer # Configuration for NodePort to be used for Ports exposed through Ingress # If NodePorts are not defied/configured, Node Port would be assigend automatically by Kubernetes # These NodePorts are helpful while accessing services directly through Ingress and without having External Load Balancer. 
nodePorts: # For HTTP Interface exposed through LoadBalancer/Ingress http: 30080 # For HTTPS Interface exposed through LoadBalancer/Ingress https: 30443 tcp: # For LDAP Interface referring to LBR LDAP services serving LDAP port 1389: 31389 # For LDAPS Interface referring to LBR LDAP services serving LDAPS port 1636: 31636 # For LDAP Interface from specific service oud-ds-rs-ldap-0 3890: 30890 # For LDAPS Interface from specific service oud-ds-rs-ldap-0 6360: 30360 # For LDAP Interface from specific service oud-ds-rs-ldap-1 3891: 30891 # For LDAPS Interface from specific service oud-ds-rs-ldap-1 6361: 30361 # For LDAP Interface from specific service oud-ds-rs-ldap-2 3892: 30892 # For LDAPS Interface from specific service oud-ds-rs-ldap-2 6362: 30362 # For LDAPS Interface referring to LBR Admin services serving adminldaps port 1444: 31444 # For Admin LDAPS Interface from specific service oud-ds-rs-0 4440: 30440 # For Admin LDAPS Interface from specific service oud-ds-rs-1 4441: 30441 # For Admin LDAPS Interface from specific service oud-ds-rs-2 4442: 30442 Run the following command to upgrade the ingress:\n$ helm upgrade --namespace \u0026lt;namespace\u0026gt; \\ --values nginx-ingress-values-override.yaml \\ lbr-nginx stable/ingress-nginx Where:\n lbr-nginx is your deployment name stable/ingress-nginx is the chart reference For example:\n$ helm upgrade --namespace mynginx \\ --values nginx-ingress-values-override.yaml \\ lbr-nginx stable/ingress-nginx Access to interfaces through ingress Using the Helm chart, ingress objects are created according to configuration. The following table details the rules configured in ingress object(s) for access to Oracle Unified Directory Interfaces through ingress.\n Port NodePort Host Example Hostname Path Backend Service:Port Example Service Name:Port http/https 30080/30443 \u0026lt;deployment/release name\u0026gt;-admin-0 oud-ds-rs-admin-0 * \u0026lt;deployment/release name\u0026gt;-0:adminhttps oud-ds-rs-0:adminhttps http/https 30080/30443 \u0026lt;deployment/release name\u0026gt;-admin-N oud-ds-rs-admin-N * \u0026lt;deployment/release name\u0026gt;-N:adminhttps oud-ds-rs-1:adminhttps http/https 30080/30443 \u0026lt;deployment/release name\u0026gt;-admin oud-ds-rs-admin * \u0026lt;deployment/release name\u0026gt;-lbr-admin:adminhttps oud-ds-rs-lbr-admin:adminhttps http/https 30080/30443 * * /rest/v1/admin \u0026lt;deployment/release name\u0026gt;-lbr-admin:adminhttps oud-ds-rs-lbr-admin:adminhttps http/https 30080/30443 \u0026lt;deployment/release name\u0026gt;-http-0 oud-ds-rs-http-0 * \u0026lt;deployment/release name\u0026gt;-http-0:http oud-ds-rs-http-0:http http/https 30080/30443 \u0026lt;deployment/release name\u0026gt;-http-N oud-ds-rs-http-N * \u0026lt;deployment/release name\u0026gt;-http-N:http oud-ds-rs-http-N:http http/https 30080/30443 \u0026lt;deployment/release name\u0026gt;-http oud-ds-rs-http * \u0026lt;deployment/release name\u0026gt;-lbr-http:http oud-ds-rs-lbr-http:http http/https 30080/30443 * * /rest/v1/directory \u0026lt;deployment/release name\u0026gt;-lbr-http:http oud-ds-rs-lbr-http:http http/https 30080/30443 * * /iam/directory \u0026lt;deployment/release name\u0026gt;-lbr-http:http oud-ds-rs-lbr-http:http In the table above, example values are based on the value \u0026lsquo;oud-ds-rs\u0026rsquo; as the deployment/release name for Helm chart installation.The NodePorts mentioned in the table are according to ingress configuration described in previous section.When External LoadBalancer is not available/configured, interfaces can 
be accessed through NodePort on a Kubernetes node.\n For LDAP/LDAPS access (based on the updated/upgraded configuration mentioned in previous section)\n Port NodePort Backend Service:Port Example Service Name:Port 1389 31389 \u0026lt;deployment/release name\u0026gt;-lbr-ldap:ldap oud-ds-rs-lbr-ldap:ldap 1636 31636 \u0026lt;deployment/release name\u0026gt;-lbr-ldap:ldap oud-ds-rs-lbr-ldap:ldaps 1444 31444 \u0026lt;deployment/release name\u0026gt;-lbr-admin:adminldaps oud-ds-rs-lbr-admin:adminldaps 3890 30890 \u0026lt;deployment/release name\u0026gt;-ldap-0:ldap oud-ds-rs-ldap-0:ldap 6360 30360 \u0026lt;deployment/release name\u0026gt;-ldap-0:ldaps oud-ds-rs-ldap-0:ldaps 3891 30891 \u0026lt;deployment/release name\u0026gt;-ldap-1:ldap oud-ds-rs-ldap-1:ldap 6361 30361 \u0026lt;deployment/release name\u0026gt;-ldap-1:ldaps oud-ds-rs-ldap-1:ldaps 3892 30892 \u0026lt;deployment/release name\u0026gt;-ldap-2:ldap oud-ds-rs-ldap-2:ldap 6362 30362 \u0026lt;deployment/release name\u0026gt;-ldap-2:ldaps oud-ds-rs-ldap-2:ldaps 4440 30440 \u0026lt;deployment/release name\u0026gt;-0:adminldaps oud-ds-rs-ldap-0:adminldaps 4441 30441 \u0026lt;deployment/release name\u0026gt;-1:adminldaps oud-ds-rs-ldap-1:adminldaps 4442 30442 \u0026lt;deployment/release name\u0026gt;-2:adminldaps oud-ds-rs-ldap-2:adminldaps In the table above, example values are based on value \u0026lsquo;oud-ds-rs\u0026rsquo; as the deployment/release name for helm chart installation. The NodePorts mentioned in the table are according to Ingress configuration described in previous section. When external LoadBalancer is not available/configured, Interfaces can be accessed through NodePort on a Kubernetes Node. Changes in /etc/hosts to validate hostname based ingress rules If it is not possible to have a LoadBalancer configuration updated to have host names added for Oracle Unified Directory Interfaces then the following entries can be added in /etc/hosts files on the host from where Oracle Unified Directory interfaces will be accessed.\n\u0026lt;IP Address of External LBR or Kubernetes Node\u0026gt;\toud-ds-rs-http oud-ds-rs-http-0 oud-ds-rs-http-1 oud-ds-rs-http-2 oud-ds-rs-http-N \u0026lt;IP Address of External LBR or Kubernetes Node\u0026gt;\toud-ds-rs-admin oud-ds-rs-admin-0 oud-ds-rs-admin-1 oud-ds-rs-admin-2 oud-ds-rs-admin-N In the table above, host names are based on the value \u0026lsquo;oud-ds-rs\u0026rsquo; as the deployment/release name for Helm chart installation. When External LoadBalancer is not available/configured, Interfaces can be accessed through NodePort on Kubernetes Node. Using LDAP utilities To use Oracle LDAP utilities such as ldapbind, ldapsearch, ldapmodify etc. you can either:\n Run the LDAP commands from an OUD installation outside the Kubernetes cluster. 
This requires access to an On-Premises OUD installation oustide the Kubernetes cluster.\n Run the LDAP commands from inside the OUD Kubernetes pod.\n$ kubectl exec -ti \u0026lt;pod\u0026gt; -n \u0026lt;namespace\u0026gt; -- bash For example:\n$ kubectl exec -ti oud-ds-rs-0 -n oudns -- bash This will take you into a bash session in the pod:\n[oracle@oud-ds-rs-0 oracle]$ Inside the container navigate to /u01/oracle/oud/bin to view the LDAP utilties:\n[oracle@oud-ds-rs-0 oracle]$ cd /u01/oracle/oud/bin [oracle@oud-ds-rs-0 bin]$ ls ldap* ldapcompare ldapdelete ldapmodify ldappasswordmodify ldapsearch Note: For commands that require an ldif file, copy the file into the \u0026lt;persistent_volume\u0026gt;/oud_user_projects directory:\n$ cp file.ldif \u0026lt;peristent_volume\u0026gt;/oud_user_projects For example:\n$ cp file.ldif /scratch/shared/oud_user_projects The file can then be viewed inside the pod:\n[oracle@oud-ds-rs-0 bin]$ cd /u01/oracle/user_projects [oracle@oud-ds-rs-0 user_projects]$ ls *.ldif file.ldif Validate access using LDAP Note: The examples assume sample data was installed when creating the OUD instance.\nLDAP against External Load Balancer Note If your ingress is configured with type: LoadBalancer then you cannot connect to the external LoadBalancer hostname and ports from inside the pod and must connect from an OUD installation outside the cluster.\n Command to perform ldapsearch against External LBR and LDAP port\n$OUD_HOME/bin/ldapsearch --hostname \u0026lt;External LBR\u0026gt; --port 1389 \\ -D \u0026#34;\u0026lt;Root User DN\u0026gt;\u0026#34; -w \u0026lt;Password for Root User DN\u0026gt; \\ -b \u0026#34;\u0026#34; -s base \u0026#34;(objectClass=*)\u0026#34; \u0026#34;*\u0026#34; The output will look similar to the following:\ndn: objectClass: top objectClass: ds-root-dse lastChangeNumber: 0 firstChangeNumber: 0 changelog: cn=changelog entryDN: pwdPolicySubentry: cn=Default Password Policy,cn=Password Policies,cn=config subschemaSubentry: cn=schema supportedAuthPasswordSchemes: SHA256 supportedAuthPasswordSchemes: SHA1 supportedAuthPasswordSchemes: SHA384 supportedAuthPasswordSchemes: SHA512 supportedAuthPasswordSchemes: MD5 numSubordinates: 1 supportedFeatures: 1.3.6.1.1.14 supportedFeatures: 1.3.6.1.4.1.4203.1.5.1 supportedFeatures: 1.3.6.1.4.1.4203.1.5.2 supportedFeatures: 1.3.6.1.4.1.4203.1.5.3 lastExternalChangelogCookie: vendorName: Oracle Corporation vendorVersion: Oracle Unified Directory 12.2.1.4.0 componentVersion: 4 releaseVersion: 1 platformVersion: 0 supportedLDAPVersion: 2 supportedLDAPVersion: 3 supportedControl: 1.2.826.0.1.3344810.2.3 supportedControl: 1.2.840.113556.1.4.1413 supportedControl: 1.2.840.113556.1.4.319 supportedControl: 1.2.840.113556.1.4.473 supportedControl: 1.2.840.113556.1.4.805 supportedControl: 1.3.6.1.1.12 supportedControl: 1.3.6.1.1.13.1 supportedControl: 1.3.6.1.1.13.2 supportedControl: 1.3.6.1.4.1.26027.1.5.2 supportedControl: 1.3.6.1.4.1.26027.1.5.4 supportedControl: 1.3.6.1.4.1.26027.1.5.5 supportedControl: 1.3.6.1.4.1.26027.1.5.6 supportedControl: 1.3.6.1.4.1.26027.2.3.1 supportedControl: 1.3.6.1.4.1.26027.2.3.2 supportedControl: 1.3.6.1.4.1.26027.2.3.4 supportedControl: 1.3.6.1.4.1.42.2.27.8.5.1 supportedControl: 1.3.6.1.4.1.42.2.27.9.5.2 supportedControl: 1.3.6.1.4.1.42.2.27.9.5.8 supportedControl: 1.3.6.1.4.1.4203.1.10.1 supportedControl: 1.3.6.1.4.1.4203.1.10.2 supportedControl: 2.16.840.1.113730.3.4.12 supportedControl: 2.16.840.1.113730.3.4.16 supportedControl: 2.16.840.1.113730.3.4.17 supportedControl: 
2.16.840.1.113730.3.4.18 supportedControl: 2.16.840.1.113730.3.4.19 supportedControl: 2.16.840.1.113730.3.4.2 supportedControl: 2.16.840.1.113730.3.4.3 supportedControl: 2.16.840.1.113730.3.4.4 supportedControl: 2.16.840.1.113730.3.4.5 supportedControl: 2.16.840.1.113730.3.4.9 supportedControl: 2.16.840.1.113894.1.8.21 supportedControl: 2.16.840.1.113894.1.8.31 supportedControl: 2.16.840.1.113894.1.8.36 maintenanceVersion: 2 supportedSASLMechanisms: PLAIN supportedSASLMechanisms: EXTERNAL supportedSASLMechanisms: CRAM-MD5 supportedSASLMechanisms: DIGEST-MD5 majorVersion: 12 orclGUID: D41D8CD98F003204A9800998ECF8427E entryUUID: d41d8cd9-8f00-3204-a980-0998ecf8427e ds-private-naming-contexts: cn=schema hasSubordinates: true nsUniqueId: d41d8cd9-8f003204-a9800998-ecf8427e structuralObjectClass: ds-root-dse supportedExtension: 1.3.6.1.4.1.4203.1.11.1 supportedExtension: 1.3.6.1.4.1.4203.1.11.3 supportedExtension: 1.3.6.1.1.8 supportedExtension: 1.3.6.1.4.1.26027.1.6.3 supportedExtension: 1.3.6.1.4.1.26027.1.6.2 supportedExtension: 1.3.6.1.4.1.26027.1.6.1 supportedExtension: 1.3.6.1.4.1.1466.20037 namingContexts: cn=changelog namingContexts: dc=example,dc=com Command to perform ldapsearch against External LBR and LDAP port for specific Oracle Unified Directory Interface\n$OUD_HOME/bin/ldapsearch --hostname \u0026lt;External LBR\u0026gt; --port 3890 \\ -D \u0026#34;\u0026lt;Root User DN\u0026gt;\u0026#34; -w \u0026lt;Password for Root User DN\u0026gt; \\ -b \u0026#34;\u0026#34; -s base \u0026#34;(objectClass=*)\u0026#34; \u0026#34;*\u0026#34; LDAPS against Kubernetes NodePort for Ingress Controller Service In the example below LDAP utilities are executed from inside the oud-ds-rs-0 pod. If your ingress is configured with type: LoadBalancer you can connect to the Kubernetes hostname where the ingress is deployed using the NodePorts.\n Command to perform ldapsearch against Kubernetes NodePort and LDAP port\n[oracle@oud-ds-rs-0 bin]$ ./ldapsearch --hostname \u0026lt;Kubernetes Node\u0026gt; --port 31636 \\ --useSSL --trustAll \\ -D \u0026#34;\u0026lt;Root User DN\u0026gt;\u0026#34; -w \u0026lt;Password for Root User DN\u0026gt; \\ -b \u0026#34;\u0026#34; -s base \u0026#34;(objectClass=*)\u0026#34; \u0026#34;*\u0026#34; Validate access using HTTPS HTTPS/REST API against External LBR Host:Port Note: In all the examples below:\na) You need to have an external IP assigned at ingress level.\nb) | json_pp is used to format output in readable json format on the client side. 
It can be ignored if you do not have the json_pp library.\nc) Base64 of userDN:userPassword can be generated using echo -n \u0026quot;userDN:userPassword\u0026quot; | base64.\n Command to invoke Data REST API:\n$curl --noproxy \u0026#34;*\u0026#34; -k --location \\ --request GET \u0026#39;https://\u0026lt;External LBR Host\u0026gt;/rest/v1/directory/uid=user.1,ou=People,dc=example,dc=com?scope=sub\u0026amp;attributes=*\u0026#39; \\ --header \u0026#39;Authorization: Basic \u0026lt;Base64 of userDN:userPassword\u0026gt;\u0026#39; | json_pp The output will look similar to the following:\n{ \u0026#34;msgType\u0026#34; : \u0026#34;urn:ietf:params:rest:schemas:oracle:oud:1.0:SearchResponse\u0026#34;, \u0026#34;totalResults\u0026#34; : 1, \u0026#34;searchResultEntries\u0026#34; : [ { \u0026#34;dn\u0026#34; : \u0026#34;uid=user.1,ou=People,dc=example,dc=com\u0026#34;, \u0026#34;attributes\u0026#34; : { \u0026#34;st\u0026#34; : \u0026#34;OH\u0026#34;, \u0026#34;employeeNumber\u0026#34; : \u0026#34;1\u0026#34;, \u0026#34;postalCode\u0026#34; : \u0026#34;93694\u0026#34;, \u0026#34;description\u0026#34; : \u0026#34;This is the description for Aaren Atp.\u0026#34;, \u0026#34;telephoneNumber\u0026#34; : \u0026#34;+1 390 103 6917\u0026#34;, \u0026#34;homePhone\u0026#34; : \u0026#34;+1 280 375 4325\u0026#34;, \u0026#34;initials\u0026#34; : \u0026#34;ALA\u0026#34;, \u0026#34;objectClass\u0026#34; : [ \u0026#34;top\u0026#34;, \u0026#34;inetorgperson\u0026#34;, \u0026#34;organizationalperson\u0026#34;, \u0026#34;person\u0026#34; ], \u0026#34;uid\u0026#34; : \u0026#34;user.1\u0026#34;, \u0026#34;sn\u0026#34; : \u0026#34;Atp\u0026#34;, \u0026#34;street\u0026#34; : \u0026#34;70110 Fourth Street\u0026#34;, \u0026#34;mobile\u0026#34; : \u0026#34;+1 680 734 6300\u0026#34;, \u0026#34;givenName\u0026#34; : \u0026#34;Aaren\u0026#34;, \u0026#34;mail\u0026#34; : \u0026#34;user.1@maildomain.net\u0026#34;, \u0026#34;l\u0026#34; : \u0026#34;New Haven\u0026#34;, \u0026#34;postalAddress\u0026#34; : \u0026#34;Aaren Atp$70110 Fourth Street$New Haven, OH 93694\u0026#34;, \u0026#34;pager\u0026#34; : \u0026#34;+1 850 883 8888\u0026#34;, \u0026#34;cn\u0026#34; : \u0026#34;Aaren Atp\u0026#34; } } ] } Command to invoke Data REST API against specific Oracle Unified Directory Interface:\n$ curl --noproxy \u0026#34;*\u0026#34; -k --location \\ --request GET \u0026#39;https://oud-ds-rs-http-0/rest/v1/directory/uid=user.1,ou=People,dc=example,dc=com?scope=sub\u0026amp;attributes=*\u0026#39; \\ --header \u0026#39;Authorization: Basic \u0026lt;Base64 of userDN:userPassword\u0026gt;\u0026#39; | json_pp For this example, it is assumed that the value \u0026lsquo;oud-ds-rs\u0026rsquo; is used as the deployment/release name for helm chart installation. It is assumed that \u0026lsquo;oud-ds-rs-http-0\u0026rsquo; points to an External LoadBalancer HTTPS/REST API against Kubernetes NodePort for Ingress Controller Service Note: In all the examples below:\na) | json_pp is used to format output in readable json format on the client side. 
It can be ignored if you do not have the json_pp library.\nb) Base64 of userDN:userPassword can be generated using echo -n \u0026quot;userDN:userPassword\u0026quot; | base64.\nc) It is assumed that the value \u0026lsquo;oud-ds-rs\u0026rsquo; is used as the deployment/release name for helm chart installation.\n Command to invoke Data SCIM API:\n$ curl --noproxy \u0026#34;*\u0026#34; -k --location \\ --request GET \u0026#39;https://\u0026lt;Kubernetes Node\u0026gt;:30443/iam/directory/oud/scim/v1/Users\u0026#39; \\ --header \u0026#39;Authorization: Basic \u0026lt;Base64 of userDN:userPassword\u0026gt;\u0026#39; | json_pp The output will look similar to the following:\n{ \u0026#34;Resources\u0026#34; : [ { \u0026#34;id\u0026#34; : \u0026#34;ad55a34a-763f-358f-93f9-da86f9ecd9e4\u0026#34;, \u0026#34;userName\u0026#34; : [ { \u0026#34;value\u0026#34; : \u0026#34;user.0\u0026#34; } ], \u0026#34;schemas\u0026#34; : [ \u0026#34;urn:ietf:params:scim:schemas:core:2.0:User\u0026#34;, \u0026#34;urn:ietf:params:scim:schemas:extension:oracle:2.0:OUD:User\u0026#34;, \u0026#34;urn:ietf:params:scim:schemas:extension:enterprise:2.0:User\u0026#34; ], \u0026#34;meta\u0026#34; : { \u0026#34;location\u0026#34; : \u0026#34;http://\u0026lt;Kubernetes Node\u0026gt;:30443/iam/directory/oud/scim/v1/Users/ad55a34a-763f-358f-93f9-da86f9ecd9e4\u0026#34;, \u0026#34;resourceType\u0026#34; : \u0026#34;User\u0026#34; }, \u0026#34;addresses\u0026#34; : [ { \u0026#34;postalCode\u0026#34; : \u0026#34;50369\u0026#34;, \u0026#34;formatted\u0026#34; : \u0026#34;Aaccf Amar$01251 Chestnut Street$Panama City, DE 50369\u0026#34;, \u0026#34;streetAddress\u0026#34; : \u0026#34;01251 Chestnut Street\u0026#34;, \u0026#34;locality\u0026#34; : \u0026#34;Panama City\u0026#34;, \u0026#34;region\u0026#34; : \u0026#34;DE\u0026#34; } ], \u0026#34;urn:ietf:params:scim:schemas:extension:oracle:2.0:OUD:User\u0026#34; : { \u0026#34;description\u0026#34; : [ { \u0026#34;value\u0026#34; : \u0026#34;This is the description for Aaccf Amar.\u0026#34; } ], \u0026#34;mobile\u0026#34; : [ { \u0026#34;value\u0026#34; : \u0026#34;+1 010 154 3228\u0026#34; } ], \u0026#34;pager\u0026#34; : [ { \u0026#34;value\u0026#34; : \u0026#34;+1 779 041 6341\u0026#34; } ], \u0026#34;objectClass\u0026#34; : [ { \u0026#34;value\u0026#34; : \u0026#34;top\u0026#34; }, { \u0026#34;value\u0026#34; : \u0026#34;organizationalperson\u0026#34; }, { \u0026#34;value\u0026#34; : \u0026#34;person\u0026#34; }, { \u0026#34;value\u0026#34; : \u0026#34;inetorgperson\u0026#34; } ], \u0026#34;initials\u0026#34; : [ { \u0026#34;value\u0026#34; : \u0026#34;ASA\u0026#34; } ], \u0026#34;homePhone\u0026#34; : [ { \u0026#34;value\u0026#34; : \u0026#34;+1 225 216 5900\u0026#34; } ] }, \u0026#34;name\u0026#34; : [ { \u0026#34;givenName\u0026#34; : \u0026#34;Aaccf\u0026#34;, \u0026#34;familyName\u0026#34; : \u0026#34;Amar\u0026#34;, \u0026#34;formatted\u0026#34; : \u0026#34;Aaccf Amar\u0026#34; } ], \u0026#34;emails\u0026#34; : [ { \u0026#34;value\u0026#34; : \u0026#34;user.0@maildomain.net\u0026#34; } ], \u0026#34;phoneNumbers\u0026#34; : [ { \u0026#34;value\u0026#34; : \u0026#34;+1 685 622 6202\u0026#34; } ], \u0026#34;urn:ietf:params:scim:schemas:extension:enterprise:2.0:User\u0026#34; : { \u0026#34;employeeNumber\u0026#34; : [ { \u0026#34;value\u0026#34; : \u0026#34;0\u0026#34; } ] } } , . . . 
} Command to invoke Data SCIM API against specific Oracle Unified Directory Interface:\n$ curl --noproxy \u0026#34;*\u0026#34; -k --location \\ --request GET \u0026#39;https://oud-ds-rs-http-0:30443/iam/directory/oud/scim/v1/Users\u0026#39; \\ --header \u0026#39;Authorization: Basic \u0026lt;Base64 of userDN:userPassword\u0026gt;\u0026#39; | json_pp HTTPS/REST Admin API Note: In all the examples below:\na) | json_pp is used to format output in readable json format on the client side. It can be ignored if you do not have the json_pp library.\nb) Base64 of userDN:userPassword can be generated using echo -n \u0026quot;userDN:userPassword\u0026quot; | base64.\n Command to invoke Admin REST API against External LBR:\n$ curl --noproxy \u0026#34;*\u0026#34; -k --insecure --location \\ --request GET \u0026#39;https://\u0026lt;External LBR Host\u0026gt;/rest/v1/admin/?scope=base\u0026amp;attributes=vendorName\u0026amp;attributes=vendorVersion\u0026amp;attributes=ds-private-naming-contexts\u0026amp;attributes=subschemaSubentry\u0026#39; \\ --header \u0026#39;Content-Type: application/json\u0026#39; \\ --header \u0026#39;Authorization: Basic \u0026lt;Base64 of userDN:userPassword\u0026gt;\u0026#39; | json_pp The output will look similar to the following:\n{ \u0026#34;totalResults\u0026#34; : 1, \u0026#34;searchResultEntries\u0026#34; : [ { \u0026#34;dn\u0026#34; : \u0026#34;\u0026#34;, \u0026#34;attributes\u0026#34; : { \u0026#34;vendorVersion\u0026#34; : \u0026#34;Oracle Unified Directory 12.2.1.4.0\u0026#34;, \u0026#34;ds-private-naming-contexts\u0026#34; : [ \u0026#34;cn=admin data\u0026#34;, \u0026#34;cn=ads-truststore\u0026#34;, \u0026#34;cn=backups\u0026#34;, \u0026#34;cn=config\u0026#34;, \u0026#34;cn=monitor\u0026#34;, \u0026#34;cn=schema\u0026#34;, \u0026#34;cn=tasks\u0026#34;, \u0026#34;cn=virtual acis\u0026#34;, \u0026#34;dc=replicationchanges\u0026#34; ], \u0026#34;subschemaSubentry\u0026#34; : \u0026#34;cn=schema\u0026#34;, \u0026#34;vendorName\u0026#34; : \u0026#34;Oracle Corporation\u0026#34; } } ], \u0026#34;msgType\u0026#34; : \u0026#34;urn:ietf:params:rest:schemas:oracle:oud:1.0:SearchResponse\u0026#34; } Command to invoke Admin REST API against specific Oracle Unified Directory Admin Interface:\n$ curl --noproxy \u0026#34;*\u0026#34; -k --insecure --location \\ --request GET \u0026#39;https://oud-ds-rs-admin-0/rest/v1/admin/?scope=base\u0026amp;attributes=vendorName\u0026amp;attributes=vendorVersion\u0026amp;attributes=ds-private-naming-contexts\u0026amp;attributes=subschemaSubentry\u0026#39; \\ --header \u0026#39;Content-Type: application/json\u0026#39; \\ --header \u0026#39;Authorization: Basic \u0026lt;Base64 of userDN:userPassword\u0026gt;\u0026#39; | json_pp Command to invoke Admin REST API against Kubernetes NodePort for Ingress Controller Service\n$ curl --noproxy \u0026#34;*\u0026#34; -k --insecure --location \\ --request GET \u0026#39;https://oud-ds-rs-admin-0:30443/rest/v1/admin/?scope=base\u0026amp;attributes=vendorName\u0026amp;attributes=vendorVersion\u0026amp;attributes=ds-private-naming-contexts\u0026amp;attributes=subschemaSubentry\u0026#39; \\ --header \u0026#39;Content-Type: application/json\u0026#39; \\ --header \u0026#39;Authorization: Basic \u0026lt;Base64 of userDN:userPassword\u0026gt;\u0026#39; | json_pp " +}, +{ + "uri": "/fmw-kubernetes/23.4.1/idm-products/oudsm/configure-ingress/", + "title": "Configure an Ingress for OUDSM", + "tags": [], + "description": "This document provides steps to configure an ingress controller to direct traffic to OUDSM.", + 
"content": " Introduction\n Install NGINX\na. Configure the repository\nb. Create a namespace\nc. Install NGINX using helm\n Access to interfaces through ingress\n Introduction The instructions below explain how to set up NGINX as an ingress for OUDSM.\nInstall NGINX Use Helm to install NGINX.\nConfigure the repository Add the Helm chart repository for installing NGINX using the following command:\n$ helm repo add stable https://kubernetes.github.io/ingress-nginx The output will look similar to the following:\n\u0026quot;stable\u0026quot; has been added to your repositories Update the repository using the following command:\n$ helm repo update The output will look similar to the following:\nHang tight while we grab the latest from your chart repositories... ...Successfully got an update from the \u0026quot;stable\u0026quot; chart repository Update Complete. Happy Helming! Create a namespace Create a Kubernetes namespace for NGINX:\n$ kubectl create namespace \u0026lt;namespace\u0026gt; For example:\n$ kubectl create namespace mynginx The output will look similar to the following:\nnamespace/mynginx created Install NGINX using helm Create a $WORKDIR/kubernetes/helm/nginx-ingress-values-override.yaml that contains the following:\nNote: The configuration below deploys an ingress using LoadBalancer. If you prefer to use NodePort, change the configuration accordingly. For more details about NGINX configuration see: NGINX Ingress Controller.\ncontroller: admissionWebhooks: enabled: false extraArgs: # The secret referred to by this flag contains the default certificate to be used when accessing the catch-all server. # If this flag is not provided NGINX will use a self-signed certificate. # If the TLS Secret is in different namespace, name can be mentioned as \u0026lt;namespace\u0026gt;/\u0026lt;tlsSecretName\u0026gt; default-ssl-certificate: oudsmns/oudsm-tls-cert service: # controller service external IP addresses # externalIPs: # - \u0026lt; External IP Address \u0026gt; # To configure Ingress Controller Service as LoadBalancer type of Service # Based on the Kubernetes configuration, External LoadBalancer would be linked to the Ingress Controller Service type: LoadBalancer # Configuration for NodePort to be used for Ports exposed through Ingress # If NodePorts are not defined/configured, Node Port would be assigned automatically by Kubernetes # These NodePorts are helpful while accessing services directly through Ingress and without having External Load Balancer. nodePorts: # For HTTP Interface exposed through LoadBalancer/Ingress http: 30080 # For HTTPS Interface exposed through LoadBalancer/Ingress https: 30443 To install and configure NGINX ingress issue the following command:\n$ helm install --namespace \u0026lt;namespace\u0026gt; \\ --values nginx-ingress-values-override.yaml \\ lbr-nginx stable/ingress-nginx Where:\n lbr-nginx is your deployment name stable/ingress-nginx is the chart reference For example:\n$ helm install --namespace mynginx \\ --values nginx-ingress-values-override.yaml \\ lbr-nginx stable/ingress-nginx The output will be similar to the following:\nNAME: lbr-nginx LAST DEPLOYED: \u0026lt;DATE\u0026gt; NAMESPACE: mynginx STATUS: deployed REVISION: 1 TEST SUITE: None NOTES: The ingress-nginx controller has been installed. It may take a few minutes for the LoadBalancer IP to be available. 
You can watch the status by running 'kubectl --namespace mynginx get services -o wide -w lbr-nginx-ingress-nginx-controller' An example Ingress that makes use of the controller: apiVersion: networking.k8s.io/v1 kind: Ingress metadata: name: example namespace: foo spec: ingressClassName: nginx rules: - host: www.example.com http: paths: - pathType: Prefix backend: service: name: exampleService port: number: 80 path: / # This section is only required if TLS is to be enabled for the Ingress tls: - hosts: - www.example.com secretName: example-tls If TLS is enabled for the Ingress, a Secret containing the certificate and key must also be provided: apiVersion: v1 kind: Secret metadata: name: example-tls namespace: foo data: tls.crt: \u0026lt;base64 encoded cert\u0026gt; tls.key: \u0026lt;base64 encoded key\u0026gt; type: kubernetes.io/tls Access to interfaces through ingress Using the Helm chart, ingress objects are created according to configuration. The following table details the rules configured in ingress object(s) for access to Oracle Unified Directory Services Manager Interfaces through ingress.\n Port NodePort Host Example Hostname Path Backend Service:Port Example Service Name:Port http/https 30080/30443 \u0026lt;deployment/release name\u0026gt;-N oudsm-N * \u0026lt;deployment/release name\u0026gt;-N:http oudsm-1:http http/https 30080/30443 * * /oudsm/console \u0026lt;deployment/release name\u0026gt;-lbr:http oudsm-lbr:http In the table above, the Example Name for each Object is based on the value \u0026lsquo;oudsm\u0026rsquo; as the deployment/release name for the Helm chart installation. The NodePorts mentioned in the table are according to ingress configuration described in previous section. When an External LoadBalancer is not available/configured, interfaces can be accessed through NodePort on the Kubernetes node. Changes in /etc/hosts to validate hostname based ingress rules If it is not possible to have LoadBalancer configuration updated to have host names added for Oracle Unified Directory Services Manager Interfaces, then the following entries can be added in /etc/hosts files on the host from where Oracle Unified Directory Services Manager interfaces would be accessed.\n\u0026lt;IP Address of External LBR or Kubernetes Node\u0026gt;\toudsm oudsm-1 oudsm-2 oudsm-N In the table above, host names are based on the value \u0026lsquo;oudsm\u0026rsquo; as the deployment/release name for the Helm chart installation. When an External LoadBalancer is not available/configured, Interfaces can be accessed through NodePort on the Kubernetes Node. Validate OUDSM URL\u0026rsquo;s Launch a browser and access the OUDSM console.\n If using an External LoadBalancer: https://\u0026lt;External LBR Host\u0026gt;/oudsm. If not using an External LoadBalancer use https://\u0026lt;Kubernetes Node\u0026gt;:30443/oudsm. Access the WebLogic Administration console by accessing the following URL and login with weblogic/\u0026lt;password\u0026gt; where weblogic/\u0026lt;password\u0026gt; is the adminUser and adminPass set when creating the OUDSM instance.\n If using an External LoadBalancer: https://\u0026lt;External LBR Host\u0026gt;/console. If not using an External LoadBalancer use https://\u0026lt;Kubernetes Node\u0026gt;:30443/console. 
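As a quick command line check (an illustrative example only, assuming the NodePort 30443 configured above), you can also verify that the OUDSM console responds through the ingress:\n$ curl -k -I https://\u0026lt;Kubernetes Node\u0026gt;:30443/oudsm A successful HTTP response code (for example 200 or 302) indicates the console is reachable. 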
" +}, +{ + "uri": "/fmw-kubernetes/23.4.1/idm-products/oud/manage-oud-containers/", + "title": "Manage Oracle Unified Directory Containers", + "tags": [], + "description": "This document provides steps manage Oracle Unified Directory containers.", + "content": "Important considerations for Oracle Unified Directory instances in Kubernetes.\n a) Scaling Up/Down OUD Pods Describes the steps for scaling up/down for OUD pods.\n b) Logging and Visualization for Helm Chart oud-ds-rs Deployment Describes the steps for logging and visualization with Elasticsearch and Kibana.\n c) Monitoring an Oracle Unified Directory Instance Describes the steps for Monitoring the Oracle Unified Directory environment.\n d. Kubernetes Horizontal Pod Autoscaler Describes the steps for implementing the Horizontal Pod Autoscaler.\n " +}, +{ + "uri": "/fmw-kubernetes/23.4.1/idm-products/oudsm/manage-oudsm-containers/", + "title": "Manage Oracle Unified Directory Services Manager Containers", + "tags": [], + "description": "This document provides steps to manage Oracle Unified Directory Services Manager containers.", + "content": "Important considerations for Oracle Unified Directory Services Manager instances in Kubernetes.\n a) Scaling Up/Down OUDSM Pods Describes the steps for scaling up/down for OUDSM pods.\n b) Logging and Visualization for Helm Chart oudsm Deployment Describes the steps for logging and visualization with Elasticsearch and Kibana.\n c) Monitoring an Oracle Unified Directory Services Manager Instance Describes the steps for Monitoring the Oracle Unified Directory Services Manager environment.\n " +}, +{ + "uri": "/fmw-kubernetes/23.4.1/idm-products/oam/validate-domain-urls/", + "title": "Validate Domain URLs", + "tags": [], + "description": "Sample for validating domain urls.", + "content": "In this section you validate the OAM domain URLs are accessible via the NGINX ingress.\nMake sure you know the master hostname and ingress port for NGINX before proceeding.\nValidate the OAM domain urls via the Ingress Launch a browser and access the following URL\u0026rsquo;s. Login with the weblogic username and password (weblogic/\u0026lt;password\u0026gt;).\nNote: If using a load balancer for your ingress replace ${MASTERNODE-HOSTNAME}:${MASTERNODE-PORT} with ${LOADBALANCER-HOSTNAME}:${LOADBALANCER-PORT}.\n Console or Page URL WebLogic Administration Console https://${MASTERNODE-HOSTNAME}:${MASTERNODE-PORT}/console Oracle Enterprise Manager Console https://${MASTERNODE-HOSTNAME}:${MASTERNODE-PORT}/em Oracle Access Management Console https://${MASTERNODE-HOSTNAME}:${MASTERNODE-PORT}/oamconsole Oracle Access Management Console https://${MASTERNODE-HOSTNAME}:${MASTERNODE-PORT}/access Logout URL https://${MASTERNODE-HOSTNAME}:${MASTERNODE-PORT}/oam/server/logout Note: WebLogic Administration Console and Oracle Enterprise Manager Console should only be used to monitor the servers in the OAM domain. To control the Administration Server and OAM Managed Servers (start/stop) you must use Kubernetes. See Domain Life Cycle for more information.\nThe browser will give certificate errors if you used a self signed certificate and have not imported it into the browsers Certificate Authority store. 
If this occurs you can proceed with the connection and ignore the errors.\nAfter validating the URLs proceed to Post Install Configuration.\n" +}, +{ + "uri": "/fmw-kubernetes/23.4.1/idm-products/oig/validate-domain-urls/", + "title": "Validate domain URLs", + "tags": [], + "description": "Sample for validating domain URLs.", + "content": "In this section you validate that the OIG domain URLs are accessible via the NGINX ingress.\nMake sure you know the master hostname and port before proceeding.\nValidate the OIG domain URLs via the ingress Launch a browser and access the following URLs. Use http or https depending on whether you configured your ingress for non-SSL or SSL.\nLogin to the WebLogic Administration Console and Oracle Enterprise Manager Console with the WebLogic username and password (weblogic/\u0026lt;password\u0026gt;).\nLogin to Oracle Identity Governance with the xelsysadm username and password (xelsysadm/\u0026lt;password\u0026gt;).\nNote: If using a load balancer for your ingress replace ${MASTERNODE-HOSTNAME}:${MASTERNODE-PORT} with ${LOADBALANCER-HOSTNAME}:${LOADBALANCER-PORT}.\n Console or Page URL WebLogic Administration Console https://${MASTERNODE-HOSTNAME}:${MASTERNODE-PORT}/console Oracle Enterprise Manager Console https://${MASTERNODE-HOSTNAME}:${MASTERNODE-PORT}/em Oracle Identity System Administration https://${MASTERNODE-HOSTNAME}:${MASTERNODE-PORT}/sysadmin Oracle Identity Self Service https://${MASTERNODE-HOSTNAME}:${MASTERNODE-PORT}/identity Note: WebLogic Administration Console and Oracle Enterprise Manager Console should only be used to monitor the servers in the OIG domain. To control the Administration Server and OIG Managed Servers (start/stop) you must use Kubernetes. See Domain Life Cycle for more information.\nThe browser will give certificate errors if you used a self-signed certificate and have not imported it into the browser's Certificate Authority store. If this occurs you can proceed with the connection and ignore the errors.\nAfter the URLs have been verified, follow Post install configuration.\n" +}, +{ + "uri": "/fmw-kubernetes/23.4.1/idm-products/oud/create-or-update-image/", + "title": "Create or update an image", + "tags": [], + "description": "Create or update an Oracle Unified Directory (OUD) container image used for deploying OUD domains.", + "content": "As described in Prepare Your Environment you can create your own OUD container image. If you have access to My Oracle Support (MOS), and there is a need to build a new image with an interim or one-off patch, it is recommended to use the WebLogic Image Tool to build an Oracle Unified Directory image for production deployments.\nCreate or update an Oracle Unified Directory image using the WebLogic Image Tool Using the WebLogic Image Tool, you can create a new Oracle Unified Directory image with PSUs and interim patches or update an existing image with one or more interim patches.\n Recommendations:\n Use create for creating a new Oracle Unified Directory image containing the Oracle Unified Directory binaries, bundle patch and interim patches. This is the recommended approach if you have access to the OUD patches because it optimizes the size of the image. Use update for patching an existing Oracle Unified Directory image with a single interim patch. Note that the patched image size may increase considerably due to additional image layers introduced by the patch application tool. 
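To make the create versus update choice concrete, the two approaches differ mainly in the top-level imagetool invocation. The skeleton below is a sketch only; the tags, patch IDs and file paths are placeholders, and complete working examples follow in the sections below:
# Create a new image from the installers and patches listed in a buildArgs file:
$ imagetool @<absolute path to buildargs file> --fromImage ghcr.io/oracle/oraclelinux:7-slim
# Update an existing image with a single interim patch:
$ imagetool update --fromImage oracle/oud:12.2.1.4.0 --tag=oracle/oud-new:12.2.1.4.0 --patches=<patch>_12.2.1.4.0 --opatchBugNumber=28186730_13.9.4.2.8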
Create an image Set up the WebLogic Image Tool Prerequisites Set up the WebLogic Image Tool Validate setup WebLogic Image Tool build directory WebLogic Image Tool cache Prerequisites Verify that your environment meets the following prerequisites:\n Docker client and daemon on the build machine, with minimum Docker version 18.03.1.ce. Bash version 4.0 or later, to enable the command complete feature. JAVA_HOME environment variable set to the appropriate JDK location, e.g. /scratch/export/oracle/product/jdk Set up the WebLogic Image Tool To set up the WebLogic Image Tool:\n Create a working directory and change to it:\n$ mkdir \u0026lt;workdir\u0026gt; $ cd \u0026lt;workdir\u0026gt; For example:\n$ mkdir /scratch/imagetool-setup $ cd /scratch/imagetool-setup Download the latest version of the WebLogic Image Tool from the releases page.\n$ wget https://github.com/oracle/weblogic-image-tool/releases/download/release-X.X.X/imagetool.zip where X.X.X is the latest release referenced on the releases page.\n Unzip the release ZIP file in the imagetool-setup directory.\n$ unzip imagetool.zip Execute the following commands to set up the WebLogic Image Tool:\n$ cd \u0026lt;workdir\u0026gt;/imagetool-setup/imagetool/bin $ source setup.sh For example:\n$ cd /scratch/imagetool-setup/imagetool/bin $ source setup.sh Validate setup To validate the setup of the WebLogic Image Tool:\n Enter the following command to retrieve the version of the WebLogic Image Tool:\n$ imagetool --version Enter imagetool then press the Tab key to display the available imagetool commands:\n$ imagetool \u0026lt;TAB\u0026gt; cache create help rebase update WebLogic Image Tool build directory The WebLogic Image Tool creates a temporary Docker context directory, prefixed by wlsimgbuilder_temp, every time the tool runs. Under normal circumstances, this context directory will be deleted. However, if the process is aborted or the tool is unable to remove the directory, it is safe for you to delete it manually. By default, the WebLogic Image Tool creates the Docker context directory under the user\u0026rsquo;s home directory. If you prefer to use a different directory for the temporary context, set the environment variable WLSIMG_BLDDIR:\n$ export WLSIMG_BLDDIR=\u0026#34;/path/to/build/dir\u0026#34; WebLogic Image Tool cache The WebLogic Image Tool maintains a local file cache store. This store is used to look up where the Java, WebLogic Server installers, and WebLogic Server patches reside in the local file system. By default, the cache store is located in the user\u0026rsquo;s $HOME/cache directory. Under this directory, the lookup information is stored in the .metadata file. All automatically downloaded patches also reside in this directory. You can change the default cache store location by setting the environment variable WLSIMG_CACHEDIR:\n$ export WLSIMG_CACHEDIR=\u0026#34;/path/to/cachedir\u0026#34; Set up additional build scripts Creating an Oracle Unified Directory container image using the WebLogic Image Tool requires additional container scripts for Oracle Unified Directory domains.\n Clone the docker-images repository to set up those scripts. 
In these steps, this directory is DOCKER_REPO:\n$ cd \u0026lt;workdir\u0026gt;/imagetool-setup $ git clone https://github.com/oracle/docker-images.git For example:\n$ cd /scratch/imagetool-setup $ git clone https://github.com/oracle/docker-images.git Note: If you want to create the image continue with the following steps, otherwise to update the image see update an image.\n Create an image After setting up the WebLogic Image Tool, follow these steps to use the WebLogic Image Tool to create a new Oracle Unified Directory image.\nDownload the Oracle Unified Directory installation binaries and patches You must download the required Oracle Unified Directory installation binaries and patches as listed below from the Oracle Software Delivery Cloud and save them in a directory of your choice.\nThe installation binaries and patches required are:\n Oracle Unified Directory 12.2.1.4.0\n fmw_12.2.1.4.0_oud.jar OUD Patches:\n View document ID 2723908.1 on My Oracle Support. In the Container Image Download/Patch Details section, locate the Oracle Unified Directory (OUD) table. For the latest PSU click the README link in the Documentation column. In the README, locate the \u0026ldquo;Installed Software\u0026rdquo; section. All the patch numbers to be download are listed here. Download all these individual patches from My Oracle Support. Oracle JDK v8\n jdk-8uXXX-linux-x64.tar.gz where XXX is the JDK version referenced in the README above. Update required build files The following files in the code repository location \u0026lt;imagetool-setup-location\u0026gt;/docker-images/OracleUnifiedDirectory/imagetool/12.2.1.4.0 are used for creating the image:\n additionalBuildCmds.txt buildArgs Edit the \u0026lt;workdir\u0026gt;/imagetool-setup/docker-images/OracleUnifiedDirectory/imagetool/12.2.1.4.0/buildArgs file and change %DOCKER_REPO%,%JDK_VERSION% and %BUILDTAG% appropriately.\nFor example:\ncreate --jdkVersion=8u321 --type oud --version=12.2.1.4.0 --tag=oud-latestpsu:12.2.1.4.0 --pull --installerResponseFile /scratch/imagetool-setup/docker-images/OracleUnifiedDirectory/dockerfiles/12.2.1.4.0/install/oud.response --additionalBuildCommands /scratch/imagetool-setup/docker-images/OracleUnifiedDirectory/imagetool/12.2.1.4.0/additionalBuildCmds.txt --additionalBuildFiles /scratch/imagetool-setup/docker-images/OracleUnifiedDirectory/dockerfiles/12.2.1.4.0/container-scripts The \u0026lt;workdir\u0026gt;/imagetool-setup/imagetool/docker-images/OracleUnifiedDirectory/imagetool/12.2.1.4.0/additionalBuildCmds.txt contains additional build commands. You may edit this file if you want to customize the image further.\n Create the image Add a JDK package to the WebLogic Image Tool cache. For example:\n$ imagetool cache addInstaller --type jdk --version 8uXXX --path \u0026lt;download location\u0026gt;/jdk-8uXXX-linux-x64.tar.gz where XXX is the JDK version downloaded\n Add the downloaded installation binaries to the WebLogic Image Tool cache. For example:\n$ imagetool cache addInstaller --type OUD --version 12.2.1.4.0 --path \u0026lt;download location\u0026gt;/fmw_12.2.1.4.0_oud.jar Add the downloaded OPatch patch to the WebLogic Image Tool cache. 
For example:\n$ imagetool cache addEntry --key 28186730_13.9.4.2.8 --value \u0026lt;download location\u0026gt;/p28186730_139428_Generic.zip Add the rest of the downloaded product patches to the WebLogic Image Tool cache:\n$ imagetool cache addEntry --key \u0026lt;patch\u0026gt;_12.2.1.4.0 --value \u0026lt;download location\u0026gt;/p\u0026lt;patch\u0026gt;_122140_Generic.zip For example:\n$ imagetool cache addEntry --key 32971905_12.2.1.4.0 --value \u0026lt;download location\u0026gt;/p33448950_122140_Generic.zip Edit the \u0026lt;workdir\u0026gt;/imagetool-setup/docker-images/OracleUnifiedDirectory/imagetool/12.2.1.4.0/buildArgs file and append the product patches and opatch patch as follows:\n--patches 33448950_12.2.1.4.0 --opatchBugNumber=28186730_13.9.4.2.8 An example buildArgs file is now as follows:\ncreate --jdkVersion=8u321 --type oud --version=12.2.1.4.0 --tag=oud-latestpsu:12.2.1.4.0 --pull --installerResponseFile /scratch/imagetool-setup/docker-images/OracleUnifiedDirectory/dockerfiles/12.2.1.4.0/install/oud.response --additionalBuildCommands /scratch/imagetool-setup/docker-images/OracleUnifiedDirectory/imagetool/12.2.1.4.0/additionalBuildCmds.txt --additionalBuildFiles /scratch/imagetool-setup/docker-images/OracleUnifiedDirectory/dockerfiles/12.2.1.4.0/container-scripts --patches 33448950_12.2.1.4.0 --opatchBugNumber=28186730_13.9.4.2.8 Note: In the buildArgs file:\n --jdkVersion value must match the --version value used in the imagetool cache addInstaller command for --type jdk. --version value must match the --version value used in the imagetool cache addInstaller command for --type OUD. Refer to this page for the complete list of options available with the WebLogic Image Tool create command.\n Create the Oracle Unified Directory image:\n$ imagetool @\u0026lt;absolute path to buildargs file\u0026gt; --fromImage ghcr.io/oracle/oraclelinux:7-slim Note: Make sure that the absolute path to the buildargs file is prepended with a @ character, as shown in the example above.\n For example:\n$ imagetool @\u0026lt;imagetool-setup-location\u0026gt;/docker-images/OracleUnifiedDirectory/imagetool/12.2.1.4.0/buildArgs --fromImage ghcr.io/oracle/oraclelinux:7-slim Check the created image using the docker images command:\n$ docker images | grep oud The output will look similar to the following:\noud-latestpsu 12.2.1.4.0 30b02a692fa3 About a minute ago 1.04GB Run the following command to save the container image to a tar file:\n$ docker save -o \u0026lt;path\u0026gt;/\u0026lt;file\u0026gt;.tar \u0026lt;image\u0026gt; For example:\n$ docker save -o $WORKDIR/oud-latestpsu.tar oud-latestpsu:12.2.1.4.0 Update an image The steps below show how to update an existing Oracle Unified Directory image with an interim patch.\nThe container image to be patched must be loaded in the local docker images repository before attempting these steps.\nIn the examples below the image oracle/oud:12.2.1.4.0 is updated with an interim patch.\n$ docker images REPOSITORY TAG IMAGE ID CREATED SIZE oracle/oud 12.2.1.4.0 b051804ba15f 3 months ago 1.04GB Set up the WebLogic Image Tool.\n Download the required interim patch and latest Opatch (28186730) from My Oracle Support. 
Save them in a directory of your choice.\n Add the OPatch patch to the WebLogic Image Tool cache, for example:\n$ imagetool cache addEntry --key 28186730_13.9.4.2.8 --value \u0026lt;downloaded-patches-location\u0026gt;/p28186730_139428_Generic.zip Execute the imagetool cache addEntry command for each patch to add the required patch(es) to the WebLogic Image Tool cache. For example, to add patch p33521773_12214211008_Generic.zip:\n$ imagetool cache addEntry --key=33521773_12.2.1.4.211008 --value \u0026lt;downloaded-patches-location\u0026gt;/p33521773_12214211008_Generic.zip Provide the following arguments to the WebLogic Image Tool update command:\n --fromImage - Identify the image that needs to be updated. In the example below, the image to be updated is oracle/oud:12.2.1.4.0. --patches - Multiple patches can be specified as a comma-separated list. --tag - Specify the new tag to be applied for the image being built. Refer here for the complete list of options available with the WebLogic Image Tool update command.\n Note: The WebLogic Image Tool cache should have the latest OPatch zip. The WebLogic Image Tool will update the OPatch if it is not already updated in the image.\n For example:\n$ imagetool update --fromImage oracle/oud:12.2.1.4.0 --tag=oracle/oud-new:12.2.1.4.0 --patches=33521773_12.2.1.4.211008 --opatchBugNumber=28186730_13.9.4.2.8 Note: If the command fails because the files in the image being upgraded are not owned by oracle:oracle, then add the parameter --chown \u0026lt;userid\u0026gt;:\u0026lt;groupid\u0026gt; to correspond with the values returned in the error.\n Check the built image using the docker images command:\n$ docker images | grep oud The output will look similar to the following:\nREPOSITORY TAG IMAGE ID CREATED SIZE oracle/oud-new 12.2.1.4.0 78ccd1ad67eb 5 minutes ago 1.11GB oracle/oud 12.2.1.4.0 b051804ba15f 3 months ago 1.04GB Run the following command to save the patched container image to a tar file:\n$ docker save -o \u0026lt;path\u0026gt;/\u0026lt;file\u0026gt;.tar \u0026lt;image\u0026gt; For example:\n$ docker save -o $WORKDIR/oud-new.tar oracle/oud-new:12.2.1.4.0 " +}, +{ + "uri": "/fmw-kubernetes/23.4.1/idm-products/oudsm/create-or-update-image/", + "title": "Create or update an image", + "tags": [], + "description": "Create or update an Oracle Unified Directory Services Manager (OUDSM) container image used for deploying OUDSM domains.", + "content": "As described in Prepare Your Environment you can create your own OUDSM container image. If you have access to My Oracle Support (MOS), and there is a need to build a new image with an interim or one-off patch, it is recommended to use the WebLogic Image Tool to build an Oracle Unified Directory Services Manager image for production deployments.\nCreate or update an Oracle Unified Directory Services Manager image using the WebLogic Image Tool Using the WebLogic Image Tool, you can create a new Oracle Unified Directory Services Manager image with PSUs and interim patches or update an existing image with one or more interim patches.\n Recommendations:\n Use create for creating a new Oracle Unified Directory Services Manager image containing the Oracle Unified Directory Services Manager binaries, bundle patch and interim patches. This is the recommended approach if you have access to the OUDSM patches because it optimizes the size of the image. Use update for patching an existing Oracle Unified Directory Services Manager image with a single interim patch. Note that the patched image size may increase considerably due to additional image layers introduced by the patch application tool. 
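Note: if you are not using Oracle Container Registry or your own container registry, an image saved to a tar file with docker save (as in the examples above) must be made available on every Kubernetes worker node before it can be used by a deployment. The commands below are a sketch only; the user, node name, and tar file name are illustrative:
# Copy the tar file to each worker node and load it into the local image store:
$ scp $WORKDIR/oud-new.tar <user>@<worker-node>:/tmp/oud-new.tar
$ ssh <user>@<worker-node> "docker load -i /tmp/oud-new.tar"
# Confirm the image is now present on the node:
$ ssh <user>@<worker-node> "docker images | grep oud-new"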
Create an image Set up the WebLogic Image Tool Prerequisites Set up the WebLogic Image Tool Validate setup WebLogic Image Tool build directory WebLogic Image Tool cache Prerequisites Verify that your environment meets the following prerequisites:\n Docker client and daemon on the build machine, with minimum Docker version 18.03.1.ce. Bash version 4.0 or later, to enable the command complete feature. JAVA_HOME environment variable set to the appropriate JDK location, e.g. /scratch/export/oracle/product/jdk Set up the WebLogic Image Tool To set up the WebLogic Image Tool:\n Create a working directory and change to it:\n$ mkdir \u0026lt;workdir\u0026gt; $ cd \u0026lt;workdir\u0026gt; For example:\n$ mkdir /scratch/imagetool-setup $ cd /scratch/imagetool-setup Download the latest version of the WebLogic Image Tool from the releases page.\n$ wget https://github.com/oracle/weblogic-image-tool/releases/download/release-X.X.X/imagetool.zip where X.X.X is the latest release referenced on the releases page.\n Unzip the release ZIP file in the imagetool-setup directory.\n$ unzip imagetool.zip Execute the following commands to set up the WebLogic Image Tool:\n$ cd \u0026lt;workdir\u0026gt;/imagetool-setup/imagetool/bin $ source setup.sh For example:\n$ cd /scratch/imagetool-setup/imagetool/bin $ source setup.sh Validate setup To validate the setup of the WebLogic Image Tool:\n Enter the following command to retrieve the version of the WebLogic Image Tool:\n$ imagetool --version Enter imagetool then press the Tab key to display the available imagetool commands:\n$ imagetool \u0026lt;TAB\u0026gt; cache create help rebase update WebLogic Image Tool build directory The WebLogic Image Tool creates a temporary Docker context directory, prefixed by wlsimgbuilder_temp, every time the tool runs. Under normal circumstances, this context directory will be deleted. However, if the process is aborted or the tool is unable to remove the directory, it is safe for you to delete it manually. By default, the WebLogic Image Tool creates the Docker context directory under the user\u0026rsquo;s home directory. If you prefer to use a different directory for the temporary context, set the environment variable WLSIMG_BLDDIR:\n$ export WLSIMG_BLDDIR=\u0026#34;/path/to/build/dir\u0026#34; WebLogic Image Tool cache The WebLogic Image Tool maintains a local file cache store. This store is used to look up where the Java, WebLogic Server installers, and WebLogic Server patches reside in the local file system. By default, the cache store is located in the user\u0026rsquo;s $HOME/cache directory. Under this directory, the lookup information is stored in the .metadata file. All automatically downloaded patches also reside in this directory. You can change the default cache store location by setting the environment variable WLSIMG_CACHEDIR:\n$ export WLSIMG_CACHEDIR=\u0026#34;/path/to/cachedir\u0026#34; Set up additional build scripts Creating an Oracle Unified Directory Services Manager container image using the WebLogic Image Tool requires additional container scripts for Oracle Unified Directory Services Manager domains.\n Clone the docker-images repository to set up those scripts. 
In these steps, this directory is DOCKER_REPO:\n$ cd \u0026lt;workdir\u0026gt;/imagetool-setup $ git clone https://github.com/oracle/docker-images.git For example:\n$ cd /scratch/imagetool-setup $ git clone https://github.com/oracle/docker-images.git Note: If you want to create the image continue with the following steps, otherwise to update the image see update an image.\n Create an image After setting up the WebLogic Image Tool, follow these steps to use the WebLogic Image Tool to create a new Oracle Unified Directory Services Manager image.\nDownload the Oracle Unified Directory Services Manager installation binaries and patches You must download the required Oracle Unified Directory Services Manager installation binaries and patches as listed below from the Oracle Software Delivery Cloud and save them in a directory of your choice.\nThe installation binaries and patches required are:\n Oracle Unified Directory 12.2.1.4.0\n fmw_12.2.1.4.0_oud.jar Oracle Fusion Middleware 12c Infrastructure 12.2.1.4.0\n fmw_12.2.1.4.0_infrastructure.jar OUDSM and FMW Infrastructure Patches:\n View document ID 2723908.1 on My Oracle Support. In the Container Image Download/Patch Details section, locate the Oracle Unified Directory Services Manager (OUDSM) table. For the latest PSU click the README link in the Documentation column. In the README, locate the \u0026ldquo;Installed Software\u0026rdquo; section. All the patch numbers to be download are listed here. Download all these individual patches from My Oracle Support. Oracle JDK v8\n jdk-8uXXX-linux-x64.tar.gz where XXX is the JDK version referenced in the README above. Update required build files The following files in the code repository location \u0026lt;imagetool-setup-location\u0026gt;/docker-images/OracleUnifiedDirectorySM/imagetool/12.2.1.4.0 are used for creating the image:\n additionalBuildCmds.txt buildArgs Edit the \u0026lt;workdir\u0026gt;/imagetool-setup/docker-images/OracleUnifiedDirectorySM/imagetool/12.2.1.4.0/buildArgs file and change %DOCKER_REPO%,%JDK_VERSION% and %BUILDTAG% appropriately.\nFor example:\ncreate --jdkVersion=8u321 --type oud_wls --version=12.2.1.4.0 --tag=oudsm-latestpsu:12.2.1.4.0 --pull --installerResponseFile /scratch/imagetool-setup/docker-images/OracleFMWInfrastructure/dockerfiles/12.2.1.4/install.file,/scratch/imagetool-setup/docker-images/OracleUnifiedDirectorySM/dockerfiles/12.2.1.4.0/install/oud.response --additionalBuildCommands /scratch/imagetool-setup/docker-images/OracleUnifiedDirectorySM/imagetool/12.2.1.4.0/additionalBuildCmds.txt --additionalBuildFiles /scratch/imagetool-setup/docker-images/OracleUnifiedDirectorySM/dockerfiles/12.2.1.4.0/container-scripts The \u0026lt;workdir\u0026gt;/imagetool-setup/imagetool/docker-images/OracleUnifiedDirectorySM/imagetool/12.2.1.4.0/additionalBuildCmds.txt contains additional build commands. You may edit this file if you want to customize the image further.\n Edit the \u0026lt;workdir\u0026gt;/imagetool-setup/docker-images/OracleFMWInfrastructure/dockerfiles/12.2.1.4/install.file and under the GENERIC section add the line INSTALL_TYPE=\u0026quot;Fusion Middleware Infrastructure\u0026rdquo;. For example:\n[GENERIC] INSTALL_TYPE=\u0026quot;Fusion Middleware Infrastructure\u0026quot; DECLINE_SECURITY_UPDATES=true SECURITY_UPDATES_VIA_MYORACLESUPPORT=false Create the image Add a JDK package to the WebLogic Image Tool cache. 
For example:\n$ imagetool cache addInstaller --type jdk --version 8uXXX --path \u0026lt;download location\u0026gt;/jdk-8uXXX-linux-x64.tar.gz where XXX is the JDK version downloaded\n Add the downloaded installation binaries to the WebLogic Image Tool cache. For example:\n$ imagetool cache addInstaller --type OUD --version 12.2.1.4.0 --path \u0026lt;download location\u0026gt;/fmw_12.2.1.4.0_oud.jar $ imagetool cache addInstaller --type fmw --version 12.2.1.4.0 --path \u0026lt;download location\u0026gt;/fmw_12.2.1.4.0_infrastructure.jar Add the downloaded OPatch patch to the WebLogic Image Tool cache. For example:\n$ imagetool cache addEntry --key 28186730_13.9.4.2.8 --value \u0026lt;download location\u0026gt;/p28186730_139428_Generic.zip Add the rest of the downloaded product patches to the WebLogic Image Tool cache:\n$ imagetool cache addEntry --key \u0026lt;patch\u0026gt;_12.2.1.4.0 --value \u0026lt;download location\u0026gt;/p\u0026lt;patch\u0026gt;_122140_Generic.zip For example:\n$ imagetool cache addEntry --key 33727616_12.2.1.4.0 --value \u0026lt;download location\u0026gt;/p33727616_122140_Generic.zip $ imagetool cache addEntry --key 33093748_12.2.1.4.0 --value \u0026lt;download location\u0026gt;/p33093748_122140_Generic.zip $ imagetool cache addEntry --key 32720458_12.2.1.4.0 --value \u0026lt;download location\u0026gt;/p32720458_122140_Generic.zip $ imagetool cache addEntry --key 33791665_12.2.1.4.220105 --value \u0026lt;download location\u0026gt;/p33791665_12214220105_Generic.zip $ imagetool cache addEntry --key 33723124_12.2.1.4.0 --value \u0026lt;download location\u0026gt;/p33723124_122140_Generic.zip $ imagetool cache addEntry --key 32647448_12.2.1.4.0 --value \u0026lt;download location\u0026gt;/p32647448_122140_Linux-x86-64.zip $ imagetool cache addEntry --key 33591019_12.2.1.4.0 --value \u0026lt;download location\u0026gt;/p33591019_122140_Generic.zip $ imagetool cache addEntry --key 32999272_12.2.1.4.0 --value \u0026lt;download location\u0026gt;/p32999272_122140_Generic.zip $ imagetool cache addEntry --key 33448950_12.2.1.4.0 --value \u0026lt;download location\u0026gt;/p33448950_122140_Generic.zip $ imagetool cache addEntry --key 33697227_12.2.1.4.0 --value \u0026lt;download location\u0026gt;/p33697227_122140_Generic.zip $ imagetool cache addEntry --key 33678607_12.2.1.4.0 --value \u0026lt;download location\u0026gt;/p33678607_122140_Generic.zip $ imagetool cache addEntry --key 33735326_12.2.1.4.220105 --value \u0026lt;download location\u0026gt;/p33735326_12214220105_Generic.zip Edit the \u0026lt;workdir\u0026gt;/imagetool-setup/docker-images/OracleUnifiedDirectorySM/imagetool/12.2.1.4.0/buildArgs file and append the product patches and opatch patch as follows:\n--patches 33727616_12.2.1.4.0,33093748_12.2.1.4.0,32720458_12.2.1.4.0,33791665_12.2.1.4.220105,33723124_12.2.1.4.0,32647448_12.2.1.4.0,33591019_12.2.1.4.0,32999272_12.2.1.4.0,33448950_12.2.1.4.0,32999272_12.2.1.4.0,33448950_12.2.1.4.0,33697227_12.2.1.4.0,33678607_12.2.1.4.0,33735326_12.2.1.4.220105 --opatchBugNumber=28186730_13.9.4.2.8 An example buildArgs file is now as follows:\ncreate --jdkVersion=8u321 --type oud_wls --version=12.2.1.4.0 --tag=oudsm-latestpsu:12.2.1.4.0 --pull --installerResponseFile /scratch/imagetool-setup/docker-images/OracleFMWInfrastructure/dockerfiles/12.2.1.4/install.file,/scratch/imagetool-setup/docker-images/OracleUnifiedDirectorySM/dockerfiles/12.2.1.4.0/install/oud.response --additionalBuildCommands 
/scratch/imagetool-setup/docker-images/OracleUnifiedDirectorySM/imagetool/12.2.1.4.0/additionalBuildCmds.txt --additionalBuildFiles /scratch/imagetool-setup/docker-images/OracleUnifiedDirectorySM/dockerfiles/12.2.1.4.0/container-scripts --patches 33727616_12.2.1.4.0,33093748_12.2.1.4.0,32720458_12.2.1.4.0,33791665_12.2.1.4.220105,33723124_12.2.1.4.0,32647448_12.2.1.4.0,33591019_12.2.1.4.0,32999272_12.2.1.4.0,33448950_12.2.1.4.0,33448950_12.2.1.4.0,33697227_12.2.1.4.0,33678607_12.2.1.4.0,33735326_12.2.1.4.220105 --opatchBugNumber=28186730_13.9.4.2.8 Note: In the buildArgs file:\n --jdkVersion value must match the --version value used in the imagetool cache addInstaller command for --type jdk. --version value must match the --version value used in the imagetool cache addInstaller command for --type OUDSM. Refer to this page for the complete list of options available with the WebLogic Image Tool create command.\n Create the Oracle Unified Directory Services Manager image:\n$ imagetool @\u0026lt;absolute path to buildargs file\u0026gt; --fromImage ghcr.io/oracle/oraclelinux:7-slim Note: Make sure that the absolute path to the buildargs file is prepended with a @ character, as shown in the example above.\n For example:\n$ imagetool @\u0026lt;imagetool-setup-location\u0026gt;/docker-images/OracleUnifiedDirectorySM/imagetool/12.2.1.4.0/buildArgs --fromImage ghcr.io/oracle/oraclelinux:7-slim Check the created image using the docker images command:\n$ docker images | grep oudsm The output will look similar to the following:\noudsm-latestpsu 12.2.1.4.0 f6dd9d2ca0e6 4 minutes ago 3.72GB Run the following command to save the container image to a tar file:\n$ docker save -o \u0026lt;path\u0026gt;/\u0026lt;file\u0026gt;.tar \u0026lt;image\u0026gt; For example:\n$ docker save -o $WORKDIR/oudsm-latestpsu.tar oudsm-latestpsu:12.2.1.4.0 Update an image The steps below show how to update an existing Oracle Unified Directory Services Manager image with an interim patch.\nThe container image to be patched must be loaded in the local docker images repository before attempting these steps.\nIn the examples below the image oracle/oudsm:12.2.1.4.0 is updated with an interim patch.\n$ docker images REPOSITORY TAG IMAGE ID CREATED SIZE oracle/oudsm 12.2.1.4.0 b051804ba15f 3 months ago 3.72GB Set up the WebLogic Image Tool.\n Download the required interim patch and latest Opatch (28186730) from My Oracle Support. and save them in a directory of your choice.\n Add the OPatch patch to the WebLogic Image Tool cache, for example:\n$ imagetool cache addEntry --key 28186730_13.9.4.2.8 --value \u0026lt;downloaded-patches-location\u0026gt;/p28186730_139428_Generic.zip Execute the imagetool cache addEntry command for each patch to add the required patch(es) to the WebLogic Image Tool cache. For example, to add patch p33521773_12214211008_Generic.zip:\n$ imagetool cache addEntry --key=33521773_12.2.1.4.211008 --value \u0026lt;downloaded-patches-location\u0026gt;/p33521773_12214211008_Generic.zip Provide the following arguments to the WebLogic Image Tool update command:\n –-fromImage - Identify the image that needs to be updated. In the example below, the image to be updated is oracle/oudsm:12.2.1.4.0. –-patches - Multiple patches can be specified as a comma-separated list. --tag - Specify the new tag to be applied for the image being built. Refer here for the complete list of options available with the WebLogic Image Tool update command.\n Note: The WebLogic Image Tool cache should have the latest OPatch zip. 
The WebLogic Image Tool will update the OPatch if it is not already updated in the image.\n For example:\n$ imagetool update --fromImage oracle/oudsm:12.2.1.4.0 --tag=oracle/oudsm-new:12.2.1.4.0 --patches=33521773_12.2.1.4.211008 --opatchBugNumber=28186730_13.9.4.2.8 Note: If the command fails because the files in the image being upgraded are not owned by oracle:oracle, then add the parameter --chown \u0026lt;userid\u0026gt;:\u0026lt;groupid\u0026gt; to correspond with the values returned in the error.\n Check the built image using the docker images command:\n$ docker images | grep oudsm The output will look similar to the following:\nREPOSITORY TAG IMAGE ID CREATED SIZE oracle/oudsm-new 12.2.1.4.0 78ccd1ad67eb 5 minutes ago 1.11GB oracle/oudsm 12.2.1.4.0 b051804ba15f 3 months ago 1.04GB Run the following command to save the patched container image to a tar file:\n$ docker save -o \u0026lt;path\u0026gt;/\u0026lt;file\u0026gt;.tar \u0026lt;image\u0026gt; For example:\n$ docker save -o $WORKDIR/oudsm-new.tar oracle/oudsm-new:12.2.1.4.0 " +}, +{ + "uri": "/fmw-kubernetes/23.4.1/idm-products/oam/post-install-config/", + "title": "Post Install Configuration", + "tags": [], + "description": "Post install configuration.", + "content": "Follow these post install configuration steps.\n Create a Server Overrides File Removing OAM Server from WebLogic Server 12c Default Coherence Cluster WebLogic Server Tuning Enable Virtualization Restart the domain Create a Server Overrides File Navigate to the following directory:\n$ cd $WORKDIR/kubernetes/create-access-domain/domain-home-on-pv/output/weblogic-domains/accessdomain Create a setUserOverrides.sh with the following contents:\nDERBY_FLAG=false JAVA_OPTIONS=\u0026quot;${JAVA_OPTIONS} -Djava.net.preferIPv4Stack=true\u0026quot; MEM_ARGS=\u0026quot;-Xms8192m -Xmx8192m\u0026quot; Copy the setUserOverrides.sh file to the Administration Server pod:\n$ chmod 755 setUserOverrides.sh $ kubectl cp setUserOverrides.sh oamns/accessdomain-adminserver:/u01/oracle/user_projects/domains/accessdomain/bin/setUserOverrides.sh Where oamns is the OAM namespace and accessdomain is the DOMAIN_NAME/UID.\n Stop the OAM domain using the following command:\n$ kubectl -n \u0026lt;domain_namespace\u0026gt; patch domains \u0026lt;domain_uid\u0026gt; --type=\u0026#39;json\u0026#39; -p=\u0026#39;[{\u0026#34;op\u0026#34;: \u0026#34;replace\u0026#34;, \u0026#34;path\u0026#34;: \u0026#34;/spec/serverStartPolicy\u0026#34;, \u0026#34;value\u0026#34;: \u0026#34;Never\u0026#34; }]\u0026#39; For example:\n$ kubectl -n oamns patch domains accessdomain --type=\u0026#39;json\u0026#39; -p=\u0026#39;[{\u0026#34;op\u0026#34;: \u0026#34;replace\u0026#34;, \u0026#34;path\u0026#34;: \u0026#34;/spec/serverStartPolicy\u0026#34;, \u0026#34;value\u0026#34;: \u0026#34;Never\u0026#34; }]\u0026#39; The output will look similar to the following:\ndomain.weblogic.oracle/accessdomain patched Check that all the pods are stopped:\n$ kubectl get pods -n \u0026lt;domain_namespace\u0026gt; For example:\n$ kubectl get pods -n oamns The output will look similar to the following:\nNAME READY STATUS RESTARTS AGE accessdomain-adminserver 1/1 Terminating 0 27m accessdomain-create-oam-infra-domain-job-7c9r9 0/1 Completed 0 4h29m accessdomain-oam-policy-mgr1 1/1 Terminating 0 24m accessdomain-oam-server1 1/1 Terminating 0 24m helper 1/1 Running 0 4h44m nginx-ingress-ingress-nginx-controller-76fb7678f-k8rhq 1/1 Running 0 108m The Administration Server pod and Managed Server pods will move to a STATUS of Terminating. 
After a few minutes, run the command again and the pods should have disappeared:\nNAME READY STATUS RESTARTS AGE accessdomain-create-oam-infra-domain-job-7c9r9 0/1 Completed 0 4h30m helper 1/1 Running 0 4h45m nginx-ingress-ingress-nginx-controller-76fb7678f-k8rhq 1/1 Running 0 109m Start the domain using the following command:\n$ kubectl -n \u0026lt;domain_namespace\u0026gt; patch domains \u0026lt;domain_uid\u0026gt; --type=\u0026#39;json\u0026#39; -p=\u0026#39;[{\u0026#34;op\u0026#34;: \u0026#34;replace\u0026#34;, \u0026#34;path\u0026#34;: \u0026#34;/spec/serverStartPolicy\u0026#34;, \u0026#34;value\u0026#34;: \u0026#34;IfNeeded\u0026#34; }]\u0026#39; For example:\n$ kubectl -n oamns patch domains accessdomain --type=\u0026#39;json\u0026#39; -p=\u0026#39;[{\u0026#34;op\u0026#34;: \u0026#34;replace\u0026#34;, \u0026#34;path\u0026#34;: \u0026#34;/spec/serverStartPolicy\u0026#34;, \u0026#34;value\u0026#34;: \u0026#34;IfNeeded\u0026#34; }]\u0026#39; Run the following kubectl command to view the pods:\n$ kubectl get pods -n \u0026lt;domain_namespace\u0026gt; For example:\n$ kubectl get pods -n oamns The output will look similar to the following:\nNAME READY STATUS RESTARTS AGE accessdomain-create-oam-infra-domain-job-7c9r9 0/1 Completed 0 4h30m accessdomain-introspector-mckp2 1/1 Running 0 8s helper 1/1 Running 0 4h46m nginx-ingress-ingress-nginx-controller-76fb7678f-k8rhq 1/1 Running 0 110m The Administration Server pod will start followed by the OAM Managed Servers pods. This process will take several minutes, so keep executing the command until all the pods are running with READY status 1/1:\nNAME READY STATUS RESTARTS AGE accessdomain-adminserver 1/1 Running 0 5m38s accessdomain-create-oam-infra-domain-job-7c9r9 0/1 Completed 0 4h37m accessdomain-oam-policy-mgr1 1/1 Running 0 2m51s accessdomain-oam-server1 1/1 Running 0 2m50s helper 1/1 Running 0 4h52m nginx-ingress-ingress-nginx-controller-76fb7678f-k8rhq 1/1 Running 0 116m Removing OAM Server from WebLogic Server 12c Default Coherence Cluster Exclude all Oracle Access Management (OAM) clusters (including Policy Manager and OAM runtime server) from the default WebLogic Server 12c coherence cluster by using the WebLogic Server Administration Console.\nFrom 12.2.1.3.0 onwards, OAM server-side session management uses the database and does not require coherence cluster to be established. In some environments, warnings and errors are observed due to default coherence cluster initialized by WebLogic. To avoid or fix these errors, exclude all of the OAM clusters from default WebLogic Server coherence cluster using the following steps:\n Login to the WebLogic Server Console at https://${MASTERNODE-HOSTNAME}:${MASTERNODE-PORT}/console. Click Lock \u0026amp; Edit. In Domain Structure, expand Environment and select Coherence Clusters. Click defaultCoherenceCluster and select the Members tab. From Servers and Clusters, deselect all OAM clusters (oam_cluster and policy_cluster). Click Save. Click Activate changes. WebLogic Server Tuning For production environments, the following WebLogic Server tuning parameters must be set:\nAdd Minimum Thread constraint to worker manager \u0026ldquo;OAPOverRestWM\u0026rdquo; Login to the WebLogic Server Console at https://${MASTERNODE-HOSTNAME}:${MASTERNODE-PORT}/console. Click Lock \u0026amp; Edit. In Domain Structure, click Deployments. On the Deployments page click Next until you see oam_server. Expand oam_server by clicking on the + icon, then click /iam/access/binding. 
Click the Configuration tab, followed by the Workload tab. Click wm/OAPOverRestWM. Under Application Scoped Work Managed Components, click New. In Create a New Work Manager Component, select Minimum Threads Constraint and click Next. In Minimum Threads Constraint Properties enter the Count as 400 and click Finish. In the Save Deployment Plan, change the Path to the value /u01/oracle/user_projects/domains/accessdomain/Plan.xml, where accessdomain is your domain_UID. Click OK and then Activate Changes. Remove Max Thread Constraint and Capacity Constraint Repeat steps 1-7 above. Under Application Scoped Work Managed Components select the check box for Capacity and MaxThreadsCount. Click Delete. In the Delete Work Manager Components screen, click OK to delete. Click on Release Configuration and then Log Out. oamDS DataSource Tuning Login to the WebLogic Server Console at https://${MASTERNODE-HOSTNAME}:${MASTERNODE-PORT}/console. Click Lock \u0026amp; Edit. In Domain Structure, expand Services and click Data Sources. Click on oamDS. In Settings for oamDS, select the Configuration tab, and then the Connection Pool tab. Change Initial Capacity, Maximum Capacity, and Minimum Capacity to 800 and click Save. Click Activate Changes. Enable Virtualization Log in to Oracle Enterprise Manager Fusion Middleware Control at https://${MASTERNODE-HOSTNAME}:${MASTERNODE-PORT}/em. Click WebLogic Domain \u0026gt; Security \u0026gt; Security Provider Configuration. Expand Security Store Provider. Expand Identity Store Provider. Click Configure. Add a custom property. Select the virtualize property with value true and click OK. Click OK again to persist the change. Restart the domain For the above changes to take effect, you must restart the OAM domain:\n Stop the OAM domain using the following command:\n$ kubectl -n \u0026lt;domain_namespace\u0026gt; patch domains \u0026lt;domain_uid\u0026gt; --type=\u0026#39;json\u0026#39; -p=\u0026#39;[{\u0026#34;op\u0026#34;: \u0026#34;replace\u0026#34;, \u0026#34;path\u0026#34;: \u0026#34;/spec/serverStartPolicy\u0026#34;, \u0026#34;value\u0026#34;: \u0026#34;Never\u0026#34; }]\u0026#39; For example:\n$ kubectl -n oamns patch domains accessdomain --type=\u0026#39;json\u0026#39; -p=\u0026#39;[{\u0026#34;op\u0026#34;: \u0026#34;replace\u0026#34;, \u0026#34;path\u0026#34;: \u0026#34;/spec/serverStartPolicy\u0026#34;, \u0026#34;value\u0026#34;: \u0026#34;Never\u0026#34; }]\u0026#39; The output will look similar to the following:\ndomain.weblogic.oracle/accessdomain patched Check that all the pods are stopped:\n$ kubectl get pods -n \u0026lt;domain_namespace\u0026gt; For example:\n$ kubectl get pods -n oamns The output will look similar to the following:\nNAME READY STATUS RESTARTS AGE accessdomain-adminserver 1/1 Terminating 0 27m accessdomain-create-oam-infra-domain-job-7c9r9 0/1 Completed 0 4h29m accessdomain-oam-policy-mgr1 1/1 Terminating 0 24m accessdomain-oam-server1 1/1 Terminating 0 24m helper 1/1 Running 0 4h44m nginx-ingress-ingress-nginx-controller-76fb7678f-k8rhq 1/1 Running 0 108m The Administration Server pod and Managed Server pods will move to a STATUS of Terminating. 
After a few minutes, run the command again and the pods should have disappeared:\nNAME READY STATUS RESTARTS AGE accessdomain-create-oam-infra-domain-job-7c9r9 0/1 Completed 0 4h30m helper 1/1 Running 0 4h45m nginx-ingress-ingress-nginx-controller-76fb7678f-k8rhq 1/1 Running 0 109m Start the domain using the following command:\n$ kubectl -n \u0026lt;domain_namespace\u0026gt; patch domains \u0026lt;domain_uid\u0026gt; --type=\u0026#39;json\u0026#39; -p=\u0026#39;[{\u0026#34;op\u0026#34;: \u0026#34;replace\u0026#34;, \u0026#34;path\u0026#34;: \u0026#34;/spec/serverStartPolicy\u0026#34;, \u0026#34;value\u0026#34;: \u0026#34;IfNeeded\u0026#34; }]\u0026#39; For example:\n$ kubectl -n oamns patch domains accessdomain --type=\u0026#39;json\u0026#39; -p=\u0026#39;[{\u0026#34;op\u0026#34;: \u0026#34;replace\u0026#34;, \u0026#34;path\u0026#34;: \u0026#34;/spec/serverStartPolicy\u0026#34;, \u0026#34;value\u0026#34;: \u0026#34;IfNeeded\u0026#34; }]\u0026#39; Run the following kubectl command to view the pods:\n$ kubectl get pods -n \u0026lt;domain_namespace\u0026gt; For example:\n$ kubectl get pods -n oamns The output will look similar to the following:\nNAME READY STATUS RESTARTS AGE accessdomain-create-oam-infra-domain-job-7c9r9 0/1 Completed 0 4h30m accessdomain-introspector-mckp2 1/1 Running 0 8s helper 1/1 Running 0 4h46m nginx-ingress-ingress-nginx-controller-76fb7678f-k8rhq 1/1 Running 0 110m The Administration Server pod will start followed by the OAM Managed Servers pods. This process will take several minutes, so keep executing the command until all the pods are running with READY status 1/1:\nNAME READY STATUS RESTARTS AGE accessdomain-adminserver 1/1 Running 0 5m38s accessdomain-create-oam-infra-domain-job-7c9r9 0/1 Completed 0 4h37m accessdomain-oam-policy-mgr1 1/1 Running 0 2m51s accessdomain-oam-server1 1/1 Running 0 2m50s helper 1/1 Running 0 4h52m nginx-ingress-ingress-nginx-controller-76fb7678f-k8rhq 1/1 Running 0 116m " +}, +{ + "uri": "/fmw-kubernetes/23.4.1/idm-products/oig/post-install-config/", + "title": "Post install configuration", + "tags": [], + "description": "Post install configuration.", + "content": "Follow these post install configuration steps.\n a. Post Install Tasks Perform post install tasks.\n b. Install and configure connectors Install and Configure Connectors.\n " +}, +{ + "uri": "/fmw-kubernetes/23.4.1/idm-products/oig/configure-design-console/", + "title": "Configure Design Console", + "tags": [], + "description": "Configure Design Console.", + "content": "Configure an Ingress to allow Design Console to connect to your Kubernetes cluster.\n a. Using Design Console with NGINX(non-SSL) Configure Design Console with NGINX(non-SSL).\n b. Using Design Console with NGINX(SSL) Configure Design Console with NGINX(SSL).\n " +}, +{ + "uri": "/fmw-kubernetes/23.4.1/idm-products/oud/patch-and-upgrade/", + "title": "Patch and Upgrade", + "tags": [], + "description": "This document provides steps to patch or upgrade an OUD image", + "content": "In this section you learn how to upgrade OUD from a previous version. 
Follow the section relevant to the version you are upgrading from.\n Upgrading to October 23 (23.4.1) from April 23 (23.2.1) or later Upgrading to October 23 (23.4.1) from October 22 (22.4.1) or January 23 (23.1.1) Upgrading to October 23 (23.4.1) from July 22 (22.3.1) Upgrading to October 23 (23.4.1) from releases prior to July 22 (22.3.1) Upgrading Elasticsearch and Kibana Note: If on July 22 (22.3.1) or later, and have Kubernetes Horizontal Pod Autoscaler (HPA) enabled, you must disable HPA before performing the steps in the relevant upgrade section. See Delete the HPA.\nUpgrading to October 23 (23.4.1) from April 23 (23.2.1) or later The instructions below are for upgrading from April 23 (23.2.1) or later to October 23 (23.4.1).\nNote: If you are not using Oracle Container Registry or your own container registry, then you must first load the new container image on all nodes in your Kubernetes cluster.\n Navigate to the $WORKDIR/kubernetes/helm directory:\n$ cd $WORKDIR/kubernetes/helm Create a oud-patch-override.yaml file that contains:\nimage: repository: \u0026lt;image_location\u0026gt; tag: \u0026lt;image_tag\u0026gt; imagePullSecrets: - name: orclcred For example:\nimage: repository: container-registry.oracle.com/middleware/oud_cpu tag: 12.2.1.4-jdk8-ol7-\u0026lt;October\u0026#39;23\u0026gt; imagePullSecrets: - name: orclcred The following caveats exist:\n If you are not using Oracle Container Registry or your own container registry for your OUD container image, then you can remove the following:\nimagePullSecrets: - name: orclcred Run the following command to upgrade the deployment:\n$ helm upgrade --namespace \u0026lt;namespace\u0026gt; \\ --values oud-patch-override.yaml \\ \u0026lt;release_name\u0026gt; oud-ds-rs --reuse-values For example:\n$ helm upgrade --namespace oudns \\ --values oud-patch-override.yaml \\ oud-ds-rs oud-ds-rs --reuse-values Upgrading to October 23 (23.4.1) from October 22 (22.4.1) or January 23 (23.1.1) The instructions below are for upgrading from October 22 (22.4.1) or January 23 (23.1.1), to October 23 (23.4.1).\nNote: If you are not using Oracle Container Registry or your own container registry, then you must first load the new container image on all nodes in your Kubernetes cluster.\nScale down OUD Make sure the base pod (oud-ds-rs-0) is running and healthy (READY 1/1) by running the following command:\n$ kubectl get pods -n \u0026lt;namespace\u0026gt; For example:\n$ kubectl get pods -n oudns The output will look similar to the following:\nNAME READY STATUS RESTARTS AGE oud-ds-rs-0 1/1 Running 0 21h oud-ds-rs-1 1/1 Running 0 20h oud-ds-rs-2 1/1 Running 0 20h Ensure dsreplication is healthy by running the following command:\n$ $ kubectl --namespace \u0026lt;namespace\u0026gt; exec -it -c \u0026lt;containername\u0026gt; \u0026lt;podname\u0026gt; -- \\ /u01/oracle/user_projects/\u0026lt;OUD Instance/Pod Name\u0026gt;/OUD/bin/dsreplication status \\ --trustAll --hostname \u0026lt;OUD Instance/Pod Name\u0026gt; --port 1444 --adminUID admin \\ --dataToDisplay compat-view --dataToDisplay rs-connections For example:\n$ kubectl --namespace oudns exec -it -c oud-ds-rs oud-ds-rs-0 -- \\ /u01/oracle/user_projects/oud-ds-rs-0/OUD/bin/dsreplication status \\ --trustAll --hostname oud-ds-rs-0 --port 1444 --adminUID admin \\ --dataToDisplay compat-view --dataToDisplay rs-connections The output will look similar to the following:\nNAME READY STATUS RESTARTS AGE \u0026gt;\u0026gt;\u0026gt;\u0026gt; Specify Oracle Unified Directory LDAP connection parameters Password 
for user 'admin': Establishing connections and reading configuration ..... Done. dc=example,dc=com - Replication Enabled ======================================= Server : Entries : M.C. [1] : A.O.M.C. [2] : Port [3] : Encryption [4] : Trust [5] : U.C. [6] : Status [7] : ChangeLog [8] : Group ID [9] : Connected To [10] ---------------------:---------:----------:--------------:----------:----------------:-----------:----------:------------:---------------:--------------:------------------------------- oud-ds-rs-0:1444 : 202 : 0 : 0 : 1898 : Disabled : Trusted : -- : Normal : Enabled : 1 : oud-ds-rs-0:1898 : : : : : : : : : : : (GID=1) oud-ds-rs-1:1444 : 202 : 0 : 0 : 1898 : Disabled : Trusted : -- : Normal : Enabled : 1 : oud-ds-rs-1:1898 : : : : : : : : : : : (GID=1) oud-ds-rs-2:1444 : 202 : 0 : 0 : 1898 : Disabled : Trusted : -- : Normal : Enabled : 1 : oud-ds-rs-2:1898 : : : : : : : : : : : (GID=1) Replication Server [11] : RS #1 : RS #2 : RS #3 -------------------------------:-------:-------:------ oud-ds-rs-0:1898 : -- : Yes : Yes (#1) : : : oud-ds-rs-1:1898 : Yes : -- : Yes (#2) : : : oud-ds-rs-2:1898 : Yes : Yes : -- (#3) : : : etc... Scale down OUD by reducing the replicas to 1:\n$ cd $WORKDIR/kubernetes/helm $ helm upgrade -n oudns --set replicaCount=1 oud-ds-rs oud-ds-rs --reuse-values Note: The $WORKDIR is the directory for your existing release, not October 23.\nThe output will be similar to the following:\nRelease \u0026quot;oud-ds-rs\u0026quot; has been upgraded. Happy Helming! NAME: oud-ds-rs LAST DEPLOYED: \u0026lt;DATE\u0026gt; NAMESPACE: oudns STATUS: deployed REVISION: 2 NOTES: etc.. Make sure the replica pods are shutdown before proceeding:\n$ kubectl get pods -n oudns NAME READY STATUS RESTARTS AGE oud-ds-rs-0 1/1 Running 0 21h Note: It will take several minutes before the replica pods disappear.\n Backup OUD data Take a backup of the OUD data for every pod in the NFS shared volume:\n$ kubectl exec -it -n oudns oud-ds-rs-0 -- bash [oracle@oud-ds-rs-0 oracle]$ cd user_projects [oracle@oud-ds-rs-0 user_projects]$ mkdir OUD_backup_\u0026lt;DATE\u0026gt; [oracle@oud-ds-rs-0 user_projects]$ cp -r oud-ds-rs-* OUD_backup_\u0026lt;DATE\u0026gt;/ Make sure the backup created successfully:\n[oracle@oud-ds-rs-0 user_projects]$ ls -l OUD_backup_\u0026lt;date\u0026gt; total 2 drwxr-x---. 5 oracle root 3 \u0026lt;DATE\u0026gt; oud-ds-rs-0 drwxr-x---. 5 oracle root 3 \u0026lt;DATE\u0026gt; oud-ds-rs-1 drwxr-x---. 
5 oracle root 3 \u0026lt;DATE\u0026gt; oud-ds-rs-2 Remove the non-zero pod directories oud-ds-rs-1 and oud-ds-rs-2:\n[oracle@oud-ds-rs-0 user_projects]$ rm -rf oud-ds-rs-1 oud-ds-rs-2 Exit the oud-ds-rs-0 bash session:\n[oracle@oud-ds-rs-0 user_projects]$ exit Setup the October 23 code repository to deploy OUD Create a working directory on the persistent volume to setup the latest source code:\n$ mkdir \u0026lt;persistent_volume\u0026gt;/\u0026lt;workdir\u0026gt; For example:\n$ mkdir /scratch/shared/OUDK8SJuly23 Download the latest OUD deployment scripts from the OUD repository:\n$ cd \u0026lt;persistent_volume\u0026gt;/\u0026lt;workdir\u0026gt; $ git clone https://github.com/oracle/fmw-kubernetes.git --branch release/23.4.1 For example:\n$ mkdir /scratch/shared/OUDK8SJuly23 $ git clone https://github.com/oracle/fmw-kubernetes.git --branch release/23.4.1 Set the $WORKDIR environment variable as follows:\n$ export WORKDIR=\u0026lt;workdir\u0026gt;/fmw-kubernetes/OracleUnifiedDirectory For example:\n$ export WORKDIR=/scratch/shared/OUDK8SJuly23/fmw-kubernetes/OracleUnifiedDirectory Update the OUD container image Navigate to the $WORKDIR/kubernetes/helm directory:\n$ cd $WORKDIR/kubernetes/helm Create a oud-patch-override.yaml file that contains:\nimage: repository: \u0026lt;image_location\u0026gt; tag: \u0026lt;image_tag\u0026gt; pullPolicy: IfNotPresent imagePullSecrets: - name: orclcred oudConfig: cleanupbeforeStart: false disablereplicationbeforeStop: false replicaCount: 3 For example:\nimage: repository: container-registry.oracle.com/middleware/oud_cpu tag: 12.2.1.4-jdk8-ol7-\u0026lt;October\u0026#39;23\u0026gt; pullPolicy: IfNotPresent imagePullSecrets: - name: orclcred oudConfig: cleanupbeforeStart: false disablereplicationbeforeStop: false replicaCount: 3 The following caveats exist:\n If you are not using Oracle Container Registry or your own container registry for your OUD container image, then you can remove the following:\nimagePullSecrets: - name: orclcred Run the following command to upgrade the deployment:\n$ cd $WORKDIR/kubernetes/helm $ helm upgrade --namespace \u0026lt;namespace\u0026gt; \\ --values oud-patch-override.yaml \\ \u0026lt;release_name\u0026gt; oud-ds-rs --reuse-values For example:\n$ cd $WORKDIR/kubernetes/helm $ helm upgrade --namespace oudns \\ --values oud-patch-override.yaml \\ oud-ds-rs oud-ds-rs --reuse-values The output should look similar to the following:\nRelease \u0026quot;oud-ds-rs\u0026quot; has been upgraded. Happy Helming! NAME: oud-ds-rs LAST DEPLOYED: \u0026lt;DATE\u0026gt; NAMESPACE: oudns STATUS: deployed REVISION: 3 NOTES: etc.. Verify the pods After updating with the new image the pods will restart. Verify the pods are running:\n$ kubectl --namespace \u0026lt;namespace\u0026gt; get pods For example:\n$ kubectl --namespace oudns get pods The output will look similar to the following:\nNAME READY STATUS RESTARTS AGE oud-ds-rs-0 1/1 Running 0 11m oud-ds-rs-1 1/1 Running 0 28m oud-ds-rs-2 1/1 Running 0 22m ... Note: It will take several minutes before the pods oud-ds-rs-1 and oud-ds-rs-2 start, and oud-ds-rs-0 restarts. While the OUD pods have a STATUS of 0/1 the pods are started but the OUD server associated with it is currently starting.\n Verify the pods are using the new image by running the following command:\n$ kubectl describe pod \u0026lt;pod\u0026gt; -n \u0026lt;namespace\u0026gt; For example:\n$ kubectl describe pod oud-ds-rs-0 -n oudns | grep Image The output will look similar to the following:\n... 
Image: container-registry.oracle.com/middleware/oud_cpu:12.2.1.4-jdk8-ol7-\u0026lt;October\u0026#39;23\u0026gt; Image ID: container-registry.oracle.com/middleware/oud_cpu@sha256:\u0026lt;sha256\u0026gt; Ensure dsreplication is healthy by running the following command:\n$ $ kubectl --namespace \u0026lt;namespace\u0026gt; exec -it -c \u0026lt;containername\u0026gt; \u0026lt;podname\u0026gt; -- \\ /u01/oracle/user_projects/\u0026lt;OUD Instance/Pod Name\u0026gt;/OUD/bin/dsreplication status \\ --trustAll --hostname \u0026lt;OUD Instance/Pod Name\u0026gt; --port 1444 --adminUID admin \\ --dataToDisplay compat-view --dataToDisplay rs-connections For example:\n$ kubectl --namespace oudns exec -it -c oud-ds-rs oud-ds-rs-0 -- \\ /u01/oracle/user_projects/oud-ds-rs-0/OUD/bin/dsreplication status \\ --trustAll --hostname oud-ds-rs-0 --port 1444 --adminUID admin \\ --dataToDisplay compat-view --dataToDisplay rs-connections The output will look similar to the following:\nNAME READY STATUS RESTARTS AGE \u0026gt;\u0026gt;\u0026gt;\u0026gt; Specify Oracle Unified Directory LDAP connection parameters Password for user 'admin': Establishing connections and reading configuration ..... Done. dc=example,dc=com - Replication Enabled ======================================= Server : Entries : M.C. [1] : A.O.M.C. [2] : Port [3] : Encryption [4] : Trust [5] : U.C. [6] : Status [7] : ChangeLog [8] : Group ID [9] : Connected To [10] ---------------------:---------:----------:--------------:----------:----------------:-----------:----------:------------:---------------:--------------:------------------------------- oud-ds-rs-0:1444 : 202 : 0 : 0 : 1898 : Disabled : Trusted : -- : Normal : Enabled : 1 : oud-ds-rs-0:1898 : : : : : : : : : : : (GID=1) oud-ds-rs-1:1444 : 202 : 0 : 0 : 1898 : Disabled : Trusted : -- : Normal : Enabled : 1 : oud-ds-rs-1:1898 : : : : : : : : : : : (GID=1) oud-ds-rs-2:1444 : 202 : 0 : 0 : 1898 : Disabled : Trusted : -- : Normal : Enabled : 1 : oud-ds-rs-2:1898 : : : : : : : : : : : (GID=1) Replication Server [11] : RS #1 : RS #2 : RS #3 -------------------------------:-------:-------:------ oud-ds-rs-0:1898 : -- : Yes : Yes (#1) : : : oud-ds-rs-1:1898 : Yes : -- : Yes (#2) : : : oud-ds-rs-2:1898 : Yes : Yes : -- (#3) : : : etc... Once the validation steps are performed and you are confident OUD is working correctly, you can optionally delete the OUD backup data in the NFS shared volume:\n$ kubectl exec -it -n oudns oud-ds-rs-0 -- bash [oracle@oud-ds-rs-0 oracle]$ cd user_projects/OUD_backup_\u0026lt;DATE\u0026gt;/ [oracle@oud-ds-rs-0 OUD_backup_\u0026lt;DATE\u0026gt;]$ rm -rf oud-ds-rs-0 oud-ds-rs-1 oud-ds-rs-2 Upgrading to October 23 (23.4.1) from July 22 (22.3.1) The instructions below are for upgrading from July 22 (22.3.1) to October 23 (23.4.1).\n Follow Upgrading to October 23 (23.4.1) from October 22 (22.4.1) or January 23 (23.1.1) to upgrade the image. Once the image is upgraded, follow Upgrading Elasticsearch and Kibana. Upgrading to October 23 (23.4.1) from releases prior to July 22 (22.3.1) In releases prior to July 22 (22.3.1) OUD used pod based deployment. 
From July 22 (22.3.1) onwards OUD is deployed using StatefulSets.\nIf you are upgrading from a release prior to July 22 (22.3.1) you must follow the steps below to deploy a new OUD instance to use your existing OUD data in \u0026lt;persistent_volume\u0026gt;/oud_user_projects.\nNote: The steps below will incur a small outage.\nDelete the existing deployment Find the deployment release name as follows:\n$ helm --namespace \u0026lt;namespace\u0026gt; list For example:\n$ helm --namespace oudns list The output will look similar to the following:\nNAME NAMESPACE REVISION UPDATED STATUS CHART APP VERSION oud-ds-rs oudns 1 \u0026lt;DATE\u0026gt; deployed oud-ds-rs-0.2 12.2.1.4.0 Delete the deployment using the following command:\n$ helm uninstall --namespace \u0026lt;namespace\u0026gt; \u0026lt;release\u0026gt; For example:\n$ helm uninstall --namespace oudns oud-ds-rs release \u0026#34;oud-ds-rs\u0026#34; uninstalled Run the following command to view the status:\n$ kubectl --namespace oudns get pod,service,secret,pv,pvc,ingress -o wide Initially the pods and persistent volume (PV) and persistent volume claim (PVC) will move to a Terminating status:\nNAME READY STATUS RESTARTS AGE IP NODE NOMINATED NODE READINESS GATES pod/oud-ds-rs-0 1/1 Terminating 0 24m 10.244.1.180 \u0026lt;Worker Node\u0026gt; \u0026lt;none\u0026gt; \u0026lt;none\u0026gt; pod/oud-ds-rs-1 1/1 Terminating 0 18m 10.244.1.181 \u0026lt;Worker Node\u0026gt; \u0026lt;none\u0026gt; \u0026lt;none\u0026gt; pod/oud-ds-rs-2 1/1 Terminating 0 12m 10.244.1.182 \u0026lt;Worker Node\u0026gt; \u0026lt;none\u0026gt; \u0026lt;none\u0026gt; NAME TYPE DATA AGE secret/default-token-msmmd kubernetes.io/service-account-token 3 3d20h secret/dockercred kubernetes.io/dockerconfigjson 1 3d20h secret/orclcred kubernetes.io/dockerconfigjson 1 3d20h NAME CAPACITY ACCESS MODES RECLAIM POLICY STATUS CLAIM STORAGECLASS REASON AGE VOLUMEMODE persistentvolume/oud-ds-rs-pv 20Gi RWX Delete Terminating oudns/oud-ds-rs-pvc manual 24m Filesystem NAME STATUS VOLUME CAPACITY ACCESS MODES STORAGECLASS AGE VOLUMEMODE persistentvolumeclaim/oud-ds-rs-pvc Terminating oud-ds-rs-pv 20Gi RWX manual 24m Filesystem Run the command again until the pods, PV and PVC disappear.\n Setup the code repository to deploy OUD Create a working directory on the persistent volume to setup the latest source code:\n$ mkdir \u0026lt;persistent_volume\u0026gt;/\u0026lt;workdir\u0026gt; For example:\n$ mkdir /scratch/shared/OUDK8SJuly23 Download the latest OUD deployment scripts from the OUD repository:\n$ cd \u0026lt;persistent_volume\u0026gt;/\u0026lt;workdir\u0026gt; $ git clone https://github.com/oracle/fmw-kubernetes.git --branch release/23.4.1 For example:\n$ cd /scratch/shared/OUDK8SJuly23 $ git clone https://github.com/oracle/fmw-kubernetes.git --branch release/23.4.1 Set the $WORKDIR environment variable as follows:\n$ export WORKDIR=\u0026lt;workdir\u0026gt;/fmw-kubernetes/OracleUnifiedDirectory For example:\n$ export WORKDIR=/scratch/shared/OUDK8SJuly23/fmw-kubernetes/OracleUnifiedDirectory Create a new instance against your existing persistent volume Navigate to the $WORKDIR/kubernetes/helm directory\n$ cd $WORKDIR/kubernetes/helm Create an oud-ds-rs-values-override.yaml as follows:\nimage: repository: \u0026lt;image_location\u0026gt; tag: \u0026lt;image_tag\u0026gt; pullPolicy: IfNotPresent imagePullSecrets: - name: orclcred oudConfig: rootUserPassword: \u0026lt;password\u0026gt; sampleData: \u0026#34;200\u0026#34; persistence: type: filesystem filesystem: hostPath: path: 
\u0026lt;persistent_volume\u0026gt;/oud_user_projects cronJob: kubectlImage: repository: bitnami/kubectl tag: \u0026lt;version\u0026gt; pullPolicy: IfNotPresent imagePullSecrets: - name: dockercred For example:\nimage: repository: container-registry.oracle.com/middleware/oud_cpu tag: 12.2.1.4-jdk8-ol7-\u0026lt;October\u0026#39;23\u0026gt; pullPolicy: IfNotPresent imagePullSecrets: - name: orclcred oudConfig: rootUserPassword: \u0026lt;password\u0026gt; sampleData: \u0026#34;200\u0026#34; persistence: type: filesystem filesystem: hostPath: path: /scratch/shared/oud_user_projects cronJob: kubectlImage: repository: bitnami/kubectl tag: 1.24.5 pullPolicy: IfNotPresent imagePullSecrets: - name: dockercred The following caveats exist:\n The \u0026lt;persistent_volume\u0026gt;/oud_user_projects must point to the directory used in your previous deployment otherwise your existing OUD data will not be used. Make sure you take a backup of the \u0026lt;persistent_volume\u0026gt;/oud_user_projects directory before proceeding further.\n Replace \u0026lt;password\u0026gt; with the password used in your previous deployment.\n The \u0026lt;version\u0026gt; in kubectlImage tag: should be set to the same version as your Kubernetes version (kubectl version). For example if your Kubernetes version is 1.24.5 set to 1.24.5.\n If you are not using Oracle Container Registry or your own container registry for your OUD container image, then you can remove the following:\nimagePullSecrets: - name: orclcred If using NFS for your persistent volume then change the persistence section as follows:\npersistence: type: networkstorage networkstorage: nfs: path: \u0026lt;persistent_volume\u0026gt;/oud_user_projects server: \u0026lt;NFS IP address\u0026gt; Run the following command to deploy OUD:\n$ helm install --namespace \u0026lt;namespace\u0026gt; \\ --values oud-ds-rs-values-override.yaml \\ \u0026lt;release_name\u0026gt; oud-ds-rs For example:\n$ helm install --namespace oudns \\ --values oud-ds-rs-values-override.yaml \\ oud-ds-rs oud-ds-rs Check the OUD deployment as per Verify the OUD deployment and Verify the OUD replication.\n Upgrade Elasticsearch and Kibana by following Upgrading Elasticsearch and Kibana.\n Upgrading Elasticsearch and Kibana This section shows how to upgrade Elasticsearch and Kibana. From October 22 (22.4.1) onwards, OUD logs should be stored on a centralized Elasticsearch and Kibana stack.\nNote: This section should only be followed if upgrading from July 22 (22.3.1) or earlier to October 23 (23.4.1). 
If you are upgrading from October 22 or later to October 23 do not follow this section.\nUndeploy Elasticsearch and Kibana From October 22 (22.4.1) onwards, OUD logs should be stored on a centralized Elasticsearch and Kibana (ELK) stack.\nDeployments prior to October 22 (22.4.1) used local deployments of Elasticsearch and Kibana.\nIf you are upgrading from July 22 (22.3.1) or earlier, to October 23 (23.4.1), you must first undeploy Elasticsearch and Kibana using the steps below:\n Navigate to the $WORKDIR/kubernetes/helm directory and create a logging-override-values-uninstall.yaml with the following:\nelk: enabled: false Run the following command to remove the existing ELK deployment:\n$ helm upgrade --namespace \u0026lt;domain_namespace\u0026gt; --values \u0026lt;valuesfile.yaml\u0026gt; \u0026lt;releasename\u0026gt; oud-ds-rs --reuse-values For example:\n$ helm upgrade --namespace oudns --values logging-override-values-uninstall.yaml oud-ds-rs oud-ds-rs --reuse-values Deploy Elasticsearch and Kibana in a centralized stack Follow Install Elasticsearch stack and Kibana to deploy Elasticsearch and Kibana in a centralized stack. " +}, +{ + "uri": "/fmw-kubernetes/23.4.1/idm-products/oudsm/patch-and-upgrade/", + "title": "Patch and upgrade", + "tags": [], + "description": "This document provides steps to patch or upgrade an OUDSM image, and Elasticsearch and Kibana.", + "content": "This section shows you how to upgrade the OUDSM image, and how to upgrade the Elasticsearch and Kibana stack to October 23 (23.4.1).\nThe upgrade path taken depends on the version you are upgrading from.\nPlease refer to the Release Notes for information on which upgrade steps are necessary for the version you are upgrading to.\n a. Patch an image Instructions on how to update your OUDSM Kubernetes cluster with a new OUDSM container image.\n b. Upgrade Elasticsearch and Kibana Instructions on how to upgrade Elasticsearch and Kibana.\n " +}, +{ + "uri": "/fmw-kubernetes/23.4.1/idm-products/oam/validate-sso-using-webgate/", + "title": "Validate a Basic SSO Flow using WebGate Registration ", + "tags": [], + "description": "Sample for validating a basic SSO flow using WebGate registration.", + "content": "In this section you validate that single sign-on works to the OAM Kubernetes cluster via Oracle WebGate. The instructions below assume you have a running Oracle HTTP Server (for example ohs_k8s) and Oracle WebGate installed on an independent server. The instructions also assume basic knowledge of how to register a WebGate agent.\nNote: At present Oracle HTTP Server and Oracle WebGate are not supported on a Kubernetes cluster.\nUpdate the OAM Hostname and Port for the Loadbalancer If using an NGINX ingress with no load balancer, change {LOADBALANCER-HOSTNAME}:${LOADBALANCER-PORT} to {MASTERNODE-HOSTNAME}:${MASTERNODE-PORT} when referenced below.\n Launch a browser and access the OAM console (https://${LOADBALANCER-HOSTNAME}:${LOADBALANCER-PORT}/oamconsole). Log in with the weblogic username and password (weblogic/\u0026lt;password\u0026gt;)\n Navigate to Configuration → Settings ( View ) → Access Manager.\n Under Load Balancing modify the OAM Server Host and OAM Server Port, to point to the Loadbalancer HTTP endpoint (e.g. loadbalancer.example.com and \u0026lt;port\u0026gt; respectively).
In the OAM Server Protocol drop down list select https.\n Under WebGate Traffic Load Balancer modify the OAM Server Host and OAM Server Port, to point to the Loadbalancer HTTP endpoint (e.g. loadbalancer.example.com and \u0026lt;port\u0026gt; respectively). In the OAM Server Protocol drop down list select https.\n Click Apply.\n Register a WebGate Agent In all the examples below, change the directory path as appropriate for your installation.\n Run the following command on the server with Oracle HTTP Server and WebGate installed:\n$ cd \u0026lt;OHS_ORACLE_HOME\u0026gt;/webgate/ohs/tools/deployWebGate $ ./deployWebGateInstance.sh -w \u0026lt;OHS_DOMAIN_HOME\u0026gt;/config/fmwconfig/components/OHS/ohs_k8s -oh \u0026lt;OHS_ORACLE_HOME\u0026gt; -ws ohs The output will look similar to the following:\nCopying files from WebGate Oracle Home to WebGate Instancedir Run the following command to update the OHS configuration files appropriately:\n$ export LD_LIBRARY_PATH=$LD_LIBRARY_PATH:\u0026lt;OHS_ORACLE_HOME\u0026gt;/lib $ cd \u0026lt;OHS_ORACLE_HOME\u0026gt;/webgate/ohs/tools/setup/InstallTools/ $ ./EditHttpConf -w \u0026lt;OHS_DOMAIN_HOME\u0026gt;/config/fmwconfig/components/OHS/ohs_k8s -oh \u0026lt;OHS_ORACLE_HOME\u0026gt; The output will look similar to the following:\nThe web server configuration file was successfully updated \u0026lt;OHS_DOMAIN_HOME\u0026gt;/config/fmwconfig/components/OHS/ohs_k8s/httpd.conf has been backed up as \u0026lt;OHS_DOMAIN_HOME\u0026gt;/config/fmwconfig/components/OHS/ohs_k8s/httpd.conf.ORIG Launch a browser, and access the OAM console. Navigate to Application Security → Quick Start Wizards → SSO Agent Registration. Register the agent in the usual way, download the configuration zip file and copy it to the OHS WebGate server, for example: \u0026lt;OHS_DOMAIN_HOME\u0026gt;/config/fmwconfig/components/OHS/ohs_k8s/webgate/config. Extract the zip file.\n Copy the Certificate Authority (CA) certificate (cacert.pem) for the load balancer/ingress certificate to the same directory e.g: \u0026lt;OHS_DOMAIN_HOME\u0026gt;/config/fmwconfig/components/OHS/ohs_k8s/webgate/config.\nIf you used a self-signed certificate for the ingress, instead copy the self-signed certificate (e.g: /scratch/ssl/tls.crt) to the above directory. Rename the certificate to cacert.pem.\n Restart Oracle HTTP Server.\n Access the configured OHS e.g. http://ohs.example.com:7778, and check you are redirected to the SSO login page. Log in and make sure you are redirected successfully to the home page.\n Changing WebGate agent to use OAP Note: This section should only be followed if you need to change the OAM/WebGate Agent communication from HTTPS to OAP.\nTo change the WebGate agent to use OAP:\n In the OAM Console click Application Security and then Agents.\n Search for the agent you want to modify and select it.\n In the User Defined Parameters change:\na) OAMServerCommunicationMode from HTTPS to OAP. For example OAMServerCommunicationMode=OAP\nb) OAMRestEndPointHostName=\u0026lt;hostname\u0026gt; to the {$MASTERNODE-HOSTNAME}.
For example OAMRestEndPointHostName=masternode.example.com\n In the Server Lists section click Add to add a new server with the following values:\n Access Server: Other Host Name: \u0026lt;{$MASTERNODE-HOSTNAME}\u0026gt; Host Port: \u0026lt;oamoap-service NodePort\u0026gt; Note: To find the value for Host Port run the following:\n$ kubectl describe svc accessdomain-oamoap-service -n oamns The output will look similar to the following:\nName: accessdomain-oamoap-service Namespace: oamns Labels: \u0026lt;none\u0026gt; Annotations: \u0026lt;none\u0026gt; Selector: weblogic.clusterName=oam_cluster Type: NodePort IP Families: \u0026lt;none\u0026gt; IP: 10.100.202.44 IPs: 10.100.202.44 Port: \u0026lt;unset\u0026gt; 5575/TCP TargetPort: 5575/TCP NodePort: \u0026lt;unset\u0026gt; 30540/TCP Endpoints: 10.244.5.21:5575,10.244.6.76:5575 Session Affinity: None External Traffic Policy: Cluster Events: \u0026lt;none\u0026gt; In the example above the NodePort is 30540.\n Delete all servers in Server Lists except for the one just created, and click Apply.\n Click Download to download the webgate zip file. Copy the zip file to the desired WebGate.\n Delete the cache from \u0026lt;OHS_DOMAIN_HOME\u0026gt;/servers/ohs1/cache and restart Oracle HTTP Server.\n " +}, +{ + "uri": "/fmw-kubernetes/23.4.1/idm-products/oam/manage-oam-domains/", + "title": "Manage OAM Domains", + "tags": [], + "description": "This document provides steps to manage the OAM domain.", + "content": "Important considerations for Oracle Access Management domains in Kubernetes.\n a. Domain Life Cycle Learn about the domain life cycle of an OAM domain.\n b. WLST Administration Operations Describes the steps for WLST administration using a helper pod running in the same Kubernetes cluster as the OAM domain.\n c. Logging and Visualization Describes the steps for logging and visualization with Elasticsearch and Kibana.\n d. Monitoring an OAM domain Describes the steps for Monitoring the OAM domain.\n e. Kubernetes Horizontal Pod Autoscaler Describes the steps for implementing the Horizontal Pod Autoscaler.\n f. Delete the OAM domain home Learn about the steps to cleanup the OAM domain home.\n " +}, +{ + "uri": "/fmw-kubernetes/23.4.1/idm-products/oig/manage-oig-domains/", + "title": "Manage OIG domains", + "tags": [], + "description": "This document provides steps to manage the OIG domain.", + "content": "Important considerations for Oracle Identity Governance domains in Kubernetes.\n a. Domain life cycle Learn about the domain lifecycle of an OIG domain.\n b. WLST administration operations Describes the steps for WLST administration using a helper pod running in the same Kubernetes cluster as the OIG domain.\n c. Running OIG utilities Describes the steps for running OIG utilities in Kubernetes.\n d. Logging and visualization Describes the steps for logging and visualization with Elasticsearch and Kibana.\n e. Monitoring an OIG domain Describes the steps for Monitoring the OIG domain and Publishing the logs to Elasticsearch.\n f. Kubernetes Horizontal Pod Autoscaler Describes the steps for implementing the Horizontal Pod Autoscaler.\n g.
Delete the OIG domain home Learn about the steps to cleanup the OIG domain home.\n " +}, +{ + "uri": "/fmw-kubernetes/23.4.1/idm-products/oud/troubleshooting/", + "title": "Troubleshooting", + "tags": [], + "description": "How to Troubleshoot issues.", + "content": " Check the status of a namespace View pod logs View pod description Known issues Check the status of a namespace To check the status of objects in a namespace use the following command:\n$ kubectl --namespace \u0026lt;namespace\u0026gt; get nodes,pod,service,secret,pv,pvc,ingress -o wide For example:\n$ kubectl --namespace oudns get pod,service,secret,pv,pvc,ingress -o wide The output will look similar to the following:\nNAME READY STATUS RESTARTS AGE IP NODE NOMINATED NODE READINESS GATES pod/oud-ds-rs-0 1/1 Running 0 14m 10.244.1.180 \u0026lt;Worker Node\u0026gt; \u0026lt;none\u0026gt; \u0026lt;none\u0026gt; pod/oud-ds-rs-1 1/1 Running 0 8m26s 10.244.1.181 \u0026lt;Worker Node\u0026gt; \u0026lt;none\u0026gt; \u0026lt;none\u0026gt; pod/oud-ds-rs-2 0/1 Running 0 2m24s 10.244.1.182 \u0026lt;Worker Node\u0026gt; \u0026lt;none\u0026gt; \u0026lt;none\u0026gt; pod/oud-pod-cron-job-27586680-p5d8q 0/1 Completed 0 50s 10.244.1.183 \u0026lt;Worker Node\u0026gt; \u0026lt;none\u0026gt; \u0026lt;none\u0026gt; NAME TYPE CLUSTER-IP EXTERNAL-IP PORT(S) AGE SELECTOR service/oud-ds-rs ClusterIP None \u0026lt;none\u0026gt; 1444/TCP,1888/TCP,1389/TCP,1636/TCP,1080/TCP,1081/TCP,1898/TCP 14m app.kubernetes.io/instance=oud-ds-rs,app.kubernetes.io/name=oud-ds-rs service/oud-ds-rs-0 ClusterIP None \u0026lt;none\u0026gt; 1444/TCP,1888/TCP,1898/TCP 14m app.kubernetes.io/instance=oud-ds-rs,app.kubernetes.io/name=oud-ds-rs,statefulset.kubernetes.io/pod-name=oud-ds-rs-0 service/oud-ds-rs-1 ClusterIP None \u0026lt;none\u0026gt; 1444/TCP,1888/TCP,1898/TCP 14m app.kubernetes.io/instance=oud-ds-rs,app.kubernetes.io/name=oud-ds-rs,statefulset.kubernetes.io/pod-name=oud-ds-rs-1 service/oud-ds-rs-2 ClusterIP None \u0026lt;none\u0026gt; 1444/TCP,1888/TCP,1898/TCP 14m app.kubernetes.io/instance=oud-ds-rs,app.kubernetes.io/name=oud-ds-rs,statefulset.kubernetes.io/pod-name=oud-ds-rs-2 service/oud-ds-rs-http-0 ClusterIP 10.104.112.93 \u0026lt;none\u0026gt; 1080/TCP,1081/TCP 14m app.kubernetes.io/instance=oud-ds-rs,app.kubernetes.io/name=oud-ds-rs,statefulset.kubernetes.io/pod-name=oud-ds-rs-0 service/oud-ds-rs-http-1 ClusterIP 10.103.105.70 \u0026lt;none\u0026gt; 1080/TCP,1081/TCP 14m app.kubernetes.io/instance=oud-ds-rs,app.kubernetes.io/name=oud-ds-rs,statefulset.kubernetes.io/pod-name=oud-ds-rs-1 service/oud-ds-rs-http-2 ClusterIP 10.110.160.107 \u0026lt;none\u0026gt; 1080/TCP,1081/TCP 14m app.kubernetes.io/instance=oud-ds-rs,app.kubernetes.io/name=oud-ds-rs,statefulset.kubernetes.io/pod-name=oud-ds-rs-2 service/oud-ds-rs-lbr-admin ClusterIP 10.99.238.222 \u0026lt;none\u0026gt; 1888/TCP,1444/TCP 14m app.kubernetes.io/instance=oud-ds-rs,app.kubernetes.io/name=oud-ds-rs service/oud-ds-rs-lbr-http ClusterIP 10.101.250.196 \u0026lt;none\u0026gt; 1080/TCP,1081/TCP 14m app.kubernetes.io/instance=oud-ds-rs,app.kubernetes.io/name=oud-ds-rs service/oud-ds-rs-lbr-ldap ClusterIP 10.104.149.90 \u0026lt;none\u0026gt; 1389/TCP,1636/TCP 14m app.kubernetes.io/instance=oud-ds-rs,app.kubernetes.io/name=oud-ds-rs service/oud-ds-rs-ldap-0 ClusterIP 10.109.255.221 \u0026lt;none\u0026gt; 1389/TCP,1636/TCP 14m app.kubernetes.io/instance=oud-ds-rs,app.kubernetes.io/name=oud-ds-rs,statefulset.kubernetes.io/pod-name=oud-ds-rs-0 service/oud-ds-rs-ldap-1 ClusterIP 10.111.135.142 
\u0026lt;none\u0026gt; 1389/TCP,1636/TCP 14m app.kubernetes.io/instance=oud-ds-rs,app.kubernetes.io/name=oud-ds-rs,statefulset.kubernetes.io/pod-name=oud-ds-rs-1 service/oud-ds-rs-ldap-2 ClusterIP 10.100.8.145 \u0026lt;none\u0026gt; 1389/TCP,1636/TCP 14m app.kubernetes.io/instance=oud-ds-rs,app.kubernetes.io/name=oud-ds-rs,statefulset.kubernetes.io/pod-name=oud-ds-rs-2 NAME TYPE DATA AGE secret/dockercred kubernetes.io/dockerconfigjson 1 4h24m secret/orclcred kubernetes.io/dockerconfigjson 1 14m secret/oud-ds-rs-creds opaque 8 14m secret/oud-ds-rs-tls-cert kubernetes.io/tls 2 14m secret/sh.helm.release.v1.oud-ds-rs.v1 helm.sh/release.v1 1 14m NAME CAPACITY ACCESS MODES RECLAIM POLICY STATUS CLAIM STORAGECLASS REASON AGE VOLUMEMODE persistentvolume/oud-ds-rs-pv 20Gi RWX Delete Bound oudns/oud-ds-rs-pvc manual 14m Filesystem NAME STATUS VOLUME CAPACITY ACCESS MODES STORAGECLASS AGE VOLUMEMODE persistentvolumeclaim/oud-ds-rs-pvc Bound oud-ds-rs-pv 20Gi RWX manual 14m Filesystem NAME CLASS HOSTS ADDRESS PORTS AGE ingress.networking.k8s.io/oud-ds-rs-admin-ingress-nginx \u0026lt;none\u0026gt; oud-ds-rs-admin-0,oud-ds-rs-admin-0,oud-ds-rs-admin-1 + 3 more... 80, 443 14m ingress.networking.k8s.io/oud-ds-rs-http-ingress-nginx \u0026lt;none\u0026gt; oud-ds-rs-http-0,oud-ds-rs-http-1,oud-ds-rs-http-2 + 3 more... 80, 443 14m Include/exclude elements (nodes,pod,service,secret,pv,pvc,ingress) as required.\nView pod logs To view logs for a pod use the following command:\n$ kubectl logs \u0026lt;pod\u0026gt; -n \u0026lt;namespace\u0026gt; For example:\n$ kubectl logs oud-ds-rs-0 -n oudns View pod description Details about a pod can be viewed using the kubectl describe command:\n$ kubectl describe pod \u0026lt;pod\u0026gt; -n \u0026lt;namespace\u0026gt; For example:\n$ kubectl describe pod oud-ds-rs-0 -n oudns The output will look similar to the following:\nName: oud-ds-rs-0 Namespace: oudns Priority: 0 Node: \u0026lt;Worker Node\u0026gt;/100.105.18.114 Start Time: \u0026lt;DATE\u0026gt; Labels: app.kubernetes.io/instance=oud-ds-rs app.kubernetes.io/name=oud-ds-rs controller-revision-hash=oud-ds-rs-5c8b8f67c9 statefulset.kubernetes.io/pod-name=oud-ds-rs-0 Annotations: \u0026lt;none\u0026gt; Status: Running IP: 10.244.2.48 IPs: IP: 10.244.2.48 Controlled By: StatefulSet/oud-ds-rs Init Containers: mount-pv: Container ID: cri-o://905af11c6f032f2dfa18b1e3956d7936cb7dd04d9d0df0cfcf8ed061e6930b52 Image: \u0026lt;location\u0026gt;/busybox Image ID: \u0026lt;location\u0026gt;@sha256:2c8ed5408179ff4f53242a4bdd2706110ce000be239fe37a61be9c52f704c437 Port: \u0026lt;none\u0026gt; Host Port: \u0026lt;none\u0026gt; Command: /bin/sh -c Args: ordinal=${OUD_INSTANCE_NAME##*-}; if [[ ${CLEANUP_BEFORE_START} == \u0026quot;true\u0026quot; ]]; then if [[ \u0026quot;$ordinal\u0026quot; != \u0026quot;0\u0026quot; ]]; then cd /u01/oracle; rm -fr /u01/oracle/user_projects/$(OUD_INSTANCE_NAME)/OUD; fi; fi if [[ ${CONFIGVOLUME_ENABLED} == \u0026quot;true\u0026quot; ]]; then if [[ \u0026quot;$ordinal\u0026quot; == \u0026quot;0\u0026quot; ]]; then cp \u0026quot;/mnt/baseOUD.props\u0026quot; \u0026quot;${CONFIGVOLUME_MOUNTPATH}/config-baseOUD.props\u0026quot;; else cp \u0026quot;/mnt/replOUD.props\u0026quot; \u0026quot;${CONFIGVOLUME_MOUNTPATH}/config-replOUD.props\u0026quot;; fi; fi; State: Terminated Reason: Completed Exit Code: 0 Started: \u0026lt;DATE\u0026gt; Finished: \u0026lt;DATE\u0026gt; Ready: True Restart Count: 0 Environment: OUD_INSTANCE_NAME: oud-ds-rs-0 (v1:metadata.name) CONFIGVOLUME_ENABLED: false 
CONFIGVOLUME_MOUNTPATH: /u01/oracle/config-input CLEANUP_BEFORE_START: false Mounts: /u01/oracle/user_projects from oud-ds-rs-pv (rw) /var/run/secrets/kubernetes.io/serviceaccount from kube-api-access-65skp (ro) Containers: oud-ds-rs: Container ID: cri-o://d691b090dfbb1ee1b8606952497d80642424a82a2290071b325ea720098817c3 Image: container-registry.oracle.com/middleware/oud_cpu:12.2.1.4-jdk8-ol7-\u0026lt;April'23\u0026gt; Image ID: container-registry.oracle.com/middleware/oud_cpu@sha256:faca16dbbcda1985ff567eefe3f2ca7bae6cbbb7ebcd296fffb040ce61e9396a Ports: 1444/TCP, 1888/TCP, 1389/TCP, 1636/TCP, 1080/TCP, 1081/TCP, 1898/TCP Host Ports: 0/TCP, 0/TCP, 0/TCP, 0/TCP, 0/TCP, 0/TCP, 0/TCP State: Running Started: \u0026lt;DATE\u0026gt; Ready: True Restart Count: 0 Limits: cpu: 1 memory: 4Gi Requests: cpu: 500m memory: 4Gi Liveness: tcp-socket :ldap delay=300s timeout=30s period=60s #success=1 #failure=5 Readiness: exec [/u01/oracle/container-scripts/checkOUDInstance.sh] delay=300s timeout=30s period=60s #success=1 #failure=10 Environment: instanceType: DS2RS_STS OUD_INSTANCE_NAME: oud-ds-rs-0 (v1:metadata.name) MY_NODE_NAME: (v1:spec.nodeName) MY_POD_NAME: oud-ds-rs-0 (v1:metadata.name) sleepBeforeConfig: 3 sourceHost: oud-ds-rs-0 baseDN: dc=example,dc=com rootUserDN: \u0026lt;set to the key 'rootUserDN' in secret 'oud-ds-rs-creds'\u0026gt; Optional: false rootUserPassword: \u0026lt;set to the key 'rootUserPassword' in secret 'oud-ds-rs-creds'\u0026gt; Optional: false adminUID: \u0026lt;set to the key 'adminUID' in secret 'oud-ds-rs-creds'\u0026gt; Optional: false adminPassword: \u0026lt;set to the key 'adminPassword' in secret 'oud-ds-rs-creds'\u0026gt; Optional: false bindDN1: \u0026lt;set to the key 'bindDN1' in secret 'oud-ds-rs-creds'\u0026gt; Optional: false bindPassword1: \u0026lt;set to the key 'bindPassword1' in secret 'oud-ds-rs-creds'\u0026gt; Optional: false bindDN2: \u0026lt;set to the key 'bindDN2' in secret 'oud-ds-rs-creds'\u0026gt; Optional: false bindPassword2: \u0026lt;set to the key 'bindPassword2' in secret 'oud-ds-rs-creds'\u0026gt; Optional: false sourceServerPorts: oud-ds-rs-0:1444 sourceAdminConnectorPort: 1444 sourceReplicationPort: 1898 sampleData: 200 adminConnectorPort: 1444 httpAdminConnectorPort: 1888 ldapPort: 1389 ldapsPort: 1636 httpPort: 1080 httpsPort: 1081 replicationPort: 1898 dsreplication_1: verify --hostname ${sourceHost} --port ${sourceAdminConnectorPort} --baseDN ${baseDN} --serverToRemove $(OUD_INSTANCE_NAME):${adminConnectorPort} --connectTimeout 600000 --readTimeout 600000 dsreplication_2: enable --host1 ${sourceHost} --port1 ${sourceAdminConnectorPort} --replicationPort1 ${sourceReplicationPort} --host2 $(OUD_INSTANCE_NAME) --port2 ${adminConnectorPort} --replicationPort2 ${replicationPort} --baseDN ${baseDN} --connectTimeout 600000 --readTimeout 600000 dsreplication_3: initialize --hostSource ${initializeFromHost} --portSource ${sourceAdminConnectorPort} --hostDestination $(OUD_INSTANCE_NAME) --portDestination ${adminConnectorPort} --baseDN ${baseDN} --connectTimeout 600000 --readTimeout 600000 dsreplication_4: verify --hostname $(OUD_INSTANCE_NAME) --port ${adminConnectorPort} --baseDN ${baseDN} --connectTimeout 600000 --readTimeout 600000 post_dsreplication_dsconfig_1: set-replication-domain-prop --domain-name ${baseDN} --set group-id:1 post_dsreplication_dsconfig_2: set-replication-server-prop --set group-id:1 Mounts: /u01/oracle/user_projects from oud-ds-rs-pv (rw) /var/run/secrets/kubernetes.io/serviceaccount from kube-api-access-65skp (ro) 
Conditions: Type Status Initialized True Ready True ContainersReady True PodScheduled True Volumes: oud-ds-rs-pv: Type: PersistentVolumeClaim (a reference to a PersistentVolumeClaim in the same namespace) ClaimName: oud-ds-rs-pvc ReadOnly: false kube-api-access-65skp: Type: Projected (a volume that contains injected data from multiple sources) TokenExpirationSeconds: 3607 ConfigMapName: kube-root-ca.crt ConfigMapOptional: \u0026lt;nil\u0026gt; DownwardAPI: true QoS Class: Burstable Node-Selectors: \u0026lt;none\u0026gt; Tolerations: node.kubernetes.io/not-ready:NoExecute op=Exists for 300s node.kubernetes.io/unreachable:NoExecute op=Exists for 300s Events: \u0026lt;none\u0026gt; Known issues dsreplication output after scale up/down shows pod in unknown state Sometimes when scaling up or down, it is possible to get incorrect data in the dsreplication output. In the example below the replicaCount was changed from 4 to 3. The oud-ds-rs-3 server appears as \u0026lt;Unknown\u0026gt; when it should have disappeared:\ndc=example,dc=com - Replication Enabled ======================================= Server : Entries : M.C. [1] : A.O.M.C. [2] : Port [3] : Encryption [4] : Trust [5] : U.C. [6] : Status [7] : ChangeLog [8] : Group ID [9] : Connected To [10] -------------------------------:---------:----------:--------------:----------:----------------:-----------:----------:------------:---------------:--------------:------------------------------- oud-ds-rs-3:\u0026lt;Unknown\u0026gt; : -- : N/A : -- : 1898 : Disabled : -- : -- : Unknown : -- : N/A : -- [11] : : : : : : : : : : : oud-ds-rs-0:1444 : 39135 : 0 : 0 : 1898 : Disabled : Trusted : -- : Normal : Enabled : 1 : oud-ds-rs-2:1898 : : : : : : : : : : : (GID=1) oud-ds-rs-1:1444 : 39135 : 0 : 0 : 1898 : Disabled : Trusted : -- : Normal : Enabled : 1 : oud-ds-rs-1:1898 : : : : : : : : : : : (GID=1) oud-ds-rs-2:1444 : 39135 : 0 : 0 : 1898 : Disabled : Trusted : -- : Normal : Enabled : 1 : oud-ds-rs-2:1898 : : : : : : : : : : : (GID=1) Replication Server [12] : RS #1 : RS #2 : RS #3 : RS #4 ------------------------------:-------:-------:-------:------ oud-ds-rs-0:1898 (#1) : -- : Yes : Yes : N/A oud-ds-rs-1:1898 (#2) : Yes : -- : Yes : N/A oud-ds-rs-2:1898 (#3) : Yes : Yes : -- : N/A oud-ds-rs-3:1898 (#4) : No : No : No : -- In this situation perform the following steps to remove the server:\n Run the following command to enter the OUD Kubernetes pod:\n$ kubectl --namespace \u0026lt;namespace\u0026gt; exec -it -c \u0026lt;containername\u0026gt; \u0026lt;podname\u0026gt; -- bash For example:\nkubectl --namespace oudns exec -it -c oud-ds-rs oud-ds-rs-0 -- bash This will take you into the pod:\n[oracle@oud-ds-rs-0 oracle]$ Once inside the pod run the following command to create a password file:\necho \u0026lt;ADMIN_PASSWORD\u0026gt; \u0026gt; /tmp/adminpassword.txt Run the following command to remove the replicationPort:\n/u01/oracle/oud/bin/dsreplication disable --hostname localhost --port $adminConnectorPort --adminUID admin --trustAll --adminPasswordFile /tmp/adminpassword.txt --no-prompt --unreachableServer oud-ds-rs-3:$replicationPort The output will look similar to the following:\nEstablishing connections and reading configuration ........ Done. The following errors were encountered reading the configuration of the existing servers: Could not connect to the server oud-ds-rs-3:1444. Check that the server is running and that is accessible from the local machine. 
Details: oud-ds-rs-3:1444 The tool will try to update the configuration in a best effort mode. Removing references to replication server oud-ds-rs-3:1898 ..... Done. Run the following command to remove the adminConnectorPort:\n/u01/oracle/oud/bin/dsreplication disable --hostname localhost --port $adminConnectorPort --adminUID admin --trustAll --adminPasswordFile /tmp/adminpassword.txt --no-prompt --unreachableServer oud-ds-rs-3:$adminConnectorPort The output will look similar to the following:\nEstablishing connections and reading configuration ...... Done. Removing server oud-ds-rs-3:1444 from the registration information ..... Done. Delete the password file:\nrm /tmp/adminpassword.txt " +}, +{ + "uri": "/fmw-kubernetes/23.4.1/idm-products/oudsm/troubleshooting/", + "title": "Troubleshooting", + "tags": [], + "description": "How to Troubleshoot issues.", + "content": " Check the status of a namespace View pod logs View pod description Check the status of a namespace To check the status of objects in a namespace use the following command:\n$ kubectl --namespace \u0026lt;namespace\u0026gt; get nodes,pod,service,secret,pv,pvc,ingress -o wide For example:\n$ kubectl --namespace oudsmns get nodes,pod,service,secret,pv,pvc,ingress -o wide The output will look similar to the following:\n$ kubectl --namespace oudsmns get pod,service,secret,pv,pvc,ingress -o wide NAME READY STATUS RESTARTS AGE IP NODE NOMINATED NODE READINESS GATES pod/oudsm-1 1/1 Running 0 18m 10.244.1.89 \u0026lt;worker-node\u0026gt; \u0026lt;none\u0026gt; \u0026lt;none\u0026gt; NAME TYPE CLUSTER-IP EXTERNAL-IP PORT(S) AGE SELECTOR service/oudsm-1 ClusterIP 10.101.79.110 \u0026lt;none\u0026gt; 7001/TCP,7002/TCP 18m app.kubernetes.io/instance=oudsm,app.kubernetes.io/name=oudsm,oudsm/instance=oudsm-1 service/oudsm-lbr ClusterIP 10.106.241.204 \u0026lt;none\u0026gt; 7001/TCP,7002/TCP 18m app.kubernetes.io/instance=oudsm,app.kubernetes.io/name=oudsm NAME TYPE DATA AGE secret/default-token-jtwn2 kubernetes.io/service-account-token 3 22h secret/orclcred kubernetes.io/dockerconfigjson 1 22h secret/oudsm-creds opaque 2 18m secret/oudsm-tls-cert kubernetes.io/tls 2 18m secret/oudsm-token-7kjff kubernetes.io/service-account-token 3 18m secret/sh.helm.release.v1.oudsm.v1 helm.sh/release.v1 1 18m NAME CAPACITY ACCESS MODES RECLAIM POLICY STATUS CLAIM STORAGECLASS REASON AGE VOLUMEMODE persistentvolume/oudsm-pv 20Gi RWX Delete Bound oudsmns/oudsm-pvc manual 18m Filesystem NAME STATUS VOLUME CAPACITY ACCESS MODES STORAGECLASS AGE VOLUMEMODE persistentvolumeclaim/oudsm-pvc Bound oudsm-pv 20Gi RWX manual 18m Filesystem NAME CLASS HOSTS ADDRESS PORTS AGE ingress.networking.k8s.io/oudsm-ingress-nginx \u0026lt;none\u0026gt; oudsm-1,oudsm 80, 443 18m Include/exclude elements (nodes,pod,service,secret,pv,pvc,ingress) as required.\nView pod logs To view logs for a pod use the following command:\n$ kubectl logs \u0026lt;pod\u0026gt; -n \u0026lt;namespace\u0026gt; For example:\n$ kubectl logs oudsm-1 -n oudsmns View pod description Details about a pod can be viewed using the kubectl describe command:\n$ kubectl describe pod \u0026lt;pod\u0026gt; -n \u0026lt;namespace\u0026gt; For example:\n$ kubectl describe pod oudsm-1 -n oudsmns The output will look similar to the following:\nName: oudsm-1 Namespace: oudsmns Priority: 0 Node: \u0026lt;worker-node\u0026gt;/100.102.48.28 Start Time: \u0026lt;DATE\u0026gt; Labels: app.kubernetes.io/instance=oudsm app.kubernetes.io/managed-by=Helm app.kubernetes.io/name=oudsm app.kubernetes.io/version=12.2.1.4.0 
helm.sh/chart=oudsm-0.1 oudsm/instance=oudsm-1 Annotations: meta.helm.sh/release-name: oudsm meta.helm.sh/release-namespace: oudsmns Status: Running IP: 10.244.1.89 IPs: IP: 10.244.1.89 Containers: oudsm: Container ID: cri-o://37dbe00257095adc0a424b8841db40b70bbb65645451e0bc53718a0fd7ce22e4 Image: container-registry.oracle.com/middleware/oudsm_cpu:12.2.1.4-jdk8-ol7-\u0026lt;October'23\u0026gt; Image ID: container-registry.oracle.com/middleware/oudsm_cpu@sha256:47960d36d502d699bfd8f9b1be4c9216e302db95317c288f335f9c8a32974f2c Ports: 7001/TCP, 7002/TCP Host Ports: 0/TCP, 0/TCP State: Running Started: \u0026lt;DATE\u0026gt; Ready: True Restart Count: 0 Liveness: http-get http://:7001/oudsm delay=1200s timeout=15s period=60s #success=1 #failure=3 Readiness: http-get http://:7001/oudsm delay=900s timeout=15s period=30s #success=1 #failure=3 Environment: DOMAIN_NAME: oudsmdomain-1 ADMIN_USER: \u0026lt;set to the key 'adminUser' in secret 'oudsm-creds'\u0026gt; Optional: false ADMIN_PASS: \u0026lt;set to the key 'adminPass' in secret 'oudsm-creds'\u0026gt; Optional: false ADMIN_PORT: 7001 ADMIN_SSL_PORT: 7002 WLS_PLUGIN_ENABLED: true Mounts: /u01/oracle/user_projects from oudsm-pv (rw) /var/run/secrets/kubernetes.io/serviceaccount from kube-api-access-9ht84 (ro) Conditions: Type Status Initialized True Ready True ContainersReady True PodScheduled True Volumes: oudsm-pv: Type: PersistentVolumeClaim (a reference to a PersistentVolumeClaim in the same namespace) ClaimName: oudsm-pvc ReadOnly: false kube-api-access-9ht84: Type: Projected (a volume that contains injected data from multiple sources) TokenExpirationSeconds: 3607 ConfigMapName: kube-root-ca.crt ConfigMapOptional: \u0026lt;nil\u0026gt; DownwardAPI: true QoS Class: BestEffort Node-Selectors: \u0026lt;none\u0026gt; Tolerations: node.kubernetes.io/not-ready:NoExecute op=Exists for 300s node.kubernetes.io/unreachable:NoExecute op=Exists for 300s Events: Type Reason Age From Message ---- ------ ---- ---- ------- Warning FailedScheduling 39m default-scheduler 0/3 nodes are available: 3 pod has unbound immediate PersistentVolumeClaims. Normal Scheduled 39m default-scheduler Successfully assigned oudsmns/oudsm-1 to \u0026lt;worker-node\u0026gt; Normal Pulled 39m kubelet Container image \u0026quot;container-registry.oracle.com/middleware/oudsm_cpu:12.2.1.4-jdk8-ol7-\u0026lt;October'23\u0026gt;\u0026quot; already present on machine Normal Created 39m kubelet Created container oudsm Normal Started 39m kubelet Started container oudsm " +}, +{ + "uri": "/fmw-kubernetes/23.4.1/idm-products/oam/create-or-update-image/", + "title": "Create or update an image", + "tags": [], + "description": "Create or update an Oracle Access Management (OAM) container image used for deploying OAM domains.", + "content": "As described in Prepare Your Environment you can create your own OAM container image. 
If you have access to the My Oracle Support (MOS), and there is a need to build a new image with an interim or one off patch, it is recommended to use the WebLogic Image Tool to build an Oracle Access Management image for production deployments.\nCreate or update an Oracle Access Management image using the WebLogic Image Tool Using the WebLogic Image Tool, you can create a new Oracle Access Management image with PSU\u0026rsquo;s and interim patches or update an existing image with one or more interim patches.\n Recommendations:\n Use create for creating a new Oracle Access Management image containing the Oracle Access Management binaries, bundle patch and interim patches. This is the recommended approach if you have access to the OAM patches because it optimizes the size of the image. Use update for patching an existing Oracle Access Management image with a single interim patch. Note that the patched image size may increase considerably due to additional image layers introduced by the patch application tool. Create an image Set up the WebLogic Image Tool Prerequisites Set up the WebLogic Image Tool Validate setup WebLogic Image Tool build directory WebLogic Image Tool cache Prerequisites Verify that your environment meets the following prerequisites:\n Docker client and daemon on the build machine, with minimum Docker version 18.03.1.ce. Bash version 4.0 or later, to enable the command complete feature. JAVA_HOME environment variable set to the appropriate JDK location e.g: /scratch/export/oracle/product/jdk Set up the WebLogic Image Tool To set up the WebLogic Image Tool:\n Create a working directory and change to it:\n$ mkdir \u0026lt;workdir\u0026gt; $ cd \u0026lt;workdir\u0026gt; For example:\n$ mkdir /scratch/imagetool-setup $ cd /scratch/imagetool-setup Download the latest version of the WebLogic Image Tool from the releases page.\n$ wget https://github.com/oracle/weblogic-image-tool/releases/download/release-X.X.X/imagetool.zip where X.X.X is the latest release referenced on the releases page.\n Unzip the release ZIP file in the imagetool-setup directory.\n$ unzip imagetool.zip Execute the following commands to set up the WebLogic Image Tool:\n$ cd \u0026lt;workdir\u0026gt;/imagetool-setup/imagetool/bin $ source setup.sh For example:\n$ cd /scratch/imagetool-setup/imagetool/bin $ source setup.sh Validate setup To validate the setup of the WebLogic Image Tool:\n Enter the following command to retrieve the version of the WebLogic Image Tool:\n$ imagetool --version Enter imagetool then press the Tab key to display the available imagetool commands:\n$ imagetool \u0026lt;TAB\u0026gt; cache create help rebase update WebLogic Image Tool build directory The WebLogic Image Tool creates a temporary Docker context directory, prefixed by wlsimgbuilder_temp, every time the tool runs. Under normal circumstances, this context directory will be deleted. However, if the process is aborted or the tool is unable to remove the directory, it is safe for you to delete it manually. By default, the WebLogic Image Tool creates the Docker context directory under the user\u0026rsquo;s home directory. If you prefer to use a different directory for the temporary context, set the environment variable WLSIMG_BLDDIR:\n$ export WLSIMG_BLDDIR=\u0026#34;/path/to/build/dir\u0026#34; WebLogic Image Tool cache The WebLogic Image Tool maintains a local file cache store. This store is used to look up where the Java, WebLogic Server installers, and WebLogic Server patches reside in the local file system.
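If you want to review what has been registered in the cache so far, the tool's cache listItems command (refer to the WebLogic Image Tool documentation for the full list of cache operations) prints the current entries, for example:\n$ imagetool cache listItems This is an optional check and can be run at any point after the tool has been set up.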
By default, the cache store is located in the user\u0026rsquo;s $HOME/cache directory. Under this directory, the lookup information is stored in the .metadata file. All automatically downloaded patches also reside in this directory. You can change the default cache store location by setting the environment variable WLSIMG_CACHEDIR:\n$ export WLSIMG_CACHEDIR=\u0026#34;/path/to/cachedir\u0026#34; Set up additional build scripts Creating an Oracle Access Management container image using the WebLogic Image Tool requires additional container scripts for Oracle Access Management domains.\n Clone the docker-images repository to set up those scripts. In these steps, this directory is DOCKER_REPO:\n$ cd \u0026lt;workdir\u0026gt;/imagetool-setup $ git clone https://github.com/oracle/docker-images.git For example:\n$ cd /scratch/imagetool-setup $ git clone https://github.com/oracle/docker-images.git Note: If you want to create the image continue with the following steps, otherwise to update the image see update an image.\n Create an image After setting up the WebLogic Image Tool, follow these steps to use the WebLogic Image Tool to create a new Oracle Access Management image.\nDownload the Oracle Access Management installation binaries and patches You must download the required Oracle Access Management installation binaries and patches as listed below from the Oracle Software Delivery Cloud and save them in a directory of your choice.\nThe installation binaries and patches required are:\n Oracle Identity and Access Management 12.2.1.4.0\n fmw_12.2.1.4.0_idm.jar Oracle Fusion Middleware 12c Infrastructure 12.2.1.4.0\n fmw_12.2.1.4.0_infrastructure.jar OAM and FMW Infrastructure Patches:\n View document ID 2723908.1 on My Oracle Support. In the Container Image Download/Patch Details section, locate the Oracle Access Management (OAM) table. For the latest PSU click the README link in the Documentation column. In the README, locate the \u0026ldquo;Installed Software\u0026rdquo; section. All the patch numbers to be downloaded are listed here. Download all these individual patches from My Oracle Support. Oracle JDK v8\n jdk-8uXXX-linux-x64.tar.gz where XXX is the JDK version referenced in the README above. Update required build files The following files in the code repository location \u0026lt;imagetool-setup-location\u0026gt;/docker-images/OracleAccessManagement/imagetool/12.2.1.4.0 are used for creating the image:\n additionalBuildCmds.txt buildArgs Edit the \u0026lt;workdir\u0026gt;/imagetool-setup/docker-images/OracleAccessManagement/imagetool/12.2.1.4.0/buildArgs file and change %DOCKER_REPO%, %JDK_VERSION% and %BUILDTAG% appropriately.\nFor example:\ncreate --jdkVersion=8u301 --type oam --version=12.2.1.4.0 --tag=oam-latestpsu:12.2.1.4.0 --pull --installerResponseFile /scratch/imagetool-setup/docker-images/OracleFMWInfrastructure/dockerfiles/12.2.1.4/install.file,/scratch/imagetool-setup/docker-images/OracleAccessManagement/dockerfiles/12.2.1.4.0/install/iam.response --additionalBuildCommands /scratch/imagetool-setup/docker-images/OracleAccessManagement/imagetool/12.2.1.4.0/additionalBuildCmds.txt --additionalBuildFiles /scratch/imagetool-setup/docker-images/OracleAccessManagement/dockerfiles/12.2.1.4.0/container-scripts Edit the \u0026lt;workdir\u0026gt;/imagetool-setup/docker-images/OracleFMWInfrastructure/dockerfiles/12.2.1.4/install.file and under the GENERIC section add the line INSTALL_TYPE=\u0026quot;Fusion Middleware Infrastructure\u0026quot;.
For example:\n[GENERIC] INSTALL_TYPE=\u0026quot;Fusion Middleware Infrastructure\u0026quot; DECLINE_SECURITY_UPDATES=true SECURITY_UPDATES_VIA_MYORACLESUPPORT=false Create the image Add a JDK package to the WebLogic Image Tool cache. For example:\n$ imagetool cache addInstaller --type jdk --version 8uXXX --path \u0026lt;download location\u0026gt;/jdk-8uXXX-linux-x64.tar.gz where XXX is the JDK version downloaded\n Add the downloaded installation binaries to the WebLogic Image Tool cache. For example:\n$ imagetool cache addInstaller --type fmw --version 12.2.1.4.0 --path \u0026lt;download location\u0026gt;/fmw_12.2.1.4.0_infrastructure.jar $ imagetool cache addInstaller --type OAM --version 12.2.1.4.0 --path \u0026lt;download location\u0026gt;/fmw_12.2.1.4.0_idm.jar Add the downloaded OPatch patch to the WebLogic Image Tool cache. For example:\n$ imagetool cache addEntry --key 28186730_13.9.4.2.8 --value \u0026lt;download location\u0026gt;/p28186730_139428_Generic.zip Add the rest of the downloaded product patches to the WebLogic Image Tool cache:\n$ imagetool cache addEntry --key \u0026lt;patch\u0026gt;_12.2.1.4.0 --value \u0026lt;download location\u0026gt;/p\u0026lt;patch\u0026gt;_122140_Generic.zip For example:\n$ imagetool cache addEntry --key 32971905_12.2.1.4.0 --value \u0026lt;download location\u0026gt;/p32971905_122140_Generic.zip $ imagetool cache addEntry --key 20812896_12.2.1.4.0 --value \u0026lt;download location\u0026gt;/p20812896_122140_Generic.zip $ imagetool cache addEntry --key 32880070_12.2.1.4.0 --value \u0026lt;download location\u0026gt;/p32880070_122140_Generic.zip $ imagetool cache addEntry --key 33059296_12.2.1.4.0 --value \u0026lt;download location\u0026gt;/p33059296_122140_Generic.zip $ imagetool cache addEntry --key 32905339_12.2.1.4.0 --value \u0026lt;download location\u0026gt;/p32905339_122140_Generic.zip $ imagetool cache addEntry --key 33084721_12.2.1.4.0 --value \u0026lt;download location\u0026gt;/p33084721_122140_Generic.zip $ imagetool cache addEntry --key 31544353_12.2.1.4.0 --value \u0026lt;download location\u0026gt;/p31544353_122140_Linux-x86-64.zip $ imagetool cache addEntry --key 32957281_12.2.1.4.0 --value \u0026lt;download location\u0026gt;/p32957281_122140_Generic.zip $ imagetool cache addEntry --key 33093748_12.2.1.4.0 --value \u0026lt;download location\u0026gt;/p33093748_122140_Generic.zip Edit the \u0026lt;workdir\u0026gt;/imagetool-setup/docker-images/OracleAccessManagement/imagetool/12.2.1.4.0/buildArgs file and append the product patches and opatch patch as follows:\n--patches 32971905_12.2.1.4.0,20812896_12.2.1.4.0,32880070_12.2.1.4.0,33059296_12.2.1.4.0,32905339_12.2.1.4.0,33084721_12.2.1.4.0,31544353_12.2.1.4.0,32957281_12.2.1.4.0,33093748_12.2.1.4.0 --opatchBugNumber=28186730_13.9.4.2.8 An example buildArgs file is now as follows:\ncreate --jdkVersion=8u301 --type oam --version=12.2.1.4.0 --tag=oam-latestpsu:12.2.1.4.0 --pull --installerResponseFile /scratch/imagetool-setup/docker-images/OracleFMWInfrastructure/dockerfiles/12.2.1.4/install.file,/scratch/imagetool-setup/docker-images/OracleAccessManagement/dockerfiles/12.2.1.4.0/install/iam.response --additionalBuildCommands /scratch/imagetool-setup/docker-images/OracleAccessManagement/imagetool/12.2.1.4.0/additionalBuildCmds.txt --additionalBuildFiles /scratch/imagetool-setup/docker-images/OracleAccessManagement/dockerfiles/12.2.1.4.0/container-scripts --patches 
32971905_12.2.1.4.0,20812896_12.2.1.4.0,32880070_12.2.1.4.0,33059296_12.2.1.4.0,32905339_12.2.1.4.0,33084721_12.2.1.4.0,31544353_12.2.1.4.0,32957281_12.2.1.4.0,33093748_12.2.1.4.0 --opatchBugNumber=28186730_13.9.4.2.8 Note: In the buildArgs file:\n --jdkVersion value must match the --version value used in the imagetool cache addInstaller command for --type jdk. --version value must match the --version value used in the imagetool cache addInstaller command for --type OAM. Refer to this page for the complete list of options available with the WebLogic Image Tool create command.\n Create the Oracle Access Management image:\n$ imagetool @\u0026lt;absolute path to buildargs file\u0026gt; --fromImage ghcr.io/oracle/oraclelinux:7-slim Note: Make sure that the absolute path to the buildargs file is prepended with a @ character, as shown in the example above.\n For example:\n$ imagetool @\u0026lt;imagetool-setup-location\u0026gt;/docker-images/OracleAccessManagement/imagetool/12.2.1.4.0/buildArgs --fromImage ghcr.io/oracle/oraclelinux:7-slim Check the created image using the docker images command:\n$ docker images | grep oam The output will look similar to the following:\noam-latestpsu 12.2.1.4.0 ad732fc7c16b About a minute ago 3.35GB Run the following command to save the container image to a tar file:\n$ docker save -o \u0026lt;path\u0026gt;/\u0026lt;file\u0026gt;.tar \u0026lt;image\u0026gt; For example:\n$ docker save -o $WORKDIR/oam-latestpsu.tar oam-latestpsu:12.2.1.4.0 Update an image The steps below show how to update an existing Oracle Access Management image with an interim patch.\nThe container image to be patched must be loaded in the local docker images repository before attempting these steps.\nIn the examples below the image oracle/oam:12.2.1.4.0 is updated with an interim patch.\n$ docker images REPOSITORY TAG IMAGE ID CREATED SIZE oracle/oam 12.2.1.4.0 b051804ba15f 3 months ago 3.34GB Set up the WebLogic Image Tool.\n Download the required interim patch and latest OPatch (28186730) from My Oracle Support and save them in a directory of your choice.\n Add the OPatch patch to the WebLogic Image Tool cache, for example:\n$ imagetool cache addEntry --key 28186730_13.9.4.2.8 --value \u0026lt;downloaded-patches-location\u0026gt;/p28186730_139428_Generic.zip Execute the imagetool cache addEntry command for each patch to add the required patch(es) to the WebLogic Image Tool cache. For example, to add patch p32701831_12214210607_Generic.zip:\n$ imagetool cache addEntry --key=32701831_12.2.1.4.210607 --value \u0026lt;downloaded-patches-location\u0026gt;/p32701831_12214210607_Generic.zip Provide the following arguments to the WebLogic Image Tool update command:\n --fromImage - Identify the image that needs to be updated. In the example below, the image to be updated is oracle/oam:12.2.1.4.0. --patches - Multiple patches can be specified as a comma-separated list. --tag - Specify the new tag to be applied for the image being built. Refer here for the complete list of options available with the WebLogic Image Tool update command.\n Note: The WebLogic Image Tool cache should have the latest OPatch zip.
The WebLogic Image Tool will update the OPatch if it is not already updated in the image.\n For example:\n$ imagetool update --fromImage oracle/oam:12.2.1.4.0 --tag=oracle/oam-new:12.2.1.4.0 --patches=32701831_12.2.1.4.210607 --opatchBugNumber=28186730_13.9.4.2.8 Note: If the command fails because the files in the image being upgraded are not owned by oracle:oracle, then add the parameter --chown \u0026lt;userid\u0026gt;:\u0026lt;groupid\u0026gt; to correspond with the values returned in the error.\n Check the built image using the docker images command:\n$ docker images | grep oam The output will look similar to the following:\nREPOSITORY TAG IMAGE ID CREATED SIZE oracle/oam-new 12.2.1.4.0 78ccd1ad67eb 5 minutes ago 3.8GB oracle/oam 12.2.1.4.0 b051804ba15f 3 months ago 3.34GB Run the following command to save the patched container image to a tar file:\n$ docker save -o \u0026lt;path\u0026gt;/\u0026lt;file\u0026gt;.tar \u0026lt;image\u0026gt; For example:\n$ docker save -o $WORKDIR/oam-new.tar oracle/oam-new:12.2.1.4.0 " +}, +{ + "uri": "/fmw-kubernetes/23.4.1/idm-products/oig/create-or-update-image/", + "title": "Create or update an image", + "tags": [], + "description": "Create or update an Oracle Identity Governance (OIG) container image used for deploying OIG domains", + "content": "As described in Prepare Your Environment you can create your own OIG container image. If you have access to the My Oracle Support (MOS), and there is a need to build a new image with an interim or one off patch, it is recommended to use the WebLogic Image Tool to build an Oracle Identity Governance image for production deployments.\nCreate or update an Oracle Identity Governance image using the WebLogic Image Tool Using the WebLogic Image Tool, you can create a new Oracle Identity Governance image with PSU\u0026rsquo;s and interim patches or update an existing image with one or more interim patches.\n Recommendations:\n Use create for creating a new Oracle Identity Governance image containing the Oracle Identity Governance binaries, bundle patch and interim patches. This is the recommended approach if you have access to the OIG patches because it optimizes the size of the image. Use update for patching an existing Oracle Identity Governance image with a single interim patch. Note that the patched image size may increase considerably due to additional image layers introduced by the patch application tool. Create an image Set up the WebLogic Image Tool Prerequisites Set up the WebLogic Image Tool Validate setup WebLogic Image Tool build directory WebLogic Image Tool cache Prerequisites Verify that your environment meets the following prerequisites:\n Docker client and daemon on the build machine, with minimum Docker version 18.03.1.ce. Bash version 4.0 or later, to enable the command complete feature. 
JAVA_HOME environment variable set to the appropriate JDK location e.g: /scratch/export/oracle/product/jdk Set up the WebLogic Image Tool To set up the WebLogic Image Tool:\n Create a working directory and change to it:\n$ mkdir \u0026lt;workdir\u0026gt; $ cd \u0026lt;workdir\u0026gt; For example:\n$ mkdir /scratch/imagetool-setup $ cd /scratch/imagetool-setup Download the latest version of the WebLogic Image Tool from the releases page.\n$ wget https://github.com/oracle/weblogic-image-tool/releases/download/release-X.X.X/imagetool.zip where X.X.X is the latest release referenced on the releases page.\n Unzip the release ZIP file in the imagetool-setup directory.\n$ unzip imagetool.zip Execute the following commands to set up the WebLogic Image Tool:\n$ cd \u0026lt;workdir\u0026gt;/imagetool-setup/imagetool/bin $ source setup.sh For example:\n$ cd /scratch/imagetool-setup/imagetool/bin $ source setup.sh Validate setup To validate the setup of the WebLogic Image Tool:\n Enter the following command to retrieve the version of the WebLogic Image Tool:\n$ imagetool --version Enter imagetool then press the Tab key to display the available imagetool commands:\n$ imagetool \u0026lt;TAB\u0026gt; cache create help rebase update WebLogic Image Tool build directory The WebLogic Image Tool creates a temporary Docker context directory, prefixed by wlsimgbuilder_temp, every time the tool runs. Under normal circumstances, this context directory will be deleted. However, if the process is aborted or the tool is unable to remove the directory, it is safe for you to delete it manually. By default, the WebLogic Image Tool creates the Docker context directory under the user\u0026rsquo;s home directory. If you prefer to use a different directory for the temporary context, set the environment variable WLSIMG_BLDDIR:\n$ export WLSIMG_BLDDIR=\u0026#34;/path/to/build/dir\u0026#34; WebLogic Image Tool cache The WebLogic Image Tool maintains a local file cache store. This store is used to look up where the Java, WebLogic Server installers, and WebLogic Server patches reside in the local file system. By default, the cache store is located in the user\u0026rsquo;s $HOME/cache directory. Under this directory, the lookup information is stored in the .metadata file. All automatically downloaded patches also reside in this directory. You can change the default cache store location by setting the environment variable WLSIMG_CACHEDIR:\n$ export WLSIMG_CACHEDIR=\u0026#34;/path/to/cachedir\u0026#34; Set up additional build scripts Creating an Oracle Identity Governance container image using the WebLogic Image Tool requires additional container scripts for Oracle Identity Governance domains.\n Clone the docker-images repository to set up those scripts.
In these steps, this directory is DOCKER_REPO:\n$ cd \u0026lt;workdir\u0026gt;/imagetool-setup $ git clone https://github.com/oracle/docker-images.git For example:\n$ cd /scratch/imagetool-setup $ git clone https://github.com/oracle/docker-images.git Note: If you want to create the image continue with the following steps, otherwise to update the image see update an image.\n Create an image After setting up the WebLogic Image Tool, follow these steps to use the WebLogic Image Tool to create a new Oracle Identity Governance image.\nDownload the Oracle Identity Governance installation binaries and patches You must download the required Oracle Identity Governance installation binaries and patches as listed below from the Oracle Software Delivery Cloud and save them in a directory of your choice.\nThe installation binaries and patches required are:\n Oracle Identity and Access Management 12.2.1.4.0\n fmw_12.2.1.4.0_idm.jar Oracle Fusion Middleware 12c Infrastructure 12.2.1.4.0\n fmw_12.2.1.4.0_infrastructure.jar Oracle SOA Suite for Oracle Middleware 12.2.1.4.0\n fmw_12.2.1.4.0_soa.jar Oracle Service Bus 12.2.1.4.0\n fmw_12.2.1.4.0_osb.jar OIG and FMW Infrastructure Patches:\n View document ID 2723908.1 on My Oracle Support. In the Container Image Download/Patch Details section, locate the Oracle Identity Governance (OIG) table. For the latest PSU click the README link in the Documentation column. In the README, locate the \u0026ldquo;Installed Software\u0026rdquo; section. All the patch numbers to be downloaded are listed here. Download all these individual patches from My Oracle Support. Oracle JDK v8\n jdk-8uXXX-linux-x64.tar.gz where XXX is the JDK version referenced in the README above. Update required build files The following files in the code repository location \u0026lt;imagetool-setup-location\u0026gt;/docker-images/OracleIdentityGovernance/imagetool/12.2.1.4.0 are used for creating the image:\n additionalBuildCmds.txt buildArgs Edit the \u0026lt;workdir\u0026gt;/imagetool-setup/docker-images/OracleIdentityGovernance/imagetool/12.2.1.4.0/buildArgs file and change %DOCKER_REPO%, %JDK_VERSION% and %BUILDTAG% appropriately.\nFor example:\ncreate --jdkVersion=8u311 --type oig --chown oracle:root --version=12.2.1.4.0 --tag=oig-latestpsu:12.2.1.4.0 --pull --installerResponseFile /scratch/imagetool-setup/docker-images/OracleFMWInfrastructure/dockerfiles/12.2.1.4/install.file,/scratch/imagetool-setup/docker-images/OracleSOASuite/dockerfiles/12.2.1.4.0/install/soasuite.response,/scratch/imagetool-setup/docker-images/OracleSOASuite/dockerfiles/12.2.1.4.0/install/osb.response,/scratch/imagetool-setup/docker-images/OracleIdentityGovernance/dockerfiles/12.2.1.4.0/idmqs.response --additionalBuildCommands /scratch/imagetool-setup/docker-images/OracleIdentityGovernance/imagetool/12.2.1.4.0/additionalBuildCmds.txt --additionalBuildFiles /scratch/imagetool-setup/docker-images/OracleIdentityGovernance/dockerfiles/12.2.1.4.0/container-scripts Edit the \u0026lt;workdir\u0026gt;/imagetool-setup/docker-images/OracleFMWInfrastructure/dockerfiles/12.2.1.4.0/install.file and under the GENERIC section add the line INSTALL_TYPE=\u0026quot;Fusion Middleware Infrastructure\u0026quot;. For example:\n[GENERIC] INSTALL_TYPE=\u0026quot;Fusion Middleware Infrastructure\u0026quot; DECLINE_SECURITY_UPDATES=true SECURITY_UPDATES_VIA_MYORACLESUPPORT=false Create the image Add a JDK package to the WebLogic Image Tool cache.
For example:\n$ imagetool cache addInstaller --type jdk --version 8uXXX --path \u0026lt;download location\u0026gt;/jdk-8uXXX-linux-x64.tar.gz where XXX is the JDK version downloaded\n Add the downloaded installation binaries to the WebLogic Image Tool cache. For example:\n$ imagetool cache addInstaller --type fmw --version 12.2.1.4.0 --path \u0026lt;download location\u0026gt;/fmw_12.2.1.4.0_infrastructure.jar $ imagetool cache addInstaller --type soa --version 12.2.1.4.0 --path \u0026lt;download location\u0026gt;/fmw_12.2.1.4.0_soa.jar $ imagetool cache addInstaller --type osb --version 12.2.1.4.0 --path \u0026lt;download location\u0026gt;/fmw_12.2.1.4.0_osb.jar $ imagetool cache addInstaller --type idm --version 12.2.1.4.0 --path \u0026lt;download location\u0026gt;/fmw_12.2.1.4.0_idm.jar Add the downloaded OPatch patch to the WebLogic Image Tool cache. For example:\n$ imagetool cache addEntry --key 28186730_13.9.4.2.8 --value \u0026lt;download location\u0026gt;/p28186730_139428_Generic.zip Add the rest of the downloaded product patches to the WebLogic Image Tool cache:\n$ imagetool cache addEntry --key \u0026lt;patch\u0026gt;_12.2.1.4.0 --value \u0026lt;download location\u0026gt;/p\u0026lt;patch\u0026gt;_122140_Generic.zip For example:\n$ imagetool cache addEntry --key 33416868_12.2.1.4.0 --value \u0026lt;download location\u0026gt;/p33416868_122140_Generic.zip $ imagetool cache addEntry --key 33453703_12.2.1.4.0 --value \u0026lt;download location\u0026gt;/p33453703_122140_Generic.zip $ imagetool cache addEntry --key 32999272_12.2.1.4.0 --value \u0026lt;download location\u0026gt;/p32999272_122140_Generic.zip $ imagetool cache addEntry --key 33093748_12.2.1.4.0 --value \u0026lt;download location\u0026gt;/p33093748_122140_Generic.zip $ imagetool cache addEntry --key 33281560_12.2.1.4.0 --value \u0026lt;download location\u0026gt;/p33281560_122140_Generic.zip $ imagetool cache addEntry --key 31544353_12.2.1.4.0 --value \u0026lt;download location\u0026gt;/p31544353_122140_Linux-x86-64.zip $ imagetool cache addEntry --key 33313802_12.2.1.4.0 --value \u0026lt;download location\u0026gt;/p33313802_122140_Generic.zip $ imagetool cache addEntry --key 33408307_12.2.1.4.0 --value \u0026lt;download location\u0026gt;/p33408307_122140_Generic.zip $ imagetool cache addEntry --key 33286160_12.2.1.4.0 --value \u0026lt;download location\u0026gt;/p33286160_122140_Generic.zip $ imagetool cache addEntry --key 32880070_12.2.1.4.0 --value \u0026lt;download location\u0026gt;/p32880070_122140_Generic.zip $ imagetool cache addEntry --key 32905339_12.2.1.4.0 --value \u0026lt;download location\u0026gt;/p32905339_122140_Generic.zip $ imagetool cache addEntry --key 32784652_12.2.1.4.0 --value \u0026lt;download location\u0026gt;/p32784652_122140_Generic.zip Edit the \u0026lt;workdir\u0026gt;/imagetool-setup/docker-images/OracleIdentityGovernance/imagetool/12.2.1.4.0/buildArgs file and append the product patches and opatch patch as follows:\n--patches 33416868_12.2.1.4.0,33453703_12.2.1.4.0,32999272_12.2.1.4.0,33093748_12.2.1.4.0,33281560_12.2.1.4.0,31544353_12.2.1.4.0,33313802_12.2.1.4.0,33408307_12.2.1.4.0,33286160_12.2.1.4.0,32880070_12.2.1.4.0,32905339_12.2.1.4.0,32784652_12.2.1.4.0 --opatchBugNumber=28186730_13.9.4.2.8 An example buildArgs file is now as follows:\ncreate --jdkVersion=8u301 --type oig --version=12.2.1.4.0 --tag=oig-latestpsu:12.2.1.4.0 --pull --installerResponseFile 
/scratch/imagetool-setup/docker-images/OracleFMWInfrastructure/dockerfiles/12.2.1.4/install.file,/scratch/docker-images/OracleSOASuite/dockerfiles/12.2.1.4.0/install/soasuite.response,/scratch/docker-images/OracleSOASuite/dockerfiles/12.2.1.4.0/install/osb.response,/scratch/docker-images/OracleIdentityGovernance/dockerfiles/12.2.1.4.0/idmqs.response --additionalBuildCommands /scratch/imagetool-setup/docker-images/OracleIdentityGovernance/imagetool/12.2.1.4.0/additionalBuildCmds.txt --additionalBuildFiles /scratch/imagetool-setup/docker-images/OracleIdentityGovernance/dockerfiles/12.2.1.4.0/container-scripts --patches 33416868_12.2.1.4.0,33453703_12.2.1.4.0,32999272_12.2.1.4.0,33093748_12.2.1.4.0,33281560_12.2.1.4.0,31544353_12.2.1.4.0,33313802_12.2.1.4.0,33408307_12.2.1.4.0,33286160_12.2.1.4.0,32880070_12.2.1.4.0,32905339_12.2.1.4.0,32784652_12.2.1.4.0 --opatchBugNumber=28186730_13.9.4.2.8 Note: In the buildArgs file:\n --jdkVersion value must match the --version value used in the imagetool cache addInstaller command for --type jdk. --version value must match the --version value used in the imagetool cache addInstaller command for --type idm. Refer to this page for the complete list of options available with the WebLogic Image Tool create command.\n Create the Oracle Identity Governance image:\n$ imagetool @\u0026lt;absolute path to buildargs file\u0026gt; --fromImage ghcr.io/oracle/oraclelinux:7-slim Note: Make sure that the absolute path to the buildargs file is prepended with a @ character, as shown in the example above.\n For example:\n$ imagetool @\u0026lt;imagetool-setup-location\u0026gt;/docker-images/OracleIdentityGovernance/imagetool/12.2.1.4.0/buildArgs --fromImage ghcr.io/oracle/oraclelinux:7-slim Check the created image using the docker images command:\n$ docker images | grep oig The output will look similar to the following:\noig-latestpsu 12.2.1.4.0 e391ed154bcb 50 seconds ago 4.43GB Run the following command to save the container image to a tar file:\n$ docker save -o \u0026lt;path\u0026gt;/\u0026lt;file\u0026gt;.tar \u0026lt;image\u0026gt; For example:\n$ docker save -o $WORKDIR/oig-latestpsu.tar oig-latestpsu:12.2.1.4.0 Update an image The steps below show how to update an existing Oracle Identity Governance image with an interim patch.\nThe container image to be patched must be loaded in the local docker images repository before attempting these steps.\nIn the examples below the image oracle/oig:12.2.1.4.0 is updated with an interim patch.\n$ docker images REPOSITORY TAG IMAGE ID CREATED SIZE oracle/oig 12.2.1.4.0 298fdb98e79c 3 months ago 4.42GB Set up the WebLogic Image Tool.\n Download the required interim patch and latest OPatch (28186730) from My Oracle Support and save them in a directory of your choice.\n Add the OPatch patch to the WebLogic Image Tool cache, for example:\n$ imagetool cache addEntry --key 28186730_13.9.4.2.8 --value \u0026lt;downloaded-patches-location\u0026gt;/p28186730_139428_Generic.zip Execute the imagetool cache addEntry command for each patch to add the required patch(es) to the WebLogic Image Tool cache. For example, to add patch p33165837_12214210708_Generic.zip:\n$ imagetool cache addEntry --key=33165837_12.2.1.4.210708 --value \u0026lt;downloaded-patches-location\u0026gt;/p33165837_12214210708_Generic.zip Provide the following arguments to the WebLogic Image Tool update command:\n --fromImage - Identify the image that needs to be updated. In the example below, the image to be updated is oracle/oig:12.2.1.4.0. 
--patches - Multiple patches can be specified as a comma-separated list. --tag - Specify the new tag to be applied to the image being built. Refer here for the complete list of options available with the WebLogic Image Tool update command.\n Note: The WebLogic Image Tool cache should have the latest OPatch zip. The WebLogic Image Tool will update the OPatch if it is not already updated in the image.\n For example:\n$ imagetool update --fromImage oracle/oig:12.2.1.4.0 --tag=oracle/oig-new:12.2.1.4.0 --patches=33165837_12.2.1.4.210708 --opatchBugNumber=28186730_13.9.4.2.8 Note: If the command fails because the files in the image being upgraded are not owned by oracle:oracle, then add the parameter --chown \u0026lt;userid\u0026gt;:\u0026lt;groupid\u0026gt; to correspond with the values returned in the error.\n Check the built image using the docker images command:\n$ docker images | grep oig The output will look similar to the following:\nREPOSITORY TAG IMAGE ID CREATED SIZE oracle/oig-new 12.2.1.4.0 0c8381922e95 16 seconds ago 4.91GB oracle/oig 12.2.1.4.0 298fdb98e79c 3 months ago 4.42GB Run the following command to save the patched container image to a tar file:\n$ docker save -o \u0026lt;path\u0026gt;/\u0026lt;file\u0026gt;.tar \u0026lt;image\u0026gt; For example:\n$ docker save -o $WORKDIR/oig-new.tar oracle/oig-new:12.2.1.4.0 " +}, +{ + "uri": "/fmw-kubernetes/23.4.1/idm-products/oam/patch-and-upgrade/", + "title": "Patch and Upgrade", + "tags": [], + "description": "This document provides steps to patch or upgrade an OAM image, WebLogic Kubernetes Operator, ELK, and Ingress.", + "content": "This section shows you how to upgrade the WebLogic Kubernetes Operator, the OAM image, the Elasticsearch and Kibana stack, and the Ingress.\nThe upgrade path taken depends on the version you are upgrading from, and the version you are upgrading to.\nPlease refer to the Release Notes for information on which upgrade steps are necessary for the version you are upgrading to.\n a. Upgrade an operator release Instructions on how to update the WebLogic Kubernetes Operator version.\n b. Patch an image Instructions on how to update your OAM Kubernetes cluster with a new OAM container image.\n c. Upgrade Ingress Instructions on how to upgrade the ingress.\n d. Upgrade Elasticsearch and Kibana Instructions on how to upgrade Elasticsearch and Kibana.\n " +}, +{ + "uri": "/fmw-kubernetes/23.4.1/idm-products/oig/patch-and-upgrade/", + "title": "Patch and upgrade", + "tags": [], + "description": "This document provides steps to patch or upgrade an OIG image, or WebLogic Kubernetes Operator.", + "content": "This section shows you how to upgrade the WebLogic Kubernetes Operator, upgrade the OIG image, and patch the OIG domain. It also shows you how to upgrade the Elasticsearch and Kibana stack, and the Ingress.\nThe upgrade path taken depends on the version you are upgrading from, and the version you are upgrading to.\nPlease refer to the Release Notes for information on which upgrade steps are necessary for the version you are upgrading to.\n a. Upgrade an operator release Instructions on how to update the WebLogic Kubernetes Operator version.\n b. Patch an image Instructions on how to update your OIG Kubernetes cluster with a new OIG container image.\n c. Upgrade Ingress Instructions on how to upgrade the ingress.\n d. 
Upgrade Elasticsearch and Kibana Instructions on how to upgrade Elasticsearch and Kibana.\n " +}, +{ + "uri": "/fmw-kubernetes/23.4.1/idm-products/oam/troubleshooting/", + "title": "Troubleshooting", + "tags": [], + "description": "How to Troubleshoot domain creation failure.", + "content": "Domain creation failure If the OAM domain creation fails when running create-domain.sh, run the following to diagnose the issue:\n Run the following command to diagnose the create domain job:\n$ kubectl logs \u0026lt;domain_job\u0026gt; -n \u0026lt;domain_namespace\u0026gt; For example:\n$ kubectl logs accessdomain-create-fmw-infra-sample-domain-job-c6vfb -n oamns Also run:\n$ kubectl describe pod \u0026lt;domain_job\u0026gt; -n \u0026lt;domain_namespace\u0026gt; For example:\n$ kubectl describe pod accessdomain-create-fmw-infra-sample-domain-job-c6vfb -n oamns Using the output you should be able to diagnose the problem and resolve the issue.\nClean down the failed domain creation by following steps 1-3 in Delete the OAM domain home. Then follow RCU schema creation onwards to recreate the RCU schema, Kubernetes secrets for domain and RCU, the persistent volume and the persistent volume claim. Then execute the OAM domain creation steps again.\n If any of the above commands return the following error:\nFailed to start container \u0026quot;create-fmw-infra-sample-domain-job\u0026quot;: Error response from daemon: error while creating mount source path '/scratch/shared/accessdomainpv ': mkdir /scratch/shared/accessdomainpv : permission denied then there is a permissions error on the directory for the PV and PVC and the following should be checked:\na) The directory has 777 permissions: chmod -R 777 \u0026lt;persistent_volume\u0026gt;/accessdomainpv.\nb) If it does have the permissions, check if an oracle user exists and the uid is 1000 and gid is 0.\nCreate the oracle user if it doesn\u0026rsquo;t exist and set the uid to 1000 and gid to 0.\nc) Edit the $WORKDIR/kubernetes/create-weblogic-domain-pv-pvc/create-pv-pvc-inputs.yaml and add a slash to the end of the directory for the weblogicDomainStoragePath parameter:\nweblogicDomainStoragePath: /scratch/shared/accessdomainpv/ Clean down the failed domain creation by following steps 1-3 in Delete the OAM domain home. Then follow RCU schema creation onwards to recreate the RCU schema, Kubernetes secrets for domain and RCU, the persistent volume and the persistent volume claim. Then execute the OAM domain creation steps again.\n " +}, +{ + "uri": "/fmw-kubernetes/23.4.1/idm-products/oig/troubleshooting/", + "title": "Troubleshooting", + "tags": [], + "description": "Sample for creating an OIG domain home on an existing PV or PVC, and the domain resource YAML file for deploying the generated OIG domain.", + "content": "Domain creation failure If the OIG domain creation fails when running create-domain.sh, run the following to diagnose the issue:\n Run the following command to diagnose the create domain job:\n$ kubectl logs \u0026lt;job_name\u0026gt; -n \u0026lt;domain_namespace\u0026gt; For example:\n$ kubectl logs governancedomain-create-fmw-infra-sample-domain-job-9wqzb -n oigns Also run:\n$ kubectl describe pod \u0026lt;job_name\u0026gt; -n \u0026lt;domain_namespace\u0026gt; For example:\n$ kubectl describe pod governancedomain-create-fmw-infra-sample-domain-job-9wqzb -n oigns Using the output you should be able to diagnose the problem and resolve the issue.\nClean down the failed domain creation by following steps 1-3 in Delete the OIG domain home. 
Then follow RCU schema creation onwards to recreate the RCU schema, Kubernetes secrets for domain and RCU, the persistent volume and the persistent volume claim. Then execute the OIG domain creation steps again.\n If any of the above commands return the following error:\nFailed to start container \u0026quot;create-fmw-infra-sample-domain-job\u0026quot;: Error response from daemon: error while creating mount source path '/scratch/shared/governancedomainpv ': mkdir /scratch/shared/governancedomainpv : permission denied then there is a permissions error on the directory for the PV and PVC and the following should be checked:\na) The directory has 777 permissions: chmod -R 777 \u0026lt;persistent_volume\u0026gt;/governancedomainpv.\nb) If it does have the permissions, check if an oracle user exists and the uid and gid equal 1000, for example:\n$ uid=1000(oracle) gid=1000(spg) groups=1000(spg),59968(oinstall),8500(dba),100(users),1007(cgbudba) Create the oracle user if it doesn\u0026rsquo;t exist and set the uid and gid to 1000.\nc) Edit the $WORKDIR/kubernetes/create-weblogic-domain-pv-pvc/create-pv-pvc-inputs.yaml and add a slash to the end of the directory for the weblogicDomainStoragePath parameter:\nweblogicDomainStoragePath: /scratch/shared/governancedomainpv/ Clean down the failed domain creation by following steps 1-3 in Delete the OIG domain home. Then follow RCU schema creation onwards to recreate the RCU schema, kubernetes secrets for domain and RCU, the persistent volume and the persistent volume claim. Then execute the OIG domain creation steps again.\n Patch domain failures The instructions in this section relate to problems patching a deployment with a new image as per Patch an image.\n If the OIG domain patching fails when running patch_oig_domain.sh, run the following to diagnose the issue:\n$ kubectl describe domain \u0026lt;domain name\u0026gt; -n \u0026lt;domain_namespace\u0026gt; For example:\n$ kubectl describe domain governancedomain -n oigns Using the output you should be able to diagnose the problem and resolve the issue.\nIf the domain is already patched successfully and the script failed at the last step of waiting for pods to come up with the new image, then you do not need to rerun the script again after issue resolution. The pods will come up automatically once you resolve the underlying issue.\n If the script is stuck at the following message for a long time:\n\u0026quot;[INFO] Waiting for weblogic pods to be ready..This may take several minutes, do not close the window. 
Check log /scratch/OIGK8Slatest/fmw-kubernetes/OracleIdentityGovernance/kubernetes/domain-lifecycle/log/oim_patch_log-\u0026lt;DATE\u0026gt;/monitor_weblogic_pods.log for progress\u0026quot; run the following command to diagnose the issue:\n$ kubectl get pods -n \u0026lt;domain_namespace\u0026gt; For example:\n$ kubectl get pods -n oigns Run the following to check the logs of the AdminServer, SOA server or OIM server pods, as there may be an issue that is not allowing the domain pods to start properly:\n$ kubectl logs \u0026lt;pod\u0026gt; -n oigns If the above does not yield any information, you can also run:\n$ kubectl describe pod \u0026lt;pod\u0026gt; -n oigns Further diagnostic logs can also be found under the $WORKDIR/kubernetes/domain-lifecycle directory.\nOnce any issue is resolved the pods will come up automatically without the need to rerun the script.\n " +}, +{ + "uri": "/fmw-kubernetes/23.4.1/idm-products/", + "title": "Oracle Identity Management on Kubernetes", + "tags": [], + "description": "This document lists all the Oracle Identity Management product deployments supported on Kubernetes.", + "content": "Oracle Fusion Middleware on Kubernetes Oracle supports the deployment of the following Oracle Identity Management products on Kubernetes. Click on the appropriate document link below to get started on configuring the product.\nPlease note the following:\n The individual product guides below for Oracle Access Management, Oracle Identity Governance, Oracle Unified Directory, and Oracle Unified Directory Services Manager, are for configuring that product on a Kubernetes cluster where no other Oracle Identity Management products will be deployed. For example, if you are deploying Oracle Access Management (OAM) only, then you can follow the Oracle Access Management guide. If you are deploying multiple Oracle Identity Management products on the same Kubernetes cluster, then you must follow the Enterprise Deployment Guide outlined in Enterprise Deployments. Please note, you also have the option to follow the Enterprise Deployment Guide even if you are only installing one product, such as OAM for example.\n The individual product guides do not explain how to configure a Kubernetes cluster, given the product can be deployed on any compliant Kubernetes vendor. If you need to understand how to configure a Kubernetes cluster ready for an Oracle Identity Management deployment, you should follow the Enterprise Deployment Guide in Enterprise Deployments.\n The Enterprise Deployment Automation section also contains details on automation scripts that can:\n Automate the creation of a Kubernetes cluster on Oracle Cloud Infrastructure (OCI), ready for the deployment of Oracle Identity Management products. Automate the deployment of Oracle Identity Management products on any compliant Kubernetes cluster. 
Enterprise Deployments The complete Oracle Identity Management suite can be deployed in a production environment\n Oracle Access Management The WebLogic Kubernetes Operator supports deployment of Oracle Access Management (OAM).\n Oracle Identity Governance The WebLogic Kubernetes Operator supports deployment of Oracle Identity Governance (OIG).\n Oracle Internet Directory Oracle Internet Directory provides a comprehensive Directory Solution for robust Identity Management\n Oracle Unified Directory Oracle Unified Directory provides a comprehensive Directory Solution for robust Identity Management\n Oracle Unified Directory Services Manager Oracle Unified Directory Services Manager provides an interface for managing instances of Oracle Unified Directory\n " +}, +{ + "uri": "/fmw-kubernetes/23.4.1/idm-products/oud/manage-oud-containers/scaling-up-down/", + "title": "a) Scaling Up/Down OUD Pods ", + "tags": [], + "description": "Describes the steps for scaling up/down for OUD pods.", + "content": "Introduction This section describes how to increase or decrease the number of OUD pods in the Kubernetes deployment.\nNote: The instructions below are for scaling servers up or down manually. If you wish to use autoscaling, see Kubernetes Horizontal Pod Autoscaler. Please note, if you have enabled autoscaling, it is recommended to delete the autoscaler before running the commands below.\nView existing OUD pods By default the oud-ds-rs helm chart deployment starts three pods: oud-ds-rs-0 and two replica pods oud-ds-rs-1 and oud-ds-rs-2.\nThe number of pods started is determined by the replicaCount, which is set to 3 by default. A value of 3 starts the three pods above.\nTo scale up or down the number of OUD pods, set replicaCount accordingly.\nRun the following command to view the number of pods in the OUD deployment:\n$ kubectl --namespace \u0026lt;namespace\u0026gt; get pods -o wide For example:\n$ kubectl --namespace oudns get pods -o wide The output will look similar to the following:\nNAME READY STATUS RESTARTS AGE IP NODE NOMINATED NODE READINESS GATES pod/oud-ds-rs-0 1/1 Running 0 22h 10.244.0.195 \u0026lt;Worker Node\u0026gt; \u0026lt;none\u0026gt; \u0026lt;none\u0026gt; pod/oud-ds-rs-1 1/1 Running 0 22h 10.244.0.194 \u0026lt;Worker Node\u0026gt; \u0026lt;none\u0026gt; \u0026lt;none\u0026gt; pod/oud-ds-rs-2 1/1 Running 0 22h 10.244.0.193 \u0026lt;Worker Node\u0026gt; \u0026lt;none\u0026gt; \u0026lt;none\u0026gt; Scaling up OUD pods In this example, replicaCount is increased to 4 which creates a new OUD pod oud-ds-rs-3 with associated services created.\nYou can scale up the number of OUD pods using one of the following methods:\n Using a YAML file Using --set argument Using a YAML file Navigate to the $WORKDIR/kubernetes/helm directory:\n$ cd $WORKDIR/kubernetes/helm Create a oud-scaleup-override.yaml file that contains:\nreplicaCount: 4 Run the following command to scale up the OUD pods:\n$ helm upgrade --namespace \u0026lt;namespace\u0026gt; \\ --values oud-scaleup-override.yaml \\ \u0026lt;release_name\u0026gt; oud-ds-rs --reuse-values For example:\n$ helm upgrade --namespace oudns \\ --values oud-scaleup-override.yaml \\ oud-ds-rs oud-ds-rs --reuse-values Using --set argument Run the following command to scale up the OUD pods:\n$ helm upgrade --namespace \u0026lt;namespace\u0026gt; \\ --set replicaCount=4 \\ \u0026lt;release_name\u0026gt; oud-ds-rs --reuse-values For example:\n$ helm upgrade --namespace oudns \\ --set replicaCount=4 \\ oud-ds-rs oud-ds-rs --reuse-values Verify the pods 
Verify the new OUD pod oud-ds-rs-3 and has started:\n$ kubectl get pod,service -o wide -n \u0026lt;namespace\u0026gt; For example:\n$ kubectl get pods,service -n oudns The output will look similar to the following:\nNAME READY STATUS RESTARTS AGE IP NODE NOMINATED NODE READINESS GATES pod/oud-ds-rs-0 1/1 Running 0 22h 10.244.0.195 \u0026lt;Worker Node\u0026gt; \u0026lt;none\u0026gt; \u0026lt;none\u0026gt; pod/oud-ds-rs-1 1/1 Running 0 22h 10.244.0.194 \u0026lt;Worker Node\u0026gt; \u0026lt;none\u0026gt; \u0026lt;none\u0026gt; pod/oud-ds-rs-2 1/1 Running 0 22h 10.244.0.193 \u0026lt;Worker Node\u0026gt; \u0026lt;none\u0026gt; \u0026lt;none\u0026gt; pod/oud-ds-rs-3 1/1 Running 0 17m 10.244.0.193 \u0026lt;Worker Node\u0026gt; \u0026lt;none\u0026gt; \u0026lt;none\u0026gt; NAME TYPE CLUSTER-IP EXTERNAL-IP PORT(S) AGE SELECTOR service/oud-ds-rs ClusterIP None \u0026lt;none\u0026gt; 1444/TCP,1888/TCP,1389/TCP,1636/TCP,1080/TCP,1081/TCP,1898/TCP 22h app.kubernetes.io/instance=oud-ds-rs,app.kubernetes.io/name=oud-ds-rs service/oud-ds-rs-0 ClusterIP None \u0026lt;none\u0026gt; 1444/TCP,1888/TCP,1898/TCP 22h app.kubernetes.io/instance=oud-ds-rs,app.kubernetes.io/name=oud-ds-rs,statefulset.kubernetes.io/pod-name=oud-ds-rs-0 service/oud-ds-rs-1 ClusterIP None \u0026lt;none\u0026gt; 1444/TCP,1888/TCP,1898/TCP 22h app.kubernetes.io/instance=oud-ds-rs,app.kubernetes.io/name=oud-ds-rs,statefulset.kubernetes.io/pod-name=oud-ds-rs-1 service/oud-ds-rs-2 ClusterIP None \u0026lt;none\u0026gt; 1444/TCP,1888/TCP,1898/TCP 22h app.kubernetes.io/instance=oud-ds-rs,app.kubernetes.io/name=oud-ds-rs,statefulset.kubernetes.io/pod-name=oud-ds-rs-2 service/oud-ds-rs-3 ClusterIP None \u0026lt;none\u0026gt; 1444/TCP,1888/TCP,1898/TCP 9m9s app.kubernetes.io/instance=oud-ds-rs,app.kubernetes.io/name=oud-ds-rs,statefulset.kubernetes.io/pod-name=oud-ds-rs-3 service/oud-ds-rs-http-0 ClusterIP 10.104.112.93 \u0026lt;none\u0026gt; 1080/TCP,1081/TCP 22h app.kubernetes.io/instance=oud-ds-rs,app.kubernetes.io/name=oud-ds-rs,statefulset.kubernetes.io/pod-name=oud-ds-rs-0 service/oud-ds-rs-http-1 ClusterIP 10.103.105.70 \u0026lt;none\u0026gt; 1080/TCP,1081/TCP 22h app.kubernetes.io/instance=oud-ds-rs,app.kubernetes.io/name=oud-ds-rs,statefulset.kubernetes.io/pod-name=oud-ds-rs-1 service/oud-ds-rs-http-2 ClusterIP 10.110.160.107 \u0026lt;none\u0026gt; 1080/TCP,1081/TCP 22h app.kubernetes.io/instance=oud-ds-rs,app.kubernetes.io/name=oud-ds-rs,statefulset.kubernetes.io/pod-name=oud-ds-rs-2 service/oud-ds-rs-http-3 ClusterIP 10.102.93.179 \u0026lt;none\u0026gt; 1080/TCP,1081/TCP 9m9s app.kubernetes.io/instance=oud-ds-rs,app.kubernetes.io/name=oud-ds-rs,statefulset.kubernetes.io/pod-name=oud-ds-rs-3 service/oud-ds-rs-lbr-admin ClusterIP 10.99.238.222 \u0026lt;none\u0026gt; 1888/TCP,1444/TCP 22h app.kubernetes.io/instance=oud-ds-rs,app.kubernetes.io/name=oud-ds-rs service/oud-ds-rs-lbr-http ClusterIP 10.101.250.196 \u0026lt;none\u0026gt; 1080/TCP,1081/TCP 22h app.kubernetes.io/instance=oud-ds-rs,app.kubernetes.io/name=oud-ds-rs service/oud-ds-rs-lbr-ldap ClusterIP 10.104.149.90 \u0026lt;none\u0026gt; 1389/TCP,1636/TCP 22h app.kubernetes.io/instance=oud-ds-rs,app.kubernetes.io/name=oud-ds-rs service/oud-ds-rs-ldap-0 ClusterIP 10.109.255.221 \u0026lt;none\u0026gt; 1389/TCP,1636/TCP 22h app.kubernetes.io/instance=oud-ds-rs,app.kubernetes.io/name=oud-ds-rs,statefulset.kubernetes.io/pod-name=oud-ds-rs-0 service/oud-ds-rs-ldap-1 ClusterIP 10.111.135.142 \u0026lt;none\u0026gt; 1389/TCP,1636/TCP 22h 
app.kubernetes.io/instance=oud-ds-rs,app.kubernetes.io/name=oud-ds-rs,statefulset.kubernetes.io/pod-name=oud-ds-rs-1 service/oud-ds-rs-ldap-2 ClusterIP 10.100.8.145 \u0026lt;none\u0026gt; 1389/TCP,1636/TCP 22h app.kubernetes.io/instance=oud-ds-rs,app.kubernetes.io/name=oud-ds-rs,statefulset.kubernetes.io/pod-name=oud-ds-rs-2 service/oud-ds-rs-ldap-3 ClusterIP 10.111.177.46 \u0026lt;none\u0026gt; 1389/TCP,1636/TCP 9m9s app.kubernetes.io/instance=oud-ds-rs,app.kubernetes.io/name=oud-ds-rs,statefulset.kubernetes.io/pod-name=oud-ds-rs-3 Note: It will take several minutes before all the services listed above show. While the oud-ds-rs-3 pod has a STATUS of 0/1 the pod is started but the OUD server associated with it is currently starting. While the pod is starting you can check the startup status in the pod log, by running the following command:\n$ kubectl logs oud-ds-rs-3 -n oudns Scaling down OUD pods Scaling down OUD pods is performed in exactly the same as in Scaling up OUD pods except the replicaCount is reduced to the required number of pods.\nOnce the kubectl command is executed the pod(s) will move to a Terminating state. In the example below replicaCount was reduced from 4 to 3 and hence oud-ds-rs-3 has moved to Terminating:\n$ kubectl get pods -n oudns NAME READY STATUS RESTARTS AGE IP NODE NOMINATED NODE READINESS GATES pod/oud-ds-rs-0 1/1 Running 0 22h 10.244.0.195 \u0026lt;Worker Node\u0026gt; \u0026lt;none\u0026gt; \u0026lt;none\u0026gt; pod/oud-ds-rs-1 1/1 Running 0 22h 10.244.0.194 \u0026lt;Worker Node\u0026gt; \u0026lt;none\u0026gt; \u0026lt;none\u0026gt; pod/oud-ds-rs-2 1/1 Running 0 22h 10.244.0.193 \u0026lt;Worker Node\u0026gt; \u0026lt;none\u0026gt; \u0026lt;none\u0026gt; pod/oud-ds-rs-3 1/1 Terminating 0 21m 10.244.0.193 \u0026lt;Worker Node\u0026gt; \u0026lt;none\u0026gt; \u0026lt;none\u0026gt; The pod will take a minute or two to stop and then will disappear:\n$ kubectl get pods -n oudns NAME READY STATUS RESTARTS AGE IP NODE NOMINATED NODE READINESS GATES pod/oud-ds-rs-0 1/1 Running 0 22h 10.244.0.195 \u0026lt;Worker Node\u0026gt; \u0026lt;none\u0026gt; \u0026lt;none\u0026gt; pod/oud-ds-rs-1 1/1 Running 0 22h 10.244.0.194 \u0026lt;Worker Node\u0026gt; \u0026lt;none\u0026gt; \u0026lt;none\u0026gt; pod/oud-ds-rs-2 1/1 Running 0 22h 10.244.0.193 \u0026lt;Worker Node\u0026gt; \u0026lt;none\u0026gt; \u0026lt;none\u0026gt; " +}, +{ + "uri": "/fmw-kubernetes/23.4.1/idm-products/oudsm/manage-oudsm-containers/scaling-up-down/", + "title": "a) Scaling Up/Down OUDSM Pods ", + "tags": [], + "description": "Describes the steps for scaling up/down for OUDSM pods.", + "content": "Introduction This section describes how to increase or decrease the number of OUDSM pods in the Kubernetes deployment.\nView existing OUDSM pods By default the oudsm helm chart deployment starts one pod: oudsm-1.\nThe number of pods started is determined by the replicaCount, which is set to 1 by default. 
A value of 1 starts the pod above.\nTo scale up or down the number of OUDSM pods, set replicaCount accordingly.\nRun the following command to view the number of pods in the OUDSM deployment:\n$ kubectl --namespace \u0026lt;namespace\u0026gt; get pods -o wide For example:\n$ kubectl --namespace oudsmns get pods -o wide The output will look similar to the following:\nNAME READY STATUS RESTARTS AGE IP NODE NOMINATED NODE READINESS GATES pod/oudsm-1 1/1 Running 0 73m 10.244.0.19 \u0026lt;worker-node\u0026gt; \u0026lt;none\u0026gt; \u0026lt;none\u0026gt; Scaling up OUDSM pods In this example, replicaCount is increased to 2 which creates a new OUDSM pod oudsm-2 with associated services created.\nYou can scale up the number of OUDSM pods using one of the following methods:\n Using a YAML file Using --set argument Using a YAML file Navigate to the $WORKDIR/kubernetes/helm directory:\n$ cd $WORKDIR/kubernetes/helm Create an oudsm-scaleup-override.yaml file that contains:\nreplicaCount: 2 Run the following command to scale up the OUDSM pods:\n$ helm upgrade --namespace \u0026lt;namespace\u0026gt; \\ --values oudsm-scaleup-override.yaml \\ \u0026lt;release_name\u0026gt; oudsm --reuse-values For example:\n$ helm upgrade --namespace oudsmns \\ --values oudsm-scaleup-override.yaml \\ oudsm oudsm --reuse-values Using --set argument Run the following command to scale up the OUDSM pods:\n$ helm upgrade --namespace \u0026lt;namespace\u0026gt; \\ --set replicaCount=2 \\ \u0026lt;release_name\u0026gt; oudsm --reuse-values For example:\n$ helm upgrade --namespace oudsmns \\ --set replicaCount=2 \\ oudsm oudsm --reuse-values Verify the pods Verify the new OUDSM pod oudsm-2 has started:\n$ kubectl get pod,service -o wide -n \u0026lt;namespace\u0026gt; For example:\n$ kubectl get pods,service -n oudsmns The output will look similar to the following:\nNAME READY STATUS RESTARTS AGE IP NODE NOMINATED NODE READINESS GATES pod/oudsm-1 1/1 Running 0 88m 10.244.0.19 \u0026lt;worker-node\u0026gt; \u0026lt;none\u0026gt; \u0026lt;none\u0026gt; pod/oudsm-2 1/1 Running 0 15m 10.245.3.45 \u0026lt;worker-node\u0026gt; \u0026lt;none\u0026gt; \u0026lt;none\u0026gt; NAME TYPE CLUSTER-IP EXTERNAL-IP PORT(S) AGE SELECTOR service/oudsm-1 ClusterIP 10.96.108.200 \u0026lt;none\u0026gt; 7001/TCP,7002/TCP 88m app.kubernetes.io/instance=oudsm,app.kubernetes.io/name=oudsm,oudsm/instance=oudsm-1 service/oudsm-2 ClusterIP 10.96.31.201 \u0026lt;none\u0026gt; 7001/TCP,7002/TCP 15m app.kubernetes.io/instance=oudsm,app.kubernetes.io/name=oudsm,oudsm/instance=oudsm-2 service/oudsm-lbr ClusterIP 10.96.41.201 \u0026lt;none\u0026gt; 7001/TCP,7002/TCP 73m app.kubernetes.io/instance=oudsm,app.kubernetes.io/name=oudsm Note: It will take several minutes before all the services listed above show. While the oudsm-2 pod has a READY status of 0/1 the pod is started but the OUDSM server associated with it is currently starting. While the pod is starting you can check the startup status in the pod log, by running the following command:\n$ kubectl logs oudsm-2 -n oudsmns Scaling down OUDSM pods Scaling down OUDSM pods is performed in exactly the same way as in Scaling up OUDSM pods except the replicaCount is reduced to the required number of pods.\nOnce the helm command is executed the pod(s) will move to a Terminating state. 
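For example, to scale back down to one pod using the --set method, a minimal sketch (assuming the oudsm release name and oudsmns namespace used in the examples above) is:\n$ helm upgrade --namespace oudsmns \\ --set replicaCount=1 \\ oudsm oudsm --reuse-values 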
In the example below replicaCount was reduced from 2 to 1 and hence oudsm-2 has moved to Terminating:\n$ kubectl get pods -n oudsmns NAME READY STATUS RESTARTS AGE IP NODE NOMINATED NODE READINESS GATES pod/oudsm-1 1/1 Running 0 92m 10.244.0.19 \u0026lt;worker-node\u0026gt; \u0026lt;none\u0026gt; \u0026lt;none\u0026gt; pod/oudsm-2 1/1 Terminating 0 19m 10.245.3.45 \u0026lt;worker-node\u0026gt; \u0026lt;none\u0026gt; \u0026lt;none\u0026gt; The pod will take a minute or two to stop and then will disappear:\n$ kubectl get pods -n oudsmns NAME READY STATUS RESTARTS AGE IP NODE NOMINATED NODE READINESS GATES pod/oudsm-1 1/1 Running 0 94m 10.244.0.19 \u0026lt;worker-node\u0026gt; \u0026lt;none\u0026gt; \u0026lt;none\u0026gt; " +}, +{ + "uri": "/fmw-kubernetes/23.4.1/idm-products/oam/manage-oam-domains/domain-lifecycle/", + "title": "a. Domain Life Cycle", + "tags": [], + "description": "Learn about the domain life cycle of an OAM domain.", + "content": " View existing OAM servers Starting/Scaling up OAM Managed servers Stopping/Scaling down OAM Managed servers Starting/Scaling up OAM Policy Managed servers Stopping/Scaling down OAM Policy Managed servers Stopping and starting the Administration Server and Managed Servers Domain lifecycle sample scripts As OAM domains use the WebLogic Kubernetes Operator, domain lifecycle operations are managed using the WebLogic Kubernetes Operator itself.\nThis document shows the basic operations for starting, stopping and scaling servers in the OAM domain.\nFor more detailed information refer to Domain Life Cycle in the WebLogic Kubernetes Operator documentation.\nDo not use the WebLogic Server Administration Console or Oracle Enterprise Manager Console to start or stop servers.\n Note: The instructions below are for starting, stopping, or scaling servers manually. If you wish to use autoscaling, see Kubernetes Horizontal Pod Autoscaler. Please note, if you have enabled autoscaling, it is recommended to delete the autoscaler before running the commands below.\nView existing OAM servers The default OAM deployment starts the Administration Server (AdminServer), one OAM Managed Server (oam_server1) and one OAM Policy Manager server (oam_policy_mgr1).\nThe deployment also creates, but doesn\u0026rsquo;t start, four extra OAM Managed Servers (oam_server2 to oam_server5) and four more OAM Policy Manager servers (oam_policy_mgr2 to oam_policy_mgr5).\nAll these servers are visible in the WebLogic Server Console https://${MASTERNODE-HOSTNAME}:${MASTERNODE-PORT}/console by navigating to Domain Structure \u0026gt; oamcluster \u0026gt; Environment \u0026gt; Servers.\nTo view the running servers using kubectl, run the following command:\n$ kubectl get pods -n \u0026lt;domain_namespace\u0026gt; For example:\n$ kubectl get pods -n oamns The output should look similar to the following:\nNAME READY STATUS RESTARTS AGE accessdomain-adminserver 1/1 Running 0 3h29m accessdomain-create-oam-infra-domain-job-7c9r9 0/1 Completed 0 3h36m accessdomain-oam-policy-mgr1 1/1 Running 0 3h21m accessdomain-oam-server1 1/1 Running 0 3h21m helper 1/1 Running 0 3h51m nginx-ingress-ingress-nginx-controller-76fb7678f-k8rhq 1/1 Running 0 55m Starting/Scaling up OAM Managed Servers The number of OAM Managed Servers running is dependent on the replicas parameter configured for the oam-cluster. 
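Note: As an alternative to editing the cluster resource interactively, the replicas value can also be changed with a single command, provided the Cluster resource exposes the scale subresource (it does in recent operator releases). The following is a sketch only, assuming the accessdomain-oam-cluster resource and oamns namespace used in this document:\n$ kubectl scale cluster accessdomain-oam-cluster -n oamns --replicas=2 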
To start more OAM Managed Servers perform the following steps:\n Run the following kubectl command to edit the oam-cluster:\n$ kubectl edit cluster accessdomain-oam-cluster -n \u0026lt;domain_namespace\u0026gt; For example:\n$ kubectl edit cluster accessdomain-oam-cluster -n oamns Note: This opens an edit session for the oam-cluster where parameters can be changed using standard vi commands.\n In the edit session, search for spec:, and then look for the replicas parameter under clusterName: oam_cluster. By default the replicas parameter is set to \u0026ldquo;1\u0026rdquo; hence one OAM Managed Server is started (oam_server1):\n... spec: clusterName: oam_cluster replicas: 1 serverPod: env: - name: USER_MEM_ARGS value: -XX:+UseContainerSupport -Djava.security.egd=file:/dev/./urandom -Xms8192m -Xmx8192m ... To start more OAM Managed Servers, increase the replicas value as desired. In the example below, two more managed servers will be started by setting replicas to \u0026ldquo;3\u0026rdquo;:\n... spec: clusterName: oam_cluster replicas: 3 serverPod: env: - name: USER_MEM_ARGS value: -XX:+UseContainerSupport -Djava.security.egd=file:/dev/./urandom -Xms8192m -Xmx8192m ... Save the file and exit (:wq!)\nThe output will look similar to the following:\ncluster.weblogic.oracle/accessdomain-oam-cluster edited Run the following kubectl command to view the pods:\n$ kubectl get pods -n \u0026lt;domain_namespace\u0026gt; For example:\n$ kubectl get pods -n oamns The output will look similar to the following:\nNAME READY STATUS RESTARTS AGE accessdomain-adminserver 1/1 Running 0 3h33m accessdomain-create-oam-infra-domain-job-7c9r9 0/1 Completed 0 3h40m accessdomain-oam-policy-mgr1 1/1 Running 0 3h25m accessdomain-oam-server1 1/1 Running 0 3h25m accessdomain-oam-server2 0/1 Running 0 3h25m accessdomain-oam-server3 0/1 Pending 0 9s helper 1/1 Running 0 3h55m nginx-ingress-ingress-nginx-controller-76fb7678f-k8rhq 1/1 Running 0 59m Two new pods (accessdomain-oam-server2 and accessdomain-oam-server3) are started, but currently have a READY status of 0/1. This means oam_server2 and oam_server3 are not currently running but are in the process of starting. The servers will take several minutes to start so keep executing the command until READY shows 1/1:\nNAME READY STATUS RESTARTS AGE accessdomain-adminserver 1/1 Running 0 3h37m accessdomain-create-oam-infra-domain-job-7c9r9 0/1 Completed 0 3h43m accessdomain-oam-policy-mgr1 1/1 Running 0 3h29m accessdomain-oam-server1 1/1 Running 0 3h29m accessdomain-oam-server2 1/1 Running 0 3h29m accessdomain-oam-server3 1/1 Running 0 3m45s helper 1/1 Running 0 3h59m nginx-ingress-ingress-nginx-controller-76fb7678f-k8rhq 1/1 Running 0 63m Note: To check what is happening during server startup when READY is 0/1, run the following command to view the log of the pod that is starting:\n$ kubectl logs \u0026lt;pod\u0026gt; -n \u0026lt;domain_namespace\u0026gt; For example:\n$ kubectl logs accessdomain-oam-server3 -n oamns Stopping/Scaling down OAM Managed Servers As mentioned in the previous section, the number of OAM Managed Servers running is dependent on the replicas parameter configured for the cluster. 
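Before scaling down, it can be useful to confirm the current replicas value for the cluster. For example, a sketch assuming the same accessdomain-oam-cluster resource and oamns namespace:\n$ kubectl get cluster accessdomain-oam-cluster -n oamns -o jsonpath='{.spec.replicas}' 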
To stop one or more OAM Managed Servers, perform the following:\n Run the following kubectl command to edit the oam-cluster:\n$ kubectl edit cluster accessdomain-oam-cluster -n \u0026lt;domain_namespace\u0026gt; For example:\n$ kubectl edit cluster accessdomain-oam-cluster -n oamns In the edit session, search for spec:, and then look for the replicas parameter under clusterName: oam_cluster. In the example below replicas is set to \u0026ldquo;3\u0026rdquo;, hence three OAM Managed Servers are started (access-domain-oam_server1 - access-domain-oam_server3):\n... spec: clusterName: oam_cluster replicas: 3 serverPod: env: - name: USER_MEM_ARGS value: -XX:+UseContainerSupport -Djava.security.egd=file:/dev/./urandom -Xms8192m -Xmx8192m ... To stop OAM Managed Servers, decrease the replicas value as desired. In the example below, we will stop two managed servers by setting replicas to \u0026ldquo;1\u0026rdquo;:\nspec: clusterName: oam_cluster replicas: 1 serverPod: env: - name: USER_MEM_ARGS value: -XX:+UseContainerSupport -Djava.security.egd=file:/dev/./urandom -Xms8192m -Xmx8192m ... Save the file and exit (:wq!)\n Run the following kubectl command to view the pods:\n$ kubectl get pods -n \u0026lt;domain_namespace\u0026gt; For example:\n$ kubectl get pods -n oamns The output will look similar to the following:\nNAME READY STATUS RESTARTS AGE accessdomain-adminserver 1/1 Running 0 3h45m accessdomain-create-oam-infra-domain-job-7c9r9 0/1 Completed 0 3h51m accessdomain-oam-policy-mgr1 1/1 Running 0 3h37m accessdomain-oam-server1 1/1 Running 0 3h37m accessdomain-oam-server2 1/1 Running 0 3h37m accessdomain-oam-server3 1/1 Terminating 0 11m helper 1/1 Running 0 4h6m nginx-ingress-ingress-nginx-controller-76fb7678f-k8rhq 1/1 Running 0 71m One pod now has a STATUS of Terminating (accessdomain-oam-server3). The server will take a minute or two to stop. Once terminated the other pod (accessdomain-oam-server2) will move to Terminating and then stop. Keep executing the command until the pods have disappeared:\nNAME READY STATUS RESTARTS AGE accessdomain-adminserver 1/1 Running 0 3h48m accessdomain-create-oam-infra-domain-job-7c9r9 0/1 Completed 0 3h54m accessdomain-oam-policy-mgr1 1/1 Running 0 3h40m accessdomain-oam-server1 1/1 Running 0 3h40m helper 1/1 Running 0 4h9m nginx-ingress-ingress-nginx-controller-76fb7678f-k8rhq 1/1 Running 0 74m Starting/Scaling up OAM Policy Managed Servers The number of OAM Policy Managed Servers running is dependent on the replicas parameter configured for the policy-cluster. To start more OAM Policy Managed Servers perform the following steps:\n Run the following kubectl command to edit the policy-cluster:\n$ kubectl edit cluster accessdomain-policy-cluster -n \u0026lt;domain_namespace\u0026gt; For example:\n$ kubectl edit cluster accessdomain-policy-cluster -n oamns Note: This opens an edit session for the policy-cluster where parameters can be changed using standard vi commands.\n In the edit session, search for spec:, and then look for the replicas parameter under clusterName: policy_cluster. By default the replicas parameter is set to \u0026ldquo;1\u0026rdquo; hence one OAM Policy Managed Server is started (oam_policy_mgr1):\n... spec: clusterName: policy_cluster replicas: 1 serverService: precreateService: true ... To start more OAM Policy Managed Servers, increase the replicas value as desired. In the example below, two more managed servers will be started by setting replicas to \u0026ldquo;3\u0026rdquo;:\n... 
spec: clusterName: policy_cluster replicas: 3 serverService: precreateService: true ... Save the file and exit (:wq!)\nThe output will look similar to the following:\ncluster.weblogic.oracle/accessdomain-policy-cluster edited After saving the changes two new pods will be started (accessdomain-oam-policy-mgr2 and accessdomain-oam-policy-mgr3). After a few minutes they will have a READY status of 1/1. In the example below accessdomain-oam-policy-mgr2 and accessdomain-oam-policy-mgr3 are started:\nNAME READY STATUS RESTARTS AGE accessdomain-adminserver 1/1 Running 0 3h43m accessdomain-create-oam-infra-domain-job-7c9r9 0/1 Completed 0 3h49m accessdomain-oam-policy-mgr1 1/1 Running 0 3h35m accessdomain-oam-policy-mgr2 1/1 Running 0 3h35m accessdomain-oam-policy-mgr3 1/1 Running 0 4m18s accessdomain-oam-server1 1/1 Running 0 3h35m helper 1/1 Running 0 4h4m nginx-ingress-ingress-nginx-controller-76fb7678f-k8rhq 1/1 Running 0 69m Stopping/Scaling down OAM Policy Managed Servers As mentioned in the previous section, the number of OAM Policy Managed Servers running is dependent on the replicas parameter configured for the cluster. To stop one or more OAM Policy Managed Servers, perform the following:\n Run the following kubectl command to edit the policy-cluster:\n$ kubectl edit cluster accessdomain-policy-cluster -n \u0026lt;domain_namespace\u0026gt; For example:\n$ kubectl edit cluster accessdomain-policy-cluster -n oamns In the edit session, search for spec:, and then look for the replicas parameter under clusterName: policy_cluster. To stop OAM Policy Managed Servers, decrease the replicas value as desired. In the example below, we will stop two managed servers by setting replicas to \u0026ldquo;1\u0026rdquo;:\n... spec: clusterName: policy_cluster replicas: 1 serverService: precreateService: true ... After saving the changes one pod will move to a STATUS of Terminating (accessdomain-oam-policy-mgr3).\nNAME READY STATUS RESTARTS AGE accessdomain-adminserver 1/1 Running 0 3h49m accessdomain-create-oam-infra-domain-job-7c9r9 0/1 Completed 0 3h55m accessdomain-oam-policy-mgr1 1/1 Running 0 3h41m accessdomain-oam-policy-mgr2 1/1 Running 0 3h41m accessdomain-oam-policy-mgr3 1/1 Terminating 0 10m accessdomain-oam-server1 1/1 Running 0 3h41m helper 1/1 Running 0 4h11m nginx-ingress-ingress-nginx-controller-76fb7678f-k8rhq 1/1 Running 0 75m The pods will take a minute or two to stop, so keep executing the command until the pods has disappeared:\nNAME READY STATUS RESTARTS AGE accessdomain-adminserver 1/1 Running 0 3h50m accessdomain-create-oam-infra-domain-job-7c9r9 0/1 Completed 0 3h57m accessdomain-oam-policy-mgr1 1/1 Running 0 3h42m accessdomain-oam-server1 1/1 Running 0 3h42m helper 1/1 Running 0 4h12m nginx-ingress-ingress-nginx-controller-76fb7678f-k8rhq 1/1 Running 0 76m Stopping and Starting the Administration Server and Managed Servers To stop all the OAM Managed Servers and the Administration Server in one operation:\n Run the following kubectl command to edit the domain:\n$ kubectl edit domain \u0026lt;domain_uid\u0026gt; -n \u0026lt;domain_namespace\u0026gt; For example:\n$ kubectl edit domain accessdomain -n oamns In the edit session, search for serverStartPolicy: IfNeeded under the domain spec:\n... volumeMounts: - mountPath: /u01/oracle/user_projects/domains name: weblogic-domain-storage-volume volumes: - name: weblogic-domain-storage-volume persistentVolumeClaim: claimName: accessdomain-domain-pvc serverStartPolicy: IfNeeded webLogicCredentialsSecret: name: accessdomain-credentials ... 
Change serverStartPolicy: IfNeeded to Never as follows:\n... volumeMounts: - mountPath: /u01/oracle/user_projects/domains name: weblogic-domain-storage-volume volumes: - name: weblogic-domain-storage-volume persistentVolumeClaim: claimName: accessdomain-domain-pvc serverStartPolicy: Never webLogicCredentialsSecret: name: accessdomain-credentials ... Save the file and exit (:wq!).\n Run the following kubectl command to view the pods:\n$ kubectl get pods -n \u0026lt;domain_namespace\u0026gt; For example:\n$ kubectl get pods -n oamns The output will look similar to the following:\nNAME READY STATUS RESTARTS AGE accessdomain-adminserver 1/1 Terminating 0 3h52m accessdomain-create-oam-infra-domain-job-7c9r9 0/1 Completed 0 3h59m accessdomain-oam-policy-mgr1 1/1 Terminating 0 3h44m accessdomain-oam-server1 1/1 Terminating 0 3h44m helper 1/1 Running 0 4h14m nginx-ingress-ingress-nginx-controller-76fb7678f-k8rhq 1/1 Running 0 78m The Administration Server pods and Managed Server pods will move to a STATUS of Terminating. After a few minutes, run the command again and the pods should have disappeared:\nNAME READY STATUS RESTARTS AGE accessdomain-create-oam-infra-domain-job-7c9r9 0/1 Completed 0 4h helper 1/1 Running 0 4h15m nginx-ingress-ingress-nginx-controller-76fb7678f-k8rhq 1/1 Running 0 80m To start the Administration Server and Managed Servers up again, repeat the previous steps but change serverStartPolicy: Never to IfNeeded as follows:\n... volumeMounts: - mountPath: /u01/oracle/user_projects/domains name: weblogic-domain-storage-volume volumes: - name: weblogic-domain-storage-volume persistentVolumeClaim: claimName: accessdomain-domain-pvc serverStartPolicy: IfNeeded webLogicCredentialsSecret: name: accessdomain-credentials ... Run the following kubectl command to view the pods:\n$ kubectl get pods -n \u0026lt;domain_namespace\u0026gt; For example:\n$ kubectl get pods -n oamns The output will look similar to the following:\nNAME READY STATUS RESTARTS AGE accessdomain-create-oam-infra-domain-job-7c9r9 0/1 Completed 0 4h1m accessdomain-introspector-jwqxw 1/1 Running 0 10s helper 1/1 Running 0 4h17m nginx-ingress-ingress-nginx-controller-76fb7678f-k8rhq 1/1 Running 0 81m The Administration Server pod will start followed by the OAM Managed Servers pods. This process will take several minutes, so keep executing the command until all the pods are running with READY status 1/1 :\nNAME READY STATUS RESTARTS AGE accessdomain-adminserver 1/1 Running 0 10m accessdomain-create-oam-infra-domain-job-7c9r9 0/1 Completed 0 4h12m accessdomain-oam-policy-mgr1 1/1 Running 0 7m35s accessdomain-oam-server1 1/1 Running 0 7m35s helper 1/1 Running 0 4h28m nginx-ingress-ingress-nginx-controller-76fb7678f-k8rhq 1/1 Running 0 92m Domain lifecycle sample scripts The WebLogic Kubernetes Operator provides sample scripts to start up or shut down a specific Managed Server or cluster in a deployed domain, or the entire deployed domain.\nNote: Prior to running these scripts, you must have previously created and deployed the domain.\nThe scripts are located in the $WORKDIR/kubernetes/domain-lifecycle directory. For more information, see the README.\n" +}, +{ + "uri": "/fmw-kubernetes/23.4.1/idm-products/oig/manage-oig-domains/domain-lifecycle/", + "title": "a. 
Domain life cycle", + "tags": [], + "description": "Learn about the domain lifecycle of an OIG domain.", + "content": " View existing OIG servers Starting/Scaling up OIG Managed servers Stopping/Scaling down OIG Managed servers Stopping and starting the Administration Server and Managed Servers Domain lifecycle sample scripts As OIG domains use the WebLogic Kubernetes Operator, domain lifecycle operations are managed using the WebLogic Kubernetes Operator itself.\nThis document shows the basic operations for starting, stopping and scaling servers in the OIG domain.\nFor more detailed information refer to Domain Life Cycle in the WebLogic Kubernetes Operator documentation.\nDo not use the WebLogic Server Administration Console or Oracle Enterprise Manager Console to start or stop servers.\n Note: The instructions below are for starting, stopping, or scaling servers manually. If you wish to use autoscaling, see Kubernetes Horizontal Pod Autoscaler. Please note, if you have enabled autoscaling, it is recommended to delete the autoscaler before running the commands below.\nView existing OIG Servers The default OIG deployment starts the Administration Server (AdminServer), one OIG Managed Server (oim_server1) and one SOA Managed Server (soa_server1).\nThe deployment also creates, but doesn\u0026rsquo;t start, four extra OIG Managed Servers (oim_server2 to oim_server5) and four more SOA Managed Servers (soa_server2 to soa_server5).\nAll these servers are visible in the WebLogic Server Administration Console https://${MASTERNODE-HOSTNAME}:${MASTERNODE-PORT}/console by navigating to Domain Structure \u0026gt; governancedomain \u0026gt; Environment \u0026gt; Servers.\nTo view the running servers using kubectl, run the following command:\n$ kubectl get pods -n \u0026lt;domain_namespace\u0026gt; For example:\n$ kubectl get pods -n oigns The output should look similar to the following:\nNAME READY STATUS RESTARTS AGE governancedomain-adminserver 1/1 Running 0 23h governancedomain-create-fmw-infra-sample-domain-job-8cww8 0/1 Completed 0 24h governancedomain-oim-server1 1/1 Running 0 23h governancedomain-soa-server1 1/1 Running 0 23h Starting/Scaling up OIG Managed Servers The number of OIG Managed Servers running is dependent on the replicas parameter configured for the cluster. To start more OIG Managed Servers perform the following steps:\n Run the following kubectl command to edit the oim_cluster:\n$ kubectl edit cluster \u0026lt;cluster_name\u0026gt; -n \u0026lt;domain_namespace\u0026gt; For example:\n$ kubectl edit cluster governancedomain-oim-cluster -n oigns Note: This opens an edit session for the oim_cluster where parameters can be changed using standard vi commands.\n In the edit session, search for spec:, and then look for the replicas parameter under clusterName: oim_cluster. By default the replicas parameter is set to \u0026ldquo;1\u0026rdquo; hence a single OIG Managed Server is started (oim_server1):\nspec: clusterName: oim_cluster replicas: 1 serverPod: env: - name: USER_MEM_ARGS value: -Djava.security.egd=file:/dev/./urandom -Xms8192m -Xmx8192m ... To start more OIG Managed Servers, increase the replicas value as desired. In the example below, one more Managed Server will be started by setting replicas to \u0026ldquo;2\u0026rdquo;:\nspec: clusterName: oim_cluster replicas: 2 serverPod: env: - name: USER_MEM_ARGS value: -Djava.security.egd=file:/dev/./urandom -Xms8192m -Xmx8192m ... 
Save the file and exit (:wq)\nThe output will look similar to the following:\ncluster.weblogic.oracle/governancedomain-oim-cluster edited Run the following kubectl command to view the pods:\n$ kubectl get pods -n \u0026lt;domain_namespace\u0026gt; For example:\n$ kubectl get pods -n oigns The output will look similar to the following:\nNAME READY STATUS RESTARTS AGE governancedomain-adminserver 1/1 Running 0 23h governancedomain-create-fmw-infra-sample-domain-job-8cww8 0/1 Completed 0 24h governancedomain-oim-server1 1/1 Running 0 23h governancedomain-oim-server2 0/1 Running 0 7s governancedomain-soa-server1 1/1 Running 0 23h One new pod (governancedomain-oim-server2) is started, but currently has a READY status of 0/1. This means oim_server2 is not currently running but is in the process of starting. The server will take several minutes to start so keep executing the command until READY shows 1/1:\nNAME READY STATUS RESTARTS AGE governancedomain-adminserver 1/1 Running 0 23h governancedomain-create-fmw-infra-sample-domain-job-8cww8 0/1 Completed 0 24h governancedomain-oim-server1 1/1 Running 0 23h governancedomain-oim-server2 1/1 Running 0 5m27s governancedomain-soa-server1 1/1 Running 0 23h Note: To check what is happening during server startup when READY is 0/1, run the following command to view the log of the pod that is starting:\n$ kubectl logs \u0026lt;pod\u0026gt; -n \u0026lt;domain_namespace\u0026gt; For example:\n$ kubectl logs governancedomain-oim-server2 -n oigns Stopping/Scaling down OIG Managed Servers As mentioned in the previous section, the number of OIG Managed Servers running is dependent on the replicas parameter configured for the cluster. To stop one or more OIG Managed Servers, perform the following:\n Run the following kubectl command to edit the oim_cluster:\n$ kubectl edit cluster \u0026lt;cluster_name\u0026gt; -n \u0026lt;domain_namespace\u0026gt; For example:\n$ kubectl edit cluster governancedomain-oim-cluster -n oigns In the edit session, search for spec:, and then look for the replicas parameter under clusterName: oim_cluster. In the example below replicas is set to \u0026ldquo;2\u0026rdquo; hence two OIG Managed Servers are started (oim_server1 and oim_server2):\nspec: clusterName: oim_cluster replicas: 2 serverPod: env: - name: USER_MEM_ARGS value: -Djava.security.egd=file:/dev/./urandom -Xms8192m -Xmx8192m ... To stop OIG Managed Servers, decrease the replicas value as desired. In the example below, we will stop one Managed Server by setting replicas to \u0026ldquo;1\u0026rdquo;:\nspec: clusterName: oim_cluster replicas: 1 serverPod: env: - name: USER_MEM_ARGS value: -Djava.security.egd=file:/dev/./urandom -Xms8192m -Xmx8192m ... Save the file and exit (:wq)\n Run the following kubectl command to view the pods:\n$ kubectl get pods -n \u0026lt;domain_namespace\u0026gt; For example:\n$ kubectl get pods -n oigns The output will look similar to the following:\nNAME READY STATUS RESTARTS AGE governancedomain-adminserver 1/1 Running 0 23h governancedomain-create-fmw-infra-sample-domain-job-8cww8 0/1 Completed 0 24h governancedomain-oim-server1 1/1 Running 0 23h governancedomain-oim-server2 1/1 Terminating 0 7m30s governancedomain-soa-server1 1/1 Running 0 23h The exiting pod shows a STATUS of Terminating (governancedomain-oim-server2). 
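Note: As an alternative to re-running the command manually, you can watch the pods while the server shuts down, for example:\n$ kubectl get pods -n oigns -w 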
The server may take a minute or two to stop, so keep executing the command until the pod has disappeared:\nNAME READY STATUS RESTARTS AGE governancedomain-adminserver 1/1 Running 0 23h governancedomain-create-fmw-infra-sample-domain-job-8cww8 0/1 Completed 0 24h governancedomain-oim-server1 1/1 Running 0 23h governancedomain-soa-server1 1/1 Running 0 23h Stopping and Starting the Administration Server and Managed Servers To stop all the OIG Managed Servers and the Administration Server in one operation:\n Run the following kubectl command to edit the domain:\n$ kubectl edit domain \u0026lt;domain_uid\u0026gt; -n \u0026lt;domain_namespace\u0026gt; For example:\n$ kubectl edit domain governancedomain -n oigns In the edit session search for serverStartPolicy: IfNeeded under the domain spec:\n... volumeMounts: - mountPath: /u01/oracle/user_projects/domains name: weblogic-domain-storage-volume volumes: - name: weblogic-domain-storage-volume persistentVolumeClaim: claimName: governancedomain-domain-pvc serverStartPolicy: IfNeeded webLogicCredentialsSecret: name: oig-domain-credentials ... Change serverStartPolicy: IfNeeded to Never as follows:\n ... volumeMounts: - mountPath: /u01/oracle/user_projects/domains name: weblogic-domain-storage-volume volumes: - name: weblogic-domain-storage-volume persistentVolumeClaim: claimName: governancedomain-domain-pvc serverStartPolicy: Never webLogicCredentialsSecret: name: oig-domain-credentials ... Save the file and exit (:wq).\n Run the following kubectl command to view the pods:\n$ kubectl get pods -n \u0026lt;domain_namespace\u0026gt; For example:\n$ kubectl get pods -n oigns The output will look similar to the following:\nNAME READY STATUS RESTARTS AGE governancedomain-adminserver 1/1 Terminating 0 23h governancedomain-create-fmw-infra-sample-domain-job-8cww8 0/1 Completed 0 24h governancedomain-oim-server1 1/1 Terminating 0 23h governancedomain-soa-server1 1/1 Terminating 0 23h The AdminServer pod and Managed Server pods will move to a STATUS of Terminating. After a few minutes, run the command again and the pods should have disappeared:\nNAME READY STATUS RESTARTS AGE governancedomain-create-fmw-infra-sample-domain-job-8cww8 0/1 Completed 0 24h To start the Administration Server and Managed Servers up again, repeat the previous steps but change serverStartPolicy: Never to IfNeeded as follows:\n ... volumeMounts: - mountPath: /u01/oracle/user_projects/domains name: weblogic-domain-storage-volume volumes: - name: weblogic-domain-storage-volume persistentVolumeClaim: claimName: governancedomain-domain-pvc serverStartPolicy: IfNeeded webLogicCredentialsSecret: name: oig-domain-credentials ... Run the following kubectl command to view the pods:\n$ kubectl get pods -n \u0026lt;domain_namespace\u0026gt; For example:\n$ kubectl get pods -n oigns The output will look similar to the following:\nNAME READY STATUS RESTARTS AGE governancedomain-adminserver 0/1 Running 0 4s governancedomain-create-fmw-infra-sample-domain-job-8cww8 0/1 Completed 0 24h The Administration Server pod will start followed by the OIG Managed Servers pods. 
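Note: You can also track overall startup progress from the domain resource itself; for example (assuming the governancedomain domain in the oigns namespace used throughout this section):\n$ kubectl describe domain governancedomain -n oigns The Status section of the output reports the state of each server as it starts. 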
This process will take several minutes, so keep executing the command until all the pods are running with READY status 1/1 :\nNAME READY STATUS RESTARTS AGE governancedomain-adminserver 1/1 Running 0 6m57s governancedomain-create-fmw-infra-sample-domain-job-8cww8 0/1 Completed 0 24h governancedomain-oim-server1 1/1 Running 0 4m33s governancedomain-soa-server1 1/1 Running 0 4m33s Domain lifecycle sample scripts The WebLogic Kubernetes Operator provides sample scripts to start up or shut down a specific Managed Server or cluster in a deployed domain, or the entire deployed domain.\nNote: Prior to running these scripts, you must have previously created and deployed the domain.\nThe scripts are located in the $WORKDIR/kubernetes/domain-lifecycle directory. For more information, see the README.\n" +}, +{ + "uri": "/fmw-kubernetes/23.4.1/idm-products/enterprise-deployments/enterprise-deployment-guide/", + "title": "a. Enterprise Deployment Guide", + "tags": [], + "description": "The Enterprise Deployment Guide shows how to deploy the entire Oracle Identity Management suite in a production environment", + "content": "Enterprise Deployment Guide The Enterprise Deployment Guide for Oracle Identity and Access Management in a Kubernetes Cluster is a step by step guide that describes how to deploy the entire Oracle Identity and Access Management Suite in a production environment. It incorporates best practices learned over many years to ensure that your Identity and Access Management deployment maintains the highest levels of Availability and Security.\nIt includes:\n Preparing your On-premises Kubernetes, or Oracle Cloud Infrastructure Container Engine for Kubernetes (OCI OKE), for an Identity Management (IDM) Deployment. Deploying and configuring Oracle Unified Directory (OUD) seeding data needed by other IDM products. Deploying and Configuring an Ingress Controller. Deploying and Configuring the WebLogic Kubernetes Operator Deploying and Configuring Oracle Access Management (OAM) and integrating with OUD. Deploying and Configuring Oracle Identity Governance (OIG) and integrating with OUD and OAM. Deploying and Configuring Oracle Identity Role Intelligence (OIRI) and integrating with OIG. Deploying and configuring Oracle Advanced Authentication (OAA) and Oracle Adaptive Risk Management (OARM) and integrating with OAM. Deploying and Configuring Monitoring and Centralised logging and configuring IDM to send monitoring and logging information to it. Additionally, as per Enterprise Deployment Automation, all of the above can be automated using open source scripts.\n" +}, +{ + "uri": "/fmw-kubernetes/23.4.1/idm-products/oudsm/patch-and-upgrade/patch-an-oudsm-image/", + "title": "a. 
Patch an image", + "tags": [], + "description": "Instructions on how to update your OUDSM Kubernetes cluster with a new OUDSM container image.", + "content": "Introduction In this section the Oracle Unified Directory Services Manager (OUDSM) deployment is updated with a new OUDSM container image.\nNote: If you are not using Oracle Container Registry or your own container registry, then you must first load the new container image on all nodes in your Kubernetes cluster.\nYou can update the deployment with a new OUDSM container image using one of the following methods:\n Using a YAML file Using --set argument Using a YAML file Navigate to the $WORKDIR/kubernetes/helm directory:\n$ cd $WORKDIR/kubernetes/helm Create a oudsm-patch-override.yaml file that contains:\nimage: repository: \u0026lt;image_location\u0026gt; tag: \u0026lt;image_tag\u0026gt; imagePullSecrets: - name: orclcred For example:\nimage: repository: container-registry.oracle.com/middleware/oudsm_cpu tag: 12.2.1.4-jdk8-ol7-\u0026lt;October\u0026#39;23\u0026gt; imagePullSecrets: - name: orclcred The following caveats exist:\n If you are not using Oracle Container Registry or your own container registry for your oudsm container image, then you can remove the following:\nimagePullSecrets: - name: orclcred Run the following command to upgrade the deployment:\n$ helm upgrade --namespace \u0026lt;namespace\u0026gt; \\ --values oudsm-patch-override.yaml \\ \u0026lt;release_name\u0026gt; oudsm --reuse-values For example:\n$ helm upgrade --namespace oudsmns \\ --values oudsm-patch-override.yaml \\ oudsm oudsm --reuse-values Using --set argument Navigate to the $WORKDIR/kubernetes/helm directory:\n$ cd $WORKDIR/kubernetes/helm Run the following command to update the deployment with a new OUDSM container image:\n$ helm upgrade --namespace \u0026lt;namespace\u0026gt; \\ --set image.repository=\u0026lt;image_location\u0026gt;,image.tag=\u0026lt;image_tag\u0026gt; \\ --set imagePullSecrets[0].name=\u0026#34;orclcred\u0026#34; \\ \u0026lt;release_name\u0026gt; oudsm --reuse-values For example:\n$ helm upgrade --namespace oudsmns \\ --set image.repository=container-registry.oracle.com/middleware/oudsm_cpu,image.tag=12.2.1.4-jdk8-ol7-\u0026lt;October\u0026#39;23\u0026gt; \\ --set imagePullSecrets[0].name=\u0026#34;orclcred\u0026#34; \\ oudsm oudsm --reuse-values The following caveats exist:\n If you are not using Oracle Container Registry or your own container registry for your OUDSM container image, then you can remove the following: --set imagePullSecrets[0].name=\u0026quot;orclcred\u0026quot;. Verify the pods After updating with the new image the pod will restart. Verify the pod is running:\n$ kubectl --namespace \u0026lt;namespace\u0026gt; get pods For example:\n$ kubectl --namespace oudsmns get pods The output will look similar to the following:\nNAME READY STATUS RESTARTS AGE IP NODE NOMINATED NODE READINESS GATES pod/oudsm-1 1/1 Running 0 73m 10.244.0.19 \u0026lt;worker-node\u0026gt; \u0026lt;none\u0026gt; \u0026lt;none\u0026gt; Note: It will take several minutes before the pod starts. While the oudsm pods have a STATUS of 0/1 the pod is started but the OUDSM server associated with it is currently starting. 
While the pod is starting you can check the startup status in the pod logs by running the following command:\n$ kubectl logs \u0026lt;pod\u0026gt; -n \u0026lt;namespace\u0026gt; For example:\n$ kubectl logs oudsm-1 -n oudsmns Verify the pod is using the new image by running the following command:\n$ kubectl describe pod \u0026lt;pod\u0026gt; -n \u0026lt;namespace\u0026gt; For example:\n$ kubectl describe pod oudsm-1 -n oudsmns The output will look similar to the following:\nName: oudsm-1 Namespace: oudsmns Priority: 0 Node: \u0026lt;worker-node\u0026gt;/100.102.48.28 Start Time: \u0026lt;DATE\u0026gt; Labels: app.kubernetes.io/instance=oudsm app.kubernetes.io/managed-by=Helm app.kubernetes.io/name=oudsm app.kubernetes.io/version=12.2.1.4.0 helm.sh/chart=oudsm-0.1 oudsm/instance=oudsm-1 Annotations: meta.helm.sh/release-name: oudsm meta.helm.sh/release-namespace: oudsmns Status: Running IP: 10.244.1.90 etc... Events: Type Reason Age From Message ---- ------ ---- ---- ------- Normal Killing 22m kubelet Container oudsm definition changed, will be restarted Normal Created 21m (x2 over 61m) kubelet Created container oudsm Normal Pulling 21m kubelet Container image \u0026#34;container-registry.oracle.com/middleware/oudsm_cpu:12.2.1.4-jdk8-ol7-\u0026lt;October\u0026#39;23\u0026gt;\u0026#34; Normal Started 21m (x2 over 61m) kubelet Started container oudsm " +}, +{ + "uri": "/fmw-kubernetes/23.4.1/idm-products/oig/post-install-config/set_oimfronendurl_using_mbeans/", + "title": "a. Post Install Tasks", + "tags": [], + "description": "Perform post install tasks.", + "content": "Follow these post install configuration steps.\n Create a Server Overrides File Set OIMFrontendURL using MBeans Create a Server Overrides File Navigate to the following directory:\ncd $WORKDIR/kubernetes/create-oim-domain/domain-home-on-pv/output/weblogic-domains/governancedomain Create a setUserOverrides.sh file with the following contents:\nDERBY_FLAG=false JAVA_OPTIONS=\u0026quot;${JAVA_OPTIONS} -Djava.net.preferIPv4Stack=true\u0026quot; MEM_ARGS=\u0026quot;-Xms8192m -Xmx8192m\u0026quot; Copy the setUserOverrides.sh file to the Administration Server pod:\n$ chmod 755 setUserOverrides.sh $ kubectl cp setUserOverrides.sh oigns/governancedomain-adminserver:/u01/oracle/user_projects/domains/governancedomain/bin/setUserOverrides.sh Where oigns is the OIG namespace and governancedomain is the domain_UID.\n Stop the OIG domain using the following command:\n$ kubectl -n \u0026lt;domain_namespace\u0026gt; patch domains \u0026lt;domain_uid\u0026gt; --type=\u0026#39;json\u0026#39; -p=\u0026#39;[{\u0026#34;op\u0026#34;: \u0026#34;replace\u0026#34;, \u0026#34;path\u0026#34;: \u0026#34;/spec/serverStartPolicy\u0026#34;, \u0026#34;value\u0026#34;: \u0026#34;Never\u0026#34; }]\u0026#39; For example:\n$ kubectl -n oigns patch domains governancedomain --type=\u0026#39;json\u0026#39; -p=\u0026#39;[{\u0026#34;op\u0026#34;: \u0026#34;replace\u0026#34;, \u0026#34;path\u0026#34;: \u0026#34;/spec/serverStartPolicy\u0026#34;, \u0026#34;value\u0026#34;: \u0026#34;Never\u0026#34; }]\u0026#39; The output will look similar to the following:\ndomain.weblogic.oracle/governancedomain patched Check that all the pods are stopped:\n$ kubectl get pods -n \u0026lt;domain_namespace\u0026gt; For example:\n$ kubectl get pods -n oigns The output will look similar to the following:\nNAME READY STATUS RESTARTS AGE governancedomain-adminserver 1/1 Terminating 0 18h governancedomain-create-fmw-infra-domain-job-8cww8 0/1 Completed 0 24h governancedomain-oim-server1 1/1 Terminating 0 18h governancedomain-soa-server1 1/1 Terminating 0 18h helper 1/1 Running 0 41h The Administration 
Server pods and Managed Server pods will move to a STATUS of Terminating. After a few minutes, run the command again and the pods should have disappeared:\nNAME READY STATUS RESTARTS AGE governancedomain-create-fmw-infra-domain-job-8cww8 0/1 Completed 0 24h helper 1/1 Running 0 41h Start the domain using the following command:\n$ kubectl -n \u0026lt;domain_namespace\u0026gt; patch domains \u0026lt;domain_uid\u0026gt; --type=\u0026#39;json\u0026#39; -p=\u0026#39;[{\u0026#34;op\u0026#34;: \u0026#34;replace\u0026#34;, \u0026#34;path\u0026#34;: \u0026#34;/spec/serverStartPolicy\u0026#34;, \u0026#34;value\u0026#34;: \u0026#34;IfNeeded\u0026#34; }]\u0026#39; For example:\n$ kubectl -n oigns patch domains governancedomain --type=\u0026#39;json\u0026#39; -p=\u0026#39;[{\u0026#34;op\u0026#34;: \u0026#34;replace\u0026#34;, \u0026#34;path\u0026#34;: \u0026#34;/spec/serverStartPolicy\u0026#34;, \u0026#34;value\u0026#34;: \u0026#34;IfNeeded\u0026#34; }]\u0026#39; Run the following kubectl command to view the pods:\n$ kubectl get pods -n \u0026lt;domain_namespace\u0026gt; For example:\n$ kubectl get pods -n oigns The output will look similar to the following:\nNAME READY STATUS RESTARTS AGE governancedomain-create-fmw -infra-domain-job-vj69h 0/1 Completed 0 24h governancedomain-introspect-domain-job-7qx29 1/1 Running 0 8s helper 1/1 Running 0 41h The Administration Server pod will start followed by the OIG Managed Servers pods. This process will take several minutes, so keep executing the command until all the pods are running with READY status 1/1:\nNAME READY STATUS RESTARTS AGE governancedomain-adminserver 1/1 Running 0 6m4s governancedomain-create-fmw-infra-domain-job-vj69h 0/1 Completed 0 24h governancedomain-oim-server1 1/1 Running 0 3m5s governancedomain-soa-server1 1/1 Running 0 3m5s helper 1/1 Running 0 41h Set OIMFrontendURL using MBeans Login to Oracle Enterprise Manager using the following URL:\nhttps://${MASTERNODE-HOSTNAME}:${MASTERNODE-PORT}/em\n Click the Target Navigation icon in the top left of the screen and navigate to the following:\n Expand Identity and Access \u0026gt; Access \u0026gt; OIM \u0026gt; oim Right click the instance oim and select System MBean Browser Under Application Defined MBeans, navigate to oracle.iam, Server:oim_server1 \u0026gt; Application:oim \u0026gt; XMLConfig \u0026gt; Config \u0026gt; XMLConfig.DiscoveryConfig \u0026gt; Discovery. Enter a new value for the OimFrontEndURL attribute, in the format:\n If using an External LoadBalancer for your ingress: https://${LOADBALANCER-HOSTNAME}:${LOADBALANCER-PORT} If using NodePort for your ingress: https://${MASTERNODE-HOSTNAME}:${MASTERNODE-PORT} If using HTTP instead of HTTPS for your ingress, change the URL appropriately.\nThen click Apply.\n " +}, +{ + "uri": "/fmw-kubernetes/23.4.1/idm-products/oam/patch-and-upgrade/upgrade-an-operator-release/", + "title": "a. 
Upgrade an operator release", + "tags": [], + "description": "Instructions on how to update the WebLogic Kubernetes Operator version.", + "content": "These instructions apply to upgrading operators from 3.X.X to 4.X, or from within the 4.X release family as additional versions are released.\n On the master node, download the new WebLogic Kubernetes Operator source code from the operator github project:\n$ mkdir \u0026lt;workdir\u0026gt;/weblogic-kubernetes-operator-4.X.X $ cd \u0026lt;workdir\u0026gt;/weblogic-kubernetes-operator-4.X.X $ git clone https://github.com/oracle/weblogic-kubernetes-operator.git --branch v4.X.X For example:\n$ mkdir /scratch/OAMK8S/weblogic-kubernetes-operator-4.X.X $ cd /scratch/OAMK8S/weblogic-kubernetes-operator-4.X.X $ git clone https://github.com/oracle/weblogic-kubernetes-operator.git --branch v4.X.X This will create the directory \u0026lt;workdir\u0026gt;/weblogic-kubernetes-operator-4.X.X/weblogic-kubernetes-operator\n Run the following helm command to upgrade the operator:\n$ cd \u0026lt;workdir\u0026gt;/weblogic-kubernetes-operator-4.X.X/weblogic-kubernetes-operator $ helm upgrade --reuse-values --set image=ghcr.io/oracle/weblogic-kubernetes-operator:4.X.X --namespace \u0026lt;sample-kubernetes-operator-ns\u0026gt; --wait weblogic-kubernetes-operator kubernetes/charts/weblogic-operator For example:\n$ cd /scratch/OAMK8S/weblogic-kubernetes-operator-4.X.X/weblogic-kubernetes-operator $ helm upgrade --reuse-values --set image=ghcr.io/oracle/weblogic-kubernetes-operator:4.X.X --namespace opns --wait weblogic-kubernetes-operator kubernetes/charts/weblogic-operator The output will look similar to the following:\nRelease \u0026quot;weblogic-kubernetes-operator\u0026quot; has been upgraded. Happy Helming! NAME: weblogic-kubernetes-operator LAST DEPLOYED: \u0026lt;DATE\u0026gt; NAMESPACE: opns STATUS: deployed REVISION: 2 TEST SUITE: None Verify that the operator\u0026rsquo;s pod and services are running by executing the following command:\n$ kubectl get all -n \u0026lt;sample-kubernetes-operator-ns\u0026gt; For example:\n$ kubectl get all -n opns The output will look similar to the following:\nNAME READY STATUS RESTARTS AGE pod/weblogic-operator-b7d6df78c-mfrc4 1/1 Running 0 40s pod/weblogic-operator-webhook-7996b8b58b-frtwp 1/1 Running 0 42s NAME TYPE CLUSTER-IP EXTERNAL-IP PORT(S) AGE service/weblogic-operator-webhook-svc ClusterIP 10.106.51.57 \u0026lt;none\u0026gt; 8083/TCP,8084/TCP 42s NAME READY UP-TO-DATE AVAILABLE AGE deployment.apps/weblogic-operator 1/1 1 1 6d deployment.apps/weblogic-operator-webhook 1/1 1 1 42s NAME DESIRED CURRENT READY AGE replicaset.apps/weblogic-operator-5884685f4f 0 0 0 6d replicaset.apps/weblogic-operator-b7d6df78c 1 1 1 40s replicaset.apps/weblogic-operator-webhook-7996b8b58b 1 1 1 42s Note: When you upgrade a 3.x WebLogic Kubernetes Operator to 4.x, the upgrade process creates a WebLogic Domain resource conversion webhook deployment, and associated resources in the same namespace. The webhook automatically and transparently upgrades the existing WebLogic Domains from the 3.x schema to the 4.x schema. For more information, see Domain Upgrade in the WebLogic Kubernetes Operator documentation.\nNote: In WebLogic Kubernetes Operator 4.X, changes are made to serverStartPolicy that affect starting/stopping of the domain. Refer to the serverStartPolicy entry in the create-domain-inputs.yaml for more information. 
Also see Domain Life Cycle.\n " +}, +{ + "uri": "/fmw-kubernetes/23.4.1/idm-products/oig/patch-and-upgrade/upgrade-an-operator-release/", + "title": "a. Upgrade an operator release", + "tags": [], + "description": "Instructions on how to update the WebLogic Kubernetes Operator version.", + "content": "These instructions apply to upgrading operators from 3.X.X to 4.X, or from within the 4.x release family as additional versions are released.\n On the master node, download the new WebLogic Kubernetes Operator source code from the operator github project:\n$ mkdir \u0026lt;workdir\u0026gt;/weblogic-kubernetes-operator-4.X.X $ cd \u0026lt;workdir\u0026gt;/weblogic-kubernetes-operator-4.X.X $ git clone https://github.com/oracle/weblogic-kubernetes-operator.git --branch v4.X.X For example:\n$ mkdir /scratch/OIGK8S/weblogic-kubernetes-operator-4.X.X $ cd /scratch/OIGK8S/weblogic-kubernetes-operator-4.X.X $ git clone https://github.com/oracle/weblogic-kubernetes-operator.git --branch v4.X.X This will create the directory \u0026lt;workdir\u0026gt;/weblogic-kubernetes-operator-4.X.X/weblogic-kubernetes-operator\n Run the following helm command to upgrade the operator:\n$ cd \u0026lt;workdir\u0026gt;/weblogic-kubernetes-operator-4.X.X/weblogic-kubernetes-operator $ helm upgrade --reuse-values --set image=ghcr.io/oracle/weblogic-kubernetes-operator:4.X.X --namespace \u0026lt;sample-kubernetes-operator-ns\u0026gt; --wait weblogic-kubernetes-operator kubernetes/charts/weblogic-operator For example:\n$ cd /scratch/OIGK8S/weblogic-kubernetes-operator-4.X.X/weblogic-kubernetes-operator $ helm upgrade --reuse-values --set image=ghcr.io/oracle/weblogic-kubernetes-operator:4.X.X --namespace operator --wait weblogic-kubernetes-operator kubernetes/charts/weblogic-operator The output will look similar to the following:\nRelease \u0026quot;weblogic-kubernetes-operator\u0026quot; has been upgraded. Happy Helming! NAME: weblogic-kubernetes-operator LAST DEPLOYED: \u0026lt;DATE\u0026gt; NAMESPACE: operator STATUS: deployed REVISION: 2 TEST SUITE: None Verify that the operator\u0026rsquo;s pod and services are running by executing the following command:\n$ kubectl get all -n \u0026lt;sample-kubernetes-operator-ns\u0026gt; For example:\n$ kubectl get all -n opns The output will look similar to the following:\nNAME READY STATUS RESTARTS AGE pod/weblogic-operator-b7d6df78c-mfrc4 1/1 Running 0 40s pod/weblogic-operator-webhook-7996b8b58b-frtwp 1/1 Running 0 42s NAME TYPE CLUSTER-IP EXTERNAL-IP PORT(S) AGE service/weblogic-operator-webhook-svc ClusterIP 10.106.51.57 \u0026lt;none\u0026gt; 8083/TCP,8084/TCP 42s NAME READY UP-TO-DATE AVAILABLE AGE deployment.apps/weblogic-operator 1/1 1 1 6d deployment.apps/weblogic-operator-webhook 1/1 1 1 42s NAME DESIRED CURRENT READY AGE replicaset.apps/weblogic-operator-5884685f4f 0 0 0 6d replicaset.apps/weblogic-operator-b7d6df78c 1 1 1 40s replicaset.apps/weblogic-operator-webhook-7996b8b58b 1 1 1 42s Note: When you upgrade a 3.x WebLogic Kubernetes Operator to 4.x, the upgrade process creates a WebLogic Domain resource conversion webhook deployment, and associated resources in the same namespace. The webhook automatically and transparently upgrades the existing WebLogic Domains from the 3.x schema to the 4.x schema. For more information, see Domain Upgrade in the WebLogic Kubernetes Operator documentation.\nNote: In WebLogic Kubernetes Operator 4.X, changes are made to serverStartPolicy that affect starting/stopping of the domain. 
Refer to the serverStartPolicy entry in the create-domain-inputs.yaml for more information. Also see Domain Life Cycle.\n " +}, +{ + "uri": "/fmw-kubernetes/23.4.1/idm-products/oig/configure-ingress/ingress-nginx-setup-for-oig-domain-setup-on-k8s/", + "title": "a. Using an Ingress with NGINX (non-SSL)", + "tags": [], + "description": "Steps to set up an Ingress for NGINX to direct traffic to the OIG domain (non-SSL).", + "content": "Setting up an ingress for NGINX for the OIG domain on Kubernetes (non-SSL) The instructions below explain how to set up NGINX as an ingress for the OIG domain with non-SSL termination.\nNote: All the steps below should be performed on the master node.\n Install NGINX\na. Configure the repository\nb. Create a namespace\nc. Install NGINX using helm\nd. Setup routing rules for the domain\n Create an ingress for the domain\n Verify that you can access the domain URL\n Install NGINX Use helm to install NGINX.\nConfigure the repository Add the Helm chart repository for NGINX using the following command:\n$ helm repo add stable https://kubernetes.github.io/ingress-nginx The output will look similar to the following:\n\u0026quot;stable\u0026quot; has been added to your repositories Update the repository using the following command:\n$ helm repo update The output will look similar to the following:\nHang tight while we grab the latest from your chart repositories... ...Successfully got an update from the \u0026quot;stable\u0026quot; chart repository Update Complete. Happy Helming! Create a namespace Create a Kubernetes namespace for NGINX by running the following command:\n$ kubectl create namespace nginx The output will look similar to the following:\nnamespace/nginx created Install NGINX using helm If you can connect directly to the master node IP address from a browser, then install NGINX with the --set controller.service.type=NodePort parameter.\nIf you are using a Managed Service for your Kubernetes cluster,for example Oracle Kubernetes Engine (OKE) on Oracle Cloud Infrastructure (OCI), and connect from a browser to the Load Balancer IP address, then use the --set controller.service.type=LoadBalancer parameter. This instructs the Managed Service to setup a Load Balancer to direct traffic to the NGINX ingress.\n To install NGINX use the following helm command depending on if you are using NodePort or LoadBalancer:\na) Using NodePort\n$ helm install nginx-ingress -n nginx --set controller.service.type=NodePort --set controller.admissionWebhooks.enabled=false stable/ingress-nginx The output will look similar to the following:\nNAME: nginx-ingress LAST DEPLOYED: \u0026lt;DATE\u0026gt; NAMESPACE: nginx STATUS: deployed REVISION: 1 TEST SUITE: None NOTES: The ingress-nginx controller has been installed. 
Get the application URL by running these commands: export HTTP_NODE_PORT=$(kubectl --namespace nginx get services -o jsonpath=\u0026quot;{.spec.ports[0].nodePort}\u0026quot; nginx-ingress-ingress-nginx-controller) export HTTPS_NODE_PORT=$(kubectl --namespace nginx get services -o jsonpath=\u0026quot;{.spec.ports[1].nodePort}\u0026quot; nginx-ingress-ingress-nginx-controller) export NODE_IP=$(kubectl --namespace nginx get nodes -o jsonpath=\u0026quot;{.items[0].status.addresses[1].address}\u0026quot;) echo \u0026quot;Visit http://$NODE_IP:$HTTP_NODE_PORT to access your application via HTTP.\u0026quot; echo \u0026quot;Visit https://$NODE_IP:$HTTPS_NODE_PORT to access your application via HTTPS.\u0026quot; An example Ingress that makes use of the controller: apiVersion: networking.k8s.io/v1 kind: Ingress metadata: annotations: kubernetes.io/ingress.class: nginx name: example namespace: foo spec: ingressClassName: example-class rules: - host: www.example.com http: paths: - path: / pathType: Prefix backend: service: name: exampleService port: 80 # This section is only required if TLS is to be enabled for the Ingress tls: - hosts: - www.example.com secretName: example-tls If TLS is enabled for the Ingress, a Secret containing the certificate and key must also be provided: apiVersion: v1 kind: Secret metadata: name: example-tls namespace: foo data: tls.crt: \u0026lt;base64 encoded cert\u0026gt; tls.key: \u0026lt;base64 encoded key\u0026gt; type: kubernetes.io/tls b) Using LoadBalancer\n$ helm install nginx-ingress -n nginx --set controller.service.type=LoadBalancer --set controller.admissionWebhooks.enabled=false stable/ingress-nginx The output will look similar to the following:\nNAME: nginx-ingress LAST DEPLOYED: \u0026lt;DATE\u0026gt; NAMESPACE: nginx STATUS: deployed REVISION: 1 TEST SUITE: None NOTES: The nginx-ingress controller has been installed. It may take a few minutes for the LoadBalancer IP to be available. You can watch the status by running 'kubectl --namespace nginx get services -o wide -w nginx-ingress-controller' An example Ingress that makes use of the controller: apiVersion: networking.k8s.io/v1 kind: Ingress metadata: annotations: kubernetes.io/ingress.class: nginx name: example namespace: foo spec: ingressClassName: example-class rules: - host: www.example.com http: paths: - path: / pathType: Prefix backend: service: name: exampleService port: 80 # This section is only required if TLS is to be enabled for the Ingress tls: - hosts: - www.example.com secretName: example-tls If TLS is enabled for the Ingress, a Secret containing the certificate and key must also be provided: apiVersion: v1 kind: Secret metadata: name: example-tls namespace: foo data: tls.crt: \u0026lt;base64 encoded cert\u0026gt; tls.key: \u0026lt;base64 encoded key\u0026gt; type: kubernetes.io/tls Setup routing rules for the domain Setup routing rules by running the following commands:\n$ cd $WORKDIR/kubernetes/charts/ingress-per-domain Edit values.yaml and change the domainUID parameter to match your domainUID, for example domainUID: governancedomain. Also change sslType to NONSSL. The file should look as follows:\n# Load balancer type. Supported values are: NGINX type: NGINX # SSL configuration Type. Supported Values are : NONSSL,SSL sslType: NONSSL # domainType. 
Supported values are: oim domainType: oim #WLS domain as backend to the load balancer wlsDomain: domainUID: governancedomain adminServerName: AdminServer adminServerPort: 7001 adminServerSSLPort: soaClusterName: soa_cluster soaManagedServerPort: 8001 soaManagedServerSSLPort: oimClusterName: oim_cluster oimManagedServerPort: 14000 oimManagedServerSSLPort: # Host specific values hostName: enabled: false admin: runtime: internal: # Ngnix specific values nginx: nginxTimeOut: 180 Create an ingress for the domain Create an Ingress for the domain (governancedomain-nginx) in the domain namespace by using the sample Helm chart:\n$ cd $WORKDIR $ helm install governancedomain-nginx kubernetes/charts/ingress-per-domain --namespace \u0026lt;namespace\u0026gt; --values kubernetes/charts/ingress-per-domain/values.yaml Note: The \u0026lt;workdir\u0026gt;/kubernetes/charts/ingress-per-domain/templates/nginx-ingress-nonssl.yaml has nginx.ingress.kubernetes.io/enable-access-log set to false. If you want to enable access logs then set this value to true before executing the command. Enabling access-logs can cause issues with disk space if not regularly maintained.\nFor example:\n$ cd $WORKDIR $ helm install governancedomain-nginx kubernetes/charts/ingress-per-domain --namespace oigns --values kubernetes/charts/ingress-per-domain/values.yaml The output will look similar to the following:\n$ helm install governancedomain-nginx kubernetes/charts/ingress-per-domain --namespace oigns --values kubernetes/charts/ingress-per-domain/values.yaml NAME: governancedomain-nginx LAST DEPLOYED: \u0026lt;DATE\u0026gt; NAMESPACE: oigns STATUS: deployed REVISION: 1 TEST SUITE: None Run the following command to show the ingress is created successfully:\n$ kubectl get ing -n \u0026lt;domain_namespace\u0026gt; For example:\n$ kubectl get ing -n oigns The output will look similar to the following:\nNAME CLASS HOSTS ADDRESS PORTS AGE governancedomain-nginx \u0026lt;none\u0026gt; * x.x.x.x 80 47s Find the NodePort of NGINX using the following command (only if you installed NGINX using NodePort):\n$ kubectl get services -n nginx -o jsonpath=\u0026quot;{.spec.ports[0].nodePort}\u0026quot; nginx-ingress-ingress-nginx-controller The output will look similar to the following:\n31530 Run the following command to check the ingress:\n$ kubectl describe ing governancedomain-nginx -n \u0026lt;namespace\u0026gt; For example:\n$ kubectl describe ing governancedomain-nginx -n oigns The output will look similar to the following:\nName: governancedomain-nginx Namespace: oigns Address: Default backend: default-http-backend:80 (\u0026lt;error: endpoints \u0026quot;default-http-backend\u0026quot; not found\u0026gt;) Rules: Host Path Backends ---- ---- -------- * /console governancedomain-adminserver:7001 (10.244.2.50:7001) /consolehelp governancedomain-adminserver:7001 (10.244.2.50:7001) /em governancedomain-adminserver:7001 (10.244.2.50:7001) /ws_utc governancedomain-cluster-soa-cluster:8001 (10.244.2.51:8001) /soa governancedomain-cluster-soa-cluster:8001 (10.244.2.51:8001) /integration governancedomain-cluster-soa-cluster:8001 (10.244.2.51:8001) /soa-infra governancedomain-cluster-soa-cluster:8001 (10.244.2.51:8001) /identity governancedomain-cluster-oim-cluster:14000 (10.244.2.52:14000) /admin governancedomain-cluster-oim-cluster:14000 (10.244.2.52:14000) /oim governancedomain-cluster-oim-cluster:14000 (10.244.2.52:14000) /sysadmin governancedomain-cluster-oim-cluster:14000 (10.244.2.52:14000) /workflowservice governancedomain-cluster-oim-cluster:14000 
(10.244.2.52:14000) /callbackResponseService governancedomain-cluster-oim-cluster:14000 (10.244.2.52:14000) /spml-xsd governancedomain-cluster-oim-cluster:14000 (10.244.2.52:14000) /HTTPClnt governancedomain-cluster-oim-cluster:14000 (10.244.2.52:14000) /reqsvc governancedomain-cluster-oim-cluster:14000 (10.244.2.52:14000) /iam governancedomain-cluster-oim-cluster:14000 (10.244.2.52:14000) /provisioning-callback governancedomain-cluster-oim-cluster:14000 (10.244.2.52:14000) /CertificationCallbackService governancedomain-cluster-oim-cluster:14000 (10.244.2.52:14000) /ucs governancedomain-cluster-oim-cluster:14000 (10.244.2.52:14000) /FacadeWebApp governancedomain-cluster-oim-cluster:14000 (10.244.2.52:14000) /OIGUI governancedomain-cluster-oim-cluster:14000 (10.244.2.52:14000) /weblogic governancedomain-cluster-oim-cluster:14000 (10.244.2.52:14000) Annotations: kubernetes.io/ingress.class: nginx meta.helm.sh/release-name: governancedomain-nginx meta.helm.sh/release-namespace: oigns nginx.ingress.kubernetes.io/affinity: cookie nginx.ingress.kubernetes.io/affinity-mode: persistent nginx.ingress.kubernetes.io/enable-access-log: false nginx.ingress.kubernetes.io/proxy-read-timeout: 180 nginx.ingress.kubernetes.io/proxy-send-timeout: 180 nginx.ingress.kubernetes.io/session-cookie-name: sticky Events: Type Reason Age From Message ---- ------ ---- ---- ------- Normal Sync 27s nginx-ingress-controller Scheduled for sync To confirm that the new ingress is successfully routing to the domain\u0026rsquo;s server pods, run the following command to send a request to the URL for the WebLogic ReadyApp framework:\nNote: If using a load balancer for your ingress replace ${MASTERNODE-HOSTNAME}:${MASTERNODE-PORT} with ${LOADBALANCER-HOSTNAME}:${LOADBALANCER-PORT}.\n$ curl -v http://${MASTERNODE-HOSTNAME}:${MASTERNODE-PORT}/weblogic/ready For example:\na) For NodePort\n$ curl -v http://masternode.example.com:31530/weblogic/ready b) For LoadBalancer\n$ curl -v http://masternode.example.com:80/weblogic/ready The output will look similar to the following:\n$ curl -v http://masternode.example.com:31530/weblogic/ready * About to connect() to masternode.example.com port 31530 (#0) * Trying X.X.X.X... 
* Connected to masternode.example.com (X.X.X.X) port 31530 (#0) \u0026gt; GET /weblogic/ready HTTP/1.1 \u0026gt; User-Agent: curl/7.29.0 \u0026gt; Host: masternode.example.com:31530 \u0026gt; Accept: */* \u0026gt; \u0026lt; HTTP/1.1 200 OK \u0026lt; Server: nginx/1.19.2 \u0026lt; Date: \u0026lt;DATE\u0026gt; \u0026lt; Content-Length: 0 \u0026lt; Connection: keep-alive \u0026lt; * Connection #0 to host masternode.example.com left intact Verify that you can access the domain URL After setting up the NGINX ingress, verify that the domain applications are accessible through the NGINX ingress port (for example 31530) as per Validate Domain URLs \n" +}, +{ + "uri": "/fmw-kubernetes/23.4.1/idm-products/oud/manage-oud-containers/logging-and-visualization/", + "title": "b) Logging and Visualization for Helm Chart oud-ds-rs Deployment", + "tags": [], + "description": "Describes the steps for logging and visualization with Elasticsearch and Kibana.", + "content": " Introduction Install Elasticsearch and Kibana Create a Kubernetes secret Enable Logstash Upgrade OUD deployment with ELK configuration Verify the pods Verify and access the Kibana console Introduction This section describes how to install and configure logging and visualization for the oud-ds-rs Helm chart deployment.\nThe ELK stack consists of Elasticsearch, Logstash, and Kibana. Using ELK you can gain insights in real-time from the log data from your applications.\n Elasticsearch is a distributed, RESTful search and analytics engine capable of solving a growing number of use cases. As the heart of the Elastic Stack, it centrally stores your data so you can discover the expected and uncover the unexpected. Logstash is an open source, server-side data processing pipeline that ingests data from a multitude of sources simultaneously, transforms it, and then sends it to your favorite “stash.” Kibana lets you visualize your Elasticsearch data and navigate the Elastic Stack. It gives you the freedom to select the way you give shape to your data. And you don’t always have to know what you\u0026rsquo;re looking for. Install Elasticsearch and Kibana If you do not already have a centralized Elasticsearch (ELK) stack then you must configure this first. For details on how to configure the ELK stack, follow Installing Elasticsearch (ELK) Stack and Kibana\nCreate the logstash pod Variables used in this chapter In order to create the logstash pod, you must create a yaml file. This file contains variables which you must substitute with variables applicable to your ELK environment.\nMost of the values for the variables will be based on your ELK deployment as per Installing Elasticsearch (ELK) Stack and Kibana.\nThe table below outlines the variables and values you must set:\n Variable Sample Value Description \u0026lt;ELK_VER\u0026gt; 8.3.1 The version of logstash you want to install. \u0026lt;ELK_SSL\u0026gt; true If SSL is enabled for ELK set the value to true, or if NON-SSL set to false. This value must be lowercase. \u0026lt;ELK_HOSTS\u0026gt; https://elasticsearch.example.com:9200 The URL for sending logs to Elasticsearch. HTTP if NON-SSL is used. \u0026lt;ELK_USER\u0026gt; logstash_internal The name of the user for logstash to access Elasticsearch. \u0026lt;ELK_PASSWORD\u0026gt; password The password for ELK_USER. \u0026lt;ELK_APIKEY\u0026gt; apikey The API key details. You will also need the BASE64 version of the Certificate Authority (CA) certificate(s) that signed the certificate of the Elasticsearch server. 
If using a self-signed certificate, this is the self signed certificate of the Elasticsearch server. See Copying the Elasticsearch Certificate for details on how to get the correct certificate. In the example below the certificate is called elk.crt.\nCreate a kubernetes secret Create a Kubernetes secret for Elasticsearch using the API Key or Password.\na) If ELK uses an API Key for authentication:\n$ kubectl create secret generic elasticsearch-pw-elastic -n \u0026lt;domain_namespace\u0026gt; --from-literal password=\u0026lt;ELK_APIKEY\u0026gt; For example:\n$ kubectl create secret generic elasticsearch-pw-elastic -n oudns --from-literal password=\u0026lt;ELK_APIKEY\u0026gt; The output will look similar to the following:\nsecret/elasticsearch-pw-elastic created b) If ELK uses a password for authentication:\n$ kubectl create secret generic elasticsearch-pw-elastic -n \u0026lt;domain_namespace\u0026gt; --from-literal password=\u0026lt;ELK_PASSWORD\u0026gt; For example:\n$ kubectl create secret generic elasticsearch-pw-elastic -n oudns --from-literal password=\u0026lt;ELK_PASSWORD\u0026gt; The output will look similar to the following:\nsecret/elasticsearch-pw-elastic created Note: It is recommended that the ELK Stack is created with authentication enabled. If no authentication is enabled you may create a secret using the values above.\n Check that the dockercred secret that was created previously in Create a Kubernetes secret for cronjob images exists:\n$ kubectl get secret -n \u0026lt;domain_namespace\u0026gt; | grep dockercred For example,\n$ kubectl get secret -n oudns | grep dockercred The output will look similar to the following:\ndockercred kubernetes.io/dockerconfigjson 1 149m If the secret does not exist, create it as per Create a Kubernetes secret for cronjob images.\n Enable logstash Navigate to the $WORKDIR/kubernetes/helm directory and create a logging-override-values.yaml file as follows:\nelk: imagePullSecrets: - name: dockercred IntegrationEnabled: true logStashImage: logstash:\u0026lt;ELK_VER\u0026gt; logstashConfigMap: false esindex: oudlogs-00001 sslenabled: \u0026lt;ELK_SSL\u0026gt; eshosts: \u0026lt;ELK_HOSTS\u0026gt; # Note: We need to provide either esuser,espassword or esapikey esuser: \u0026lt;ELK_USER\u0026gt; espassword: elasticsearch-pw-elastic esapikey: elasticsearch-pw-elastic Change the \u0026lt;ELK_VER\u0026gt;, \u0026lt;ELK_SSL\u0026gt;, \u0026lt;ELK_HOSTS\u0026gt;, and \u0026lt;ELK_USER\u0026gt; to match the values for your environment. If using SSL, replace the elk.crt in $WORKDIR/kubernetes/helm/oud-ds-rs/certs/ with the elk.crt for your ElasticSearch server. If using API KEY for your ELK authentication, leave both esuser: and espassword: with no value. If using a password for ELK authentication, leave esapi_key: but delete elasticsearch-pw-elastic. If no authentication is used for ELK, leave esuser, espassword, and esapi_key with no value assigned. The rest of the lines in the yaml file should not be changed. 
For example:\nelk: imagePullSecrets: - name: dockercred IntegrationEnabled: true logStashImage: logstash:8.3.1 logstashConfigMap: false esindex: oudlogs-00001 sslenabled: true eshosts: https://elasticsearch.example.com:9200 # Note: We need to provide either esuser,espassword or esapikey esuser: logstash_internal espassword: elasticsearch-pw-elastic esapikey: Upgrade OUD deployment with ELK configuration Run the following command to upgrade the OUD deployment with the ELK configuration:\n$ helm upgrade --namespace \u0026lt;namespace\u0026gt; --values \u0026lt;valuesfile.yaml\u0026gt; \u0026lt;releasename\u0026gt; oud-ds-rs --reuse-values For example:\n$ helm upgrade --namespace oudns --values logging-override-values.yaml oud-ds-rs oud-ds-rs --reuse-values The output should look similar to the following:\nRelease \u0026quot;oud-ds-rs\u0026quot; has been upgraded. Happy Helming! NAME: oud-ds-rs LAST DEPLOYED: \u0026lt;DATE\u0026gt; NAMESPACE: oudns STATUS: deployed REVISION: 2 NOTES: # # Copyright (c) 2020, 2022, Oracle and/or its affiliates. # # Licensed under the Universal Permissive License v 1.0 as shown at # https://oss.oracle.com/licenses/upl # # Since \u0026quot;nginx\u0026quot; has been chosen, follow the steps below to configure nginx ingress controller. Add Repo reference to helm for retriving/installing Chart for nginx-ingress implementation. command-# helm repo add ingress-nginx https://kubernetes.github.io/ingress-nginx Command helm install to install nginx-ingress related objects like pod, service, deployment, etc. # helm install --namespace \u0026lt;namespace for ingress\u0026gt; --values nginx-ingress-values-override.yaml lbr-nginx ingress-nginx/ingress-nginx For details of content of nginx-ingress-values-override.yaml refer README.md file of this chart. Run these commands to check port mapping and services: # kubectl --namespace \u0026lt;namespace for ingress\u0026gt; get services -o wide -w lbr-nginx-ingress-controller # kubectl describe --namespace \u0026lt;namespace for oud-ds-rs chart\u0026gt; ingress.extensions/oud-ds-rs-http-ingress-nginx # kubectl describe --namespace \u0026lt;namespace for oud-ds-rs chart\u0026gt; ingress.extensions/oud-ds-rs-admin-ingress-nginx Accessible interfaces through ingress: (External IP Address for LoadBalancer NGINX Controller can be determined through details associated with lbr-nginx-ingress-controller) 1. OUD Admin REST: Port: http/https 2. OUD Data REST: Port: http/https 3. OUD Data SCIM: Port: http/https 4. OUD LDAP/LDAPS: Port: ldap/ldaps 5. OUD Admin LDAPS: Port: ldaps Please refer to README.md from Helm Chart to find more details about accessing interfaces and configuration parameters. Accessible interfaces through ingress: 1. OUD Admin REST: Port: http/https 2. OUD Data REST: Port: http/https 3. OUD Data SCIM: Port: http/https Please refer to README.md from Helm Chart to find more details about accessing interfaces and configuration parameters. 
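Before checking the pods, you can optionally confirm that the ELK overrides were recorded against the release. One way is helm get values, which prints the user-supplied values for the oud-ds-rs release:\n$ helm get values oud-ds-rs -n oudns The elk: section of the output should match the settings in logging-override-values.yaml.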
Verify the pods Run the following command to check the logstash pod is created correctly:\n$ kubectl get pods -n \u0026lt;namespace\u0026gt; For example:\n$ kubectl get pods -n oudns The output should look similar to the following:\nNAME READY STATUS RESTARTS AGE oud-ds-rs-0 1/1 Running 0 150m oud-ds-rs-1 1/1 Running 0 143m oud-ds-rs-2 1/1 Running 0 137m oud-ds-rs-logstash-5dc8d94597-knk8g 1/1 Running 0 2m12s oud-pod-cron-job-27758370-wpfq7 0/1 Completed 0 66m oud-pod-cron-job-27758400-kd6pn 0/1 Completed 0 36m oud-pod-cron-job-27758430-ndmgj 0/1 Completed 0 6m33s Note: Wait a couple of minutes to make sure the pod has not had any failures or restarts. If the pod fails you can view the pod log using:\n$ kubectl logs -f oud-ds-rs-logstash-\u0026lt;pod\u0026gt; -n oudns Most errors occur due to misconfiguration of the logging-override-values.yaml. This is usually because of an incorrect value set, or the certificate was not pasted with the correct indentation.\nIf the pod has errors, view the helm history to find the last working revision, for example:\n$ helm history oud-ds-rs -n oudns The output will look similar to the following:\nREVISION UPDATED STATUS CHART APP VERSION DESCRIPTION 1 Tue Jan 10 14:06:01 2023 superseded oud-ds-rs-0.2 12.2.1.4.0 Install complete 2 Tue Jan 10 16:34:21 2023 deployed oud-ds-rs-0.2 12.2.1.4.0 Upgrade complete Rollback to the previous working revision by running:\n$ helm rollback \u0026lt;release\u0026gt; \u0026lt;revision\u0026gt; -n \u0026lt;domain_namespace\u0026gt; For example:\nhelm rollback oud-ds-rs 1 -n oudns Once you have resolved the issue in the yaml files, run the helm upgrade command outlined earlier to recreate the logstash pod.\n Verify and access the Kibana console To access the Kibana console you will need the Kibana URL as per Installing Elasticsearch (ELK) Stack and Kibana.\nFor Kibana 7.7.x and below:\n Access the Kibana console with http://\u0026lt;hostname\u0026gt;:\u0026lt;port\u0026gt;/app/kibana and login with your username and password.\n From the Navigation menu, navigate to Management \u0026gt; Kibana \u0026gt; Index Patterns.\n In the Create Index Pattern page enter oudlogs* for the Index pattern and click Next Step.\n In the Configure settings page, from the Time Filter field name drop down menu select @timestamp and click Create index pattern.\n Once the index pattern is created click on Discover in the navigation menu to view the OIG logs.\n For Kibana version 7.8.X and above:\n Access the Kibana console with http://\u0026lt;hostname\u0026gt;:\u0026lt;port\u0026gt;/app/kibana and login with your username and password.\n From the Navigation menu, navigate to Management \u0026gt; Stack Management.\n Click Data Views in the Kibana section.\n Click Create Data View and enter the following information:\n Name: oudlogs* Timestamp: @timestamp Click Create Data View.\n From the Navigation menu, click Discover to view the log file entries.\n From the drop down menu, select oudlogs* to view the log file entries.\n " +}, +{ + "uri": "/fmw-kubernetes/23.4.1/idm-products/oudsm/manage-oudsm-containers/logging-and-visualization/", + "title": "b) Logging and Visualization for Helm Chart oudsm Deployment", + "tags": [], + "description": "Describes the steps for logging and visualization with Elasticsearch and Kibana.", + "content": "Introduction This section describes how to install and configure logging and visualization for the oudsm Helm chart deployment.\nThe ELK stack consists of Elasticsearch, Logstash, and Kibana. 
Using ELK you can gain insights in real-time from the log data from your applications.\n Elasticsearch is a distributed, RESTful search and analytics engine capable of solving a growing number of use cases. As the heart of the Elastic Stack, it centrally stores your data so you can discover the expected and uncover the unexpected. Logstash is an open source, server-side data processing pipeline that ingests data from a multitude of sources simultaneously, transforms it, and then sends it to your favorite “stash.” Kibana lets you visualize your Elasticsearch data and navigate the Elastic Stack. It gives you the freedom to select the way you give shape to your data. And you don’t always have to know what you\u0026rsquo;re looking for. Install Elasticsearch and Kibana If you do not already have a centralized Elasticsearch (ELK) stack then you must configure this first. For details on how to configure the ELK stack, follow Installing Elasticsearch (ELK) Stack and Kibana\nCreate the logstash pod Variables used in this chapter In order to create the logstash pod, you must create a yaml file. This file contains variables which you must substitute with variables applicable to your ELK environment.\nMost of the values for the variables will be based on your ELK deployment as per Installing Elasticsearch (ELK) Stack and Kibana.\nThe table below outlines the variables and values you must set:\n Variable Sample Value Description \u0026lt;ELK_VER\u0026gt; 8.3.1 The version of logstash you want to install. \u0026lt;ELK_SSL\u0026gt; true If SSL is enabled for ELK set the value to true, or if NON-SSL set to false. This value must be lowercase. \u0026lt;ELK_HOSTS\u0026gt; https://elasticsearch.example.com:9200 The URL for sending logs to Elasticsearch. HTTP if NON-SSL is used. \u0026lt;ELK_USER\u0026gt; logstash_internal The name of the user for logstash to access Elasticsearch. \u0026lt;ELK_PASSWORD\u0026gt; password The password for ELK_USER. \u0026lt;ELK_APIKEY\u0026gt; apikey The API key details. You will also need the BASE64 version of the Certificate Authority (CA) certificate(s) that signed the certificate of the Elasticsearch server. If using a self-signed certificate, this is the self signed certificate of the Elasticsearch server. See Copying the Elasticsearch Certificate for details on how to get the correct certificate. In the example below the certificate is called elk.crt.\nCreate Kubernetes secrets Create a Kubernetes secret for Elasticsearch using the API Key or Password.\na) If ELK uses an API Key for authentication:\n$ kubectl create secret generic elasticsearch-pw-elastic -n \u0026lt;domain_namespace\u0026gt; --from-literal password=\u0026lt;ELK_APIKEY\u0026gt; For example:\n$ kubectl create secret generic elasticsearch-pw-elastic -n oudsmns --from-literal password=\u0026lt;ELK_APIKEY\u0026gt; The output will look similar to the following:\nsecret/elasticsearch-pw-elastic created b) If ELK uses a password for authentication:\n$ kubectl create secret generic elasticsearch-pw-elastic -n \u0026lt;domain_namespace\u0026gt; --from-literal password=\u0026lt;ELK_PASSWORD\u0026gt; For example:\n$ kubectl create secret generic elasticsearch-pw-elastic -n oudsmns --from-literal password=\u0026lt;ELK_PASSWORD\u0026gt; The output will look similar to the following:\nsecret/elasticsearch-pw-elastic created Note: It is recommended that the ELK Stack is created with authentication enabled. 
If no authentication is enabled you may create a secret using the values above.\n Create a Kubernetes secret to access the required images on hub.docker.com:\nNote: You must first have a user account on hub.docker.com:\n$ kubectl create secret docker-registry \u0026#34;dockercred\u0026#34; --docker-server=\u0026#34;https://index.docker.io/v1/\u0026#34; --docker-username=\u0026#34;\u0026lt;docker_username\u0026gt;\u0026#34; --docker-password=\u0026lt;password\u0026gt; --docker-email=\u0026lt;docker_email_credentials\u0026gt; --namespace=\u0026lt;domain_namespace\u0026gt; For example:\n$ kubectl create secret docker-registry \u0026quot;dockercred\u0026quot; --docker-server=\u0026quot;https://index.docker.io/v1/\u0026quot; --docker-username=\u0026quot;username\u0026quot; --docker-password=\u0026lt;password\u0026gt; --docker-email=user@example.com --namespace=oudsmns The output will look similar to the following:\nsecret/dockercred created Enable logstash Navigate to the $WORKDIR/kubernetes/helm directory and create a logging-override-values.yaml file as follows:\nelk: imagePullSecrets: - name: dockercred IntegrationEnabled: true logStashImage: logstash:\u0026lt;ELK_VER\u0026gt; logstashConfigMap: false esindex: oudsmlogs-00001 sslenabled: \u0026lt;ELK_SSL\u0026gt; eshosts: \u0026lt;ELK_HOSTS\u0026gt; # Note: We need to provide either esuser,espassword or esapikey esuser: \u0026lt;ELK_USER\u0026gt; espassword: elasticsearch-pw-elastic esapikey: elasticsearch-pw-elastic Change the \u0026lt;ELK_VER\u0026gt;, \u0026lt;ELK_SSL\u0026gt;, \u0026lt;ELK_HOSTS\u0026gt;, and \u0026lt;ELK_USER\u0026gt;, to match the values for your environment. If using SSL, replace the elk.crt in $WORKDIR/kubernetes/helm/oudsm/certs/ with the elk.crt for your ElasticSearch server. If using API KEY for your ELK authentication, leave both esuser: and espassword: with no value. If using a password for ELK authentication, leave esapi_key: but delete elasticsearch-pw-elastic. If no authentication is used for ELK, leave esuser, espassword, and esapi_key with no value assigned. The rest of the lines in the yaml file should not be changed. For example:\nelk: imagePullSecrets: - name: dockercred IntegrationEnabled: true logStashImage: logstash:8.3.1 logstashConfigMap: false esindex: oudsmlogs-00001 sslenabled: true eshosts: https://elasticsearch.example.com:9200 # Note: We need to provide either esuser,espassword or esapikey esuser: logstash_internal espassword: elasticsearch-pw-elastic esapikey: Upgrade oudsm deployment with ELK configuration Run the following command to upgrade the oudsm deployment with the ELK configuration:\n$ helm upgrade --namespace \u0026lt;namespace\u0026gt; --values \u0026lt;valuesfile.yaml\u0026gt; \u0026lt;releasename\u0026gt; oudsm --reuse-values For example:\n$ helm upgrade --namespace oudsmns --values logging-override-values.yaml oudsm oudsm --reuse-values The output should look similar to the following:\nRelease \u0026quot;oudsm\u0026quot; has been upgraded. Happy Helming! 
NAME: oudsm LAST DEPLOYED: \u0026lt;DATE\u0026gt; NAMESPACE: oudsmns STATUS: deployed REVISION: 2 TEST SUITE: None Verify the pods Run the following command to check the logstash pod is created correctly:\n$ kubectl get pods -n \u0026lt;namespace\u0026gt; For example:\n$ kubectl get pods -n oudsmns The output should look similar to the following:\nNAME READY STATUS RESTARTS AGE oudsm-1 1/1 Running 0 51m oudsm-logstash-56dbcc6d9f-mxsgj 1/1 Running 0 2m7s Note: Wait a couple of minutes to make sure the pod has not had any failures or restarts. If the pod fails you can view the pod log using:\n$ kubectl logs -f oudsm-logstash-\u0026lt;pod\u0026gt; -n oudsmns Most errors occur due to misconfiguration of the logging-override-values.yaml. This is usually because of an incorrect value set, or the certificate was not pasted with the correct indentation.\nIf the pod has errors, view the helm history to find the last working revision, for example:\n$ helm history oudsm -n oudsmns The output will look similar to the following:\nREVISION UPDATED STATUS CHART APP VERSION DESCRIPTION 1 \u0026lt;DATE\u0026gt; superseded oudsm-0.1 12.2.1.4.0 Install complete 2 \u0026lt;DATE\u0026gt; deployed oudsm-0.1 12.2.1.4.0 Upgrade complete Rollback to the previous working revision by running:\n$ helm rollback \u0026lt;release\u0026gt; \u0026lt;revision\u0026gt; -n \u0026lt;domain_namespace\u0026gt; For example:\nhelm rollback oudsm 1 -n oudsmns Once you have resolved the issue in the yaml files, run the helm upgrade command outlined earlier to recreate the logstash pod.\n Verify and access the Kibana console To access the Kibana console you will need the Kibana URL as per Installing Elasticsearch (ELK) Stack and Kibana.\nFor Kibana 7.7.x and below:\n Access the Kibana console with http://\u0026lt;hostname\u0026gt;:\u0026lt;port\u0026gt;/app/kibana and login with your username and password.\n From the Navigation menu, navigate to Management \u0026gt; Kibana \u0026gt; Index Patterns.\n In the Create Index Pattern page enter oudsmlogs* for the Index pattern and click Next Step.\n In the Configure settings page, from the Time Filter field name drop down menu select @timestamp and click Create index pattern.\n Once the index pattern is created click on Discover in the navigation menu to view the OUDSM logs.\n For Kibana version 7.8.X and above:\n Access the Kibana console with http://\u0026lt;hostname\u0026gt;:\u0026lt;port\u0026gt;/app/kibana and login with your username and password.\n From the Navigation menu, navigate to Management \u0026gt; Stack Management.\n Click Data Views in the Kibana section.\n Click Create Data View and enter the following information:\n Name: oudsmlogs* Timestamp: @timestamp Click Create Data View.\n From the Navigation menu, click Discover to view the log file entries.\n From the drop down menu, select oudsmlogs* to view the log file entries.\n " +}, +{ + "uri": "/fmw-kubernetes/23.4.1/idm-products/enterprise-deployments/enterprise-deployment-automation/", + "title": "b. Enterprise Deployment Guide Automation Scripts", + "tags": [], + "description": "The Enterprise Deployment Automation scripts deploy the entire Oracle Identity and Access Management suite in a production environment automatically", + "content": "Enterprise Deployment Automation The Enterprise Deployment Automation scripts allow you to deploy the entire Oracle Identity and Access Management suite in a production environment. 
You can use the scripts to:\n Automate the creation of a Kubernetes cluster on Oracle Cloud Infrastructure (OCI), ready for the deployment of Oracle Identity and Access Management products. See Automating the OCI Infrastructure Creation for the Identity and Access Management Kubernetes Cluster. Automate the deployment of Oracle Identity and Access Management products on any compliant Kubernetes cluster. See Automating the Identity and Access Management Enterprise Deployment. " +}, +{ + "uri": "/fmw-kubernetes/23.4.1/idm-products/oig/post-install-config/install_and_configure_connectors/", + "title": "b. Install and configure connectors", + "tags": [], + "description": "Install and Configure Connectors.", + "content": "Download the connector Download the Connector you are interested in from Oracle Identity Manager Connector Downloads.\n Copy the connector zip file to a staging directory on the master node e.g. \u0026lt;workdir\u0026gt;/stage and unzip it:\n$ cp $HOME/Downloads/\u0026lt;connector\u0026gt;.zip \u0026lt;workdir\u0026gt;/\u0026lt;stage\u0026gt;/ $ cd \u0026lt;workdir\u0026gt;/\u0026lt;stage\u0026gt; $ unzip \u0026lt;connector\u0026gt;.zip $ chmod -R 755 * For example:\n$ cp $HOME/Downloads/Exchange-12.2.1.3.0.zip /scratch/OIGK8S/stage/ $ cd /scratch/OIGK8S/stage/ $ unzip exchange-12.2.1.3.0.zip $ chmod -R 755 * Copy OIG connectors There are two options to copy OIG Connectors to your Kubernetes cluster:\n a) Copy the connector directly to the Persistent Volume b) Use the kubectl cp command to copy the connector to the Persistent Volume It is recommended to use option a), however there may be cases, for example when using a Managed Service such as Oracle Kubernetes Engine on Oracle Cloud Infrastructure, where it may not be feasible to directly mount the domain directory. In such cases option b) should be used.\na) Copy the connector directly to the persistent volume Copy the connector zip file to the persistent volume. For example:\n$ cp -R \u0026lt;path_to\u0026gt;/\u0026lt;connector\u0026gt; \u0026lt;persistent_volume\u0026gt;/governancedomainpv/ConnectorDefaultDirectory/ For example:\n$ cp -R /scratch/OIGK8S/stage/Exchange-12.2.1.3.0 /scratch/shared/governancedomainpv/ConnectorDefaultDirectory/ b) Use the kubectl cp command to copy the connector to the persistent volume Run the following command to copy over the connector:\n$ kubectl -n \u0026lt;domain_namespace\u0026gt; cp \u0026lt;path_to\u0026gt;/\u0026lt;connector\u0026gt; \u0026lt;cluster_name\u0026gt;:/u01/oracle/idm/server/ConnectorDefaultDirectory/ For example:\n$ kubectl -n oigns cp /scratch/OIGK8S/stage/Exchange-12.2.1.3.0 governancedomain-oim-server1:/u01/oracle/idm/server/ConnectorDefaultDirectory/ Install the connector The connectors are installed as they are on a standard on-premises setup, via Application On Boarding or via Connector Installer.\nRefer to your Connector specific documentation for instructions.\n" +}, +{ + "uri": "/fmw-kubernetes/23.4.1/idm-products/oam/patch-and-upgrade/patch-an-image/", + "title": "b. 
Patch an image", + "tags": [], + "description": "Instructions on how to update your OAM Kubernetes cluster with a new OAM container image.", + "content": "Choose one of the following options to update your OAM kubernetes cluster to use the new image:\n Run the kubectl edit domain command Run the kubectl patch domain command In all of the above cases, the WebLogic Kubernetes Operator will restart the Administration Server pod first and then perform a rolling restart on the OAM Managed Servers.\nNote: If you are not using Oracle Container Registry or your own container registry, then you must first load the new container image on all nodes in your Kubernetes cluster.\nRun the kubectl edit domain command To update the domain with the kubectl edit domain command, run the following:\n$ kubectl edit domain \u0026lt;domainname\u0026gt; -n \u0026lt;namespace\u0026gt; For example:\n$ kubectl edit domain accessdomain -n oamns If using Oracle Container Registry or your own container registry for your OAM container image, update the image to point at the new image, for example:\ndomainHomeInImage: false image: container-registry.oracle.com/middleware/oam_cpu:\u0026lt;tag\u0026gt; imagePullPolicy: IfNotPresent If you are not using a container registry and have loaded the image on each of the master and worker nodes, update the image to point at the new image:\ndomainHomeInImage: false image: oracle/oam:\u0026lt;tag\u0026gt; imagePullPolicy: IfNotPresent Save the file and exit (:wq!)\n Run the kubectl patch command To update the domain with the kubectl patch domain command, run the following:\n$ kubectl patch domain \u0026lt;domain\u0026gt; -n \u0026lt;namespace\u0026gt; --type merge -p \u0026#39;{\u0026#34;spec\u0026#34;:{\u0026#34;image\u0026#34;:\u0026#34;newimage:tag\u0026#34;}}\u0026#39; For example, if using Oracle Container Registry or your own container registry for your OAM container image:\n$ kubectl patch domain accessdomain -n oamns --type merge -p \u0026#39;{\u0026#34;spec\u0026#34;:{\u0026#34;image\u0026#34;:\u0026#34;container-registry.oracle.com/middleware/oam_cpu:\u0026lt;tag\u0026gt;\u0026#34;}}\u0026#39; For example, if you are not using a container registry and have loaded the image on each of the master and worker nodes:\n$ kubectl patch domain accessdomain -n oamns --type merge -p \u0026#39;{\u0026#34;spec\u0026#34;:{\u0026#34;image\u0026#34;:\u0026#34;oracle/oam:\u0026lt;tag\u0026gt;\u0026#34;}}\u0026#39; The output will look similar to the following:\ndomain.weblogic.oracle/accessdomain patched " +}, +{ + "uri": "/fmw-kubernetes/23.4.1/idm-products/oig/patch-and-upgrade/patch-an-image/", + "title": "b. Patch an image", + "tags": [], + "description": "Instructions on how to update your OIG Kubernetes cluster with a new OIG container image.", + "content": "Introduction The OIG domain patching script automatically performs the update of your OIG Kubernetes cluster with a new OIG container image.\nNote: Before following the steps below, you must have upgraded to WebLogic Kubernetes Operator 4.1.2.\nThe script executes the following steps sequentially:\n Checks if the helper pod exists in the given namespace. If yes, then it deletes the helper pod. Brings up a new helper pod with the new image. Stops the Administration Server, SOA and OIM managed servers using serverStartPolicy set as Never in the domain definition yaml. Waits for all servers to be stopped (default timeout 2000s) Introspects database properties including credentials from the job configmap. 
Performs database schema changes from the helper pod Starts the Administration Server, SOA and OIM managed servers by setting serverStartPolicy to IfNeeded and the image to the new image tag. Waits for all the servers to be ready (default timeout 2000s) The script exits with a failure if the configurable timeout is reached before the target pod count (which depends on the domain configuration) is reached. It also exits if there is any failure while patching the database schema and domain.\nNote: The script execution will cause downtime while patching the OIG deployment and database schemas.\nPrerequisites Before you begin, perform the following steps:\n Review the Domain resource documentation.\n Ensure that you have a running OIG deployment in your cluster.\n Ensure that the database is up and running.\n Download the latest code repository Download the latest code repository as follows:\n Create a working directory to set up the source code.\n$ mkdir \u0026lt;workdir\u0026gt; For example:\n$ mkdir /scratch/OIGK8Slatest Download the latest OIG deployment scripts from the OIG repository.\n$ cd \u0026lt;workdir\u0026gt; $ git clone https://github.com/oracle/fmw-kubernetes.git --branch release/23.4.1 For example:\n$ cd /scratch/OIGK8Slatest $ git clone https://github.com/oracle/fmw-kubernetes.git --branch release/23.4.1 Set the $WORKDIR environment variable as follows:\n$ export WORKDIR=\u0026lt;workdir\u0026gt;/fmw-kubernetes/OracleIdentityGovernance For example:\n$ export WORKDIR=/scratch/OIGK8Slatest/fmw-kubernetes/OracleIdentityGovernance Run the patch domain script Run the patch domain script as follows. Specify the inputs required by the script. If you need help understanding the inputs, run the script with the -h option.\n$ cd $WORKDIR/kubernetes/domain-lifecycle $ ./patch_oig_domain.sh -h $ ./patch_oig_domain.sh -i \u0026lt;target_image_tag\u0026gt; -n \u0026lt;oig_namespace\u0026gt; For example:\n$ cd $WORKDIR/kubernetes/domain-lifecycle $ ./patch_oig_domain.sh -h $ ./patch_oig_domain.sh -i 12.2.1.4-jdk8-ol7-\u0026lt;October`23\u0026gt; -n oigns The output will look similar to the following:\n[INFO] Found domain name: governancedomain [INFO] Image Registry: container-registry.oracle.com/middleware/oig_cpu [INFO] Domain governancedomain is currently running with image: container-registry.oracle.com/middleware/oig_cpu:12.2.1.4-jdk8-ol7-\u0026lt;April`23\u0026gt; current no of pods under governancedomain are 3 [INFO] The pod helper already exists in namespace oigns. [INFO] Deleting pod helper pod \u0026quot;helper\u0026quot; deleted [INFO] Fetched Image Pull Secret: orclcred [INFO] Creating new helper pod with image: container-registry.oracle.com/middleware/oig_cpu:12.2.1.4-jdk8-ol7-\u0026lt;October`23\u0026gt; pod/helper created Checking helper Running [INFO] Stopping Admin, SOA and OIM servers in domain governancedomain. This may take some time, monitor log /scratch/OIGK8Slatest/fmw-kubernetes/OracleIdentityGovernance/kubernetes/domain-lifecycle/log/oim_patch_log-\u0026lt;DATE\u0026gt;/stop_servers.log for details [INFO] All servers are now stopped successfully. Proceeding with DB Schema changes [INFO] Patching OIM schemas... [INFO] DB schema update successful. 
Check log /scratch/OIGK8Slatest/fmw-kubernetes/OracleIdentityGovernance/kubernetes/domain-lifecycle/log/oim_patch_log-\u0026lt;DATE\u0026gt;/patch_oim_wls.log for details [INFO] Starting Admin, SOA and OIM servers with new image container-registry.oracle.com/middleware/oig_cpu:12.2.1.4-jdk8-ol7-\u0026lt;October`23\u0026gt; [INFO] Waiting for 3 weblogic pods to be ready..This may take several minutes, do not close the window. Check log /scratch/OIGK8Slatest/fmw-kubernetes/OracleIdentityGovernance/kubernetes/domain-lifecycle/log/oim_patch_log-\u0026lt;DATE\u0026gt;/monitor_weblogic_pods.log for progress [SUCCESS] All servers under governancedomain are now in ready state with new image: container-registry.oracle.com/middleware/oig_cpu:12.2.1.4-jdk8-ol7-\u0026lt;October`23\u0026gt; The logs are available at $WORKDIR/kubernetes/domain-lifecycle by default. A custom log location can also be provided to the script.\nNote: If the patch domain script creation fails, refer to the Troubleshooting section.\n " +}, +{ + "uri": "/fmw-kubernetes/23.4.1/idm-products/oudsm/patch-and-upgrade/upgrade-elk/", + "title": "b. Upgrade Elasticsearch and Kibana", + "tags": [], + "description": "Instructions on how to upgrade Elastic Search and Kibana.", + "content": "This section shows how to upgrade Elasticsearch and Kibana.\nTo determine if this step is required for the version you are upgrading from, refer to the Release Notes.\nDownload the latest code repository Download the latest code repository as follows:\n Create a working directory to setup the source code.\n$ mkdir \u0026lt;workdir\u0026gt; For example:\n$ mkdir /scratch/OUDSMK8SOctober23 Download the latest OUDSM deployment scripts from the OUDSM repository.\n$ cd \u0026lt;workdir\u0026gt; $ git clone https://github.com/oracle/fmw-kubernetes.git --branch release/23.4.1 For example:\n$ cd /scratch/OUDSMK8SOctober23 $ git clone https://github.com/oracle/fmw-kubernetes.git --branch release/23.4.1 Set the $WORKDIR environment variable as follows:\n$ export WORKDIR=\u0026lt;workdir\u0026gt;/fmw-kubernetes/OracleUnifiedDirectorySM For example:\n$ export WORKDIR=/scratch/OUDSMK8SOctober23/fmw-kubernetes/OracleUnifiedDirectorySM Undeploy Elasticsearch and Kibana From October 22 (22.4.1) onwards, OUDSM logs should be stored on a centralized Elasticsearch and Kibana (ELK) stack.\nDeployments prior to October 22 (22.4.1) used local deployments of Elasticsearch and Kibana.\nIf you are upgrading from July 22 (22.3.1) or earlier, to October 23 (23.4.1), you must first undeploy Elasticsearch and Kibana using the steps below:\n Navigate to the $WORKDIR/kubernetes/helm directory and create a logging-override-values-uninstall.yaml with the following:\nelk: enabled: false Run the following command to remove the existing ELK deployment:\n$ helm upgrade --namespace \u0026lt;domain_namespace\u0026gt; --values \u0026lt;valuesfile.yaml\u0026gt; \u0026lt;releasename\u0026gt; oudsm --reuse-values For example:\n$ helm upgrade --namespace oudsmns --values logging-override-values-uninstall.yaml oudsm oudsm --reuse-values Deploy Elasticsearch and Kibana in centralized stack Follow Install Elasticsearch stack and Kibana to deploy Elasticsearch and Kibana in a centralized stack. " +}, +{ + "uri": "/fmw-kubernetes/23.4.1/idm-products/oig/configure-ingress/ingress-nginx-setup-for-oig-domain-setup-on-k8s-ssl/", + "title": "b. 
Using an Ingress with NGINX (SSL)", + "tags": [], + "description": "Steps to set up an Ingress for NGINX to direct traffic to the OIG domain using SSL.", + "content": "Setting up an ingress for NGINX for the OIG domain on Kubernetes The instructions below explain how to set up NGINX as an ingress for the OIG domain with SSL termination.\nNote: All the steps below should be performed on the master node.\n Create a SSL certificate\na. Generate SSL certificate\nb. Create a Kubernetes secret for SSL\n Install NGINX\na. Configure the repository\nb. Create a namespace\nc. Install NGINX using helm\n Create an ingress for the domain\n Verify that you can access the domain URL\n Create a SSL certificate Generate SSL certificate Generate a private key and certificate signing request (CSR) using a tool of your choice. Send the CSR to your certificate authority (CA) to generate the certificate.\nIf you want to use a certificate for testing purposes you can generate a self signed certificate using openssl:\n$ mkdir \u0026lt;workdir\u0026gt;/ssl $ cd \u0026lt;workdir\u0026gt;/ssl $ openssl req -x509 -nodes -days 365 -newkey rsa:2048 -keyout tls.key -out tls.crt -subj \u0026#34;/CN=\u0026lt;nginx-hostname\u0026gt;\u0026#34; For example:\n$ mkdir /scratch/OIGK8S/ssl $ cd /scratch/OIGK8S/ssl $ openssl req -x509 -nodes -days 365 -newkey rsa:2048 -keyout tls.key -out tls.crt -subj \u0026#34;/CN=masternode.example.com\u0026#34; Note: The CN should match the host.domain of the master node in order to prevent hostname problems during certificate verification.\nThe output will look similar to the following:\nGenerating a 2048 bit RSA private key ..........................................+++ .......................................................................................................+++ writing new private key to 'tls.key' ----- Create a Kubernetes secret for SSL Create a secret for SSL containing the SSL certificate by running the following command:\n$ kubectl -n oigns create secret tls \u0026lt;domain_uid\u0026gt;-tls-cert --key \u0026lt;workdir\u0026gt;/tls.key --cert \u0026lt;workdir\u0026gt;/tls.crt For example:\n$ kubectl -n oigns create secret tls governancedomain-tls-cert --key /scratch/OIGK8S/ssl/tls.key --cert /scratch/OIGK8S/ssl/tls.crt The output will look similar to the following:\nsecret/governancedomain-tls-cert created Confirm that the secret is created by running the following command:\n$ kubectl get secret \u0026lt;domain_uid\u0026gt;-tls-cert -o yaml -n oigns For example:\n$ kubectl get secret governancedomain-tls-cert -o yaml -n oigns The output will look similar to the following:\napiVersion: v1 data: tls.crt: 
LS0tLS1CRUdJTiBDRVJUSUZJQ0FURS0tLS0tCk1JSURGVENDQWYyZ0F3SUJBZ0lKQUl3ZjVRMWVxZnljTUEwR0NTcUdTSWIzRFFFQkN3VUFNQ0V4SHpBZEJnTlYKQkFNTUZtUmxiakF4WlhadkxuVnpMbTl5WVdOc1pTNWpiMjB3SGhjTk1qQXdPREV3TVRReE9UUXpXaGNOTWpFdwpPREV3TVRReE9UUXpXakFoTVI4d0hRWURWUVFEREJaa1pXNHdNV1YyYnk1MWN5NXZjbUZqYkdVdVkyOXRNSUlCCklqQU5CZ2txaGtpRzl3MEJBUUVGQUFPQ0FROEFNSUlCQ2dLQ0FRRUEyY0lpVUhwcTRVZzBhaGR6aXkycHY2cHQKSVIza2s5REd2eVRNY0syaWZQQ2dtUU5CdHV6VXNFN0l4c294eldITmU5RFpXRXJTSjVON3Ym1lTzJkMVd2NQp1aFhzbkFTbnkwY1N9xVDNQSlpDVk1MK0llZVFKdnhaVjZaWWU4V2FFL1NQSGJzczRjYy9wcG1mc3pxCnErUi83cXEyMm9ueHNHaE9vQ1h1TlQvMFF2WXVzMnNucGtueWRKRHUxelhGbDREYkFIZGMvamNVK0NPWWROeS8KT3Iza2JIV0FaTkR4OWxaZUREOTRmNXZLcUF2V0FkSVJZa2UrSmpNTHg0VHo2ZlM0VXoxbzdBSTVuSApPQ1ZMblV5U0JkaGVuWTNGNEdFU0wwbnorVlhFWjRWVjRucWNjRmo5cnJ0Q29pT1BBNlgvNGdxMEZJbi9Qd0lECkFRQUJvMUF3VGpBZEJnTlZIUTRFRmdRVWw1VnVpVDBDT0xGTzcxMFBlcHRxSC9DRWZyY3dId1lEVlIwakJCZ3cKRm9BVWw1VnVpVDBDT0xGTzcxMFBlcHRxSC9DRWZyY3dEQVlEVlIwVEJBVXdBd0VCL3pBTkJna3Foa2lHOXcwQgpBUXNGQUFPQ0FRRUFXdEN4b2ZmNGgrWXZEcVVpTFFtUnpqQkVBMHJCOUMwL1FWOG9JQzJ3d1hzYi9KaVNuMHdOCjNMdHppejc0aStEbk1yQytoNFQ3enRaSkc3NVluSGRKcmxQajgzVWdDLzhYTlFCSUNDbTFUa3RlVU1jWG0reG4KTEZEMHpReFhpVzV0N1FHcWtvK2FjeTlhUnUvN3JRMXlNSE9HdVVkTTZETzErNXF4cTdFNXFMamhyNEdKejV5OAoraW8zK25UcUVKMHFQOVRocG96RXhBMW80OEY0ZHJybWdqd3ROUldEQVpBYmYyV1JNMXFKWXhxTTJqdU1FQWNsCnFMek1TdEZUQ2o1UGFTQ0NUV1VEK3ZlSWtsRWRpaFdpRm02dzk3Y1diZ0lGMlhlNGk4L2szMmF1N2xUTDEvd28KU3Q2dHpsa20yV25uUFlVMzBnRURnVTQ4OU02Z1dybklpZz09Ci0tLS0tRU5EIENFUlRJRklDQVRFLS0tLS0K tls.key: LS0tLS1CRUdJTiBQUklWQVRFIEtFWS0tLS0tCk1JSUV1d0lCQURBTkJna3Foa2lHOXcwQkFRRUZBQVNDQktVd2dnU2hBZ0VBQW9JQkFRRFp3aUpRZW1yaFNEUnEKRjNPTExhbS9xbTBoSGVTVDBNYS9KTXh3cmFKODhLQ1pBMEcyN05Td1Rzakd5akhOWWMxNzBObFlTdEluazN1cApkdVo0N1ozVmEvbTZGZXljQktmTFJ4SW84NnIwSmhQYzhsa0pVd3Y0aDU1QW0vRmxYcGxoN3hab1Q5SThkdXl6Cmh4eittbVorek9xcjVIL3VxcmJhaWZHd2FFNmdKZTQxUC9SQzlpNnpheWVtU2ZKMGtPN1hOY1dYZ05zQWQxeisKTnhUNEk1aDAzTDg2dmVSc2RZQmswUEgyVmw0TVAzaC9tOHFWdW5mK1NvQzlZQjBoRmlSNzRtTXd2SGhQUHA5TApoVFBXanNBam1jYzRKVXVkVEpJRjJGNmRqY1hnWVJJdlNmUDVWY1JuaFZYaWVweHdXUDJ1dTBLaUk0OERwZi9pCkNyUVVpZjgvQWdNQkFBRUNnZjl6cnE2TUVueTFNYWFtdGM2c0laWU1QSDI5R2lSVVlwVXk5bG1sZ3BqUHh3V0sKUkRDay9Td0FmZG9yd1Q2ejNVRk1oYWJ4UU01a04vVjZFYkJlamQxT15bjdvWTVEQWJRRTR3RG9SZWlrVApONndWU0FrVC92Z1RXc1RqRlY1bXFKMCt6U2ppOWtySkZQNVNRN1F2cUswQ3BHRlNhVjY2dW8ycktiNmJWSkJYCkxPZmZPMytlS0tVazBaTnE1Q1NVQk9mbnFoNVFJSGdpaDNiMTRlNjB6bndrNWhaMHBHZE9BQm9aTkoKZ21lanUyTEdzVWxXTjBLOVdsUy9lcUllQzVzQm9jaWlocmxMVUpGWnpPRUV6LzErT2cyemhmT29yTE9rMTIrTgpjQnV0cTJWQ2I4ZFJDaFg1ZzJ0WnBrdzgzcXN5RSt3M09zYlQxa0VDZ1lFQTdxUnRLWGFONUx1SENvWlM1VWhNCm1WcnYxTEg0eGNhaDJIZnMksrMHJqQkJONGpkZkFDMmF3R3ZzU1EyR0lYRzVGYmYyK0pwL1kxbktKOEgKZU80MzNLWVgwTDE4NlNNLzFVay9HSEdTek1CWS9KdGR6WkRrbTA4UnBwaTl4bExTeDBWUWtFNVJVcnJJcTRJVwplZzBOM2RVTHZhTVl1UTBrR2dncUFETUNnWUVBNlpqWCtjU2VMZ1BVajJENWRpUGJ1TmVFd2RMeFNPZDFZMUFjCkUzQ01YTWozK2JxQ3BGUVIrTldYWWVuVmM1QiszajlSdHVnQ0YyTkNSdVdkZWowalBpL243UExIRHdCZVY0bVIKM3VQVHJmamRJbFovSFgzQ2NjVE94TmlaajU4VitFdkRHNHNHOGxtRTRieStYRExIYTJyMWxmUk9sUVRMSyswVgpyTU93eU1VQ2dZRUF1dm14WGM4NWxZRW9hU0tkU0cvQk9kMWlYSUtmc2VDZHRNT2M1elJ0UXRsSDQwS0RscE54CmxYcXBjbVc3MWpyYzk1RzVKNmE1ZG5xTE9OSFZoWW8wUEpmSXhPU052RXI2MTE5NjRBMm5sZXRHYlk0M0twUkEKaHBPRHlmdkZoSllmK29kaUJpZFUyL3ZBMCtUczNSUHJzRzBSOUVDOEZqVDNaZVhaNTF1R0xPa0NnWUFpTmU0NwplQjRxWXdrNFRsMTZmZG5xQWpaQkpLR05xY2c1V1R3alpMSkp6R3owdCtuMkl4SFd2WUZFSjdqSkNmcHFsaDlqCmlDcjJQZVV3K09QTlNUTG1JcUgydzc5L1pQQnNKWXVsZHZ4RFdGVWFlRXg1aHpkNDdmZlNRRjZNK0NHQmthYnIKVzdzU3R5V000ZFdITHpDaGZMS20yWGJBd0VqNUQrbkN1WTRrZVFLQmdFSkRHb0puM1NCRXcra2xXTE85N09aOApnc3lYQm9mUW1lRktIS2NHNzFZUFhJbTRlV1kyUi9KO
Ct5anc5b1FJQ3o5NlRidkdSZEN5QlJhbWhoTmFGUzVyCk9MZUc0ejVENE4zdThUc0dNem9QcU13KzBGSXJiQ3FzTnpGWTg3ekZweEdVaXZvRWZLNE82YkdERTZjNHFqNGEKNmlmK0RSRSt1TWRMWTQyYTA3ekoKLS0tLS1FTkQgUFJJVkFURSBLRVktLS0tLQo= kind: Secret metadata: creationTimestamp: \u0026quot;\u0026lt;DATE\u0026gt;\u0026quot; name: governancedomain-tls-cert namespace: oigns resourceVersion: \u0026quot;3319899\u0026quot; uid: 274cc960-281a-494c-a3e3-d93c3abd051f type: kubernetes.io/tls Install NGINX Use helm to install NGINX.\nConfigure the repository Add the Helm chart repository for installing NGINX using the following command:\n$ helm repo add stable https://kubernetes.github.io/ingress-nginx The output will look similar to the following:\n\u0026quot;stable\u0026quot; has been added to your repositories Update the repository using the following command:\n$ helm repo update The output will look similar to the following:\nHang tight while we grab the latest from your chart repositories... ...Successfully got an update from the \u0026quot;stable\u0026quot; chart repository Update Complete. Happy Helming! Create a namespace Create a Kubernetes namespace for NGINX:\n$ kubectl create namespace nginxssl The output will look similar to the following:\nnamespace/nginxssl created Install NGINX using helm If you can connect directly to the master node IP address from a browser, then install NGINX with the --set controller.service.type=NodePort parameter.\nIf you are using a Managed Service for your Kubernetes cluster, for example Oracle Kubernetes Engine (OKE) on Oracle Cloud Infrastructure (OCI), and connect from a browser to the Load Balancer IP address, then use the --set controller.service.type=LoadBalancer parameter. This instructs the Managed Service to setup a Load Balancer to direct traffic to the NGINX ingress.\n To install NGINX use the following helm command depending on if you are using NodePort or LoadBalancer:\na) Using NodePort\n$ helm install nginx-ingress -n nginxssl --set controller.extraArgs.default-ssl-certificate=oigns/governancedomain-tls-cert --set controller.service.type=NodePort --set controller.admissionWebhooks.enabled=false stable/ingress-nginx The output will look similar to the following:\n$ helm install nginx-ingress -n nginxssl --set controller.extraArgs.default-ssl-certificate=oigns/governancedomain-tls-cert --set controller.service.type=NodePort --set controller.admissionWebhooks.enabled=false stable/ingress-nginx NAME: nginx-ingress LAST DEPLOYED: \u0026lt;DATE\u0026gt; NAMESPACE: nginxssl STATUS: deployed REVISION: 1 TEST SUITE: None NOTES: The nginx-ingress controller has been installed. 
Get the application URL by running these commands: export HTTP_NODE_PORT=$(kubectl --namespace nginxssl get services -o jsonpath=\u0026quot;{.spec.ports[0].nodePort}\u0026quot; nginx-ingress-controller) export HTTPS_NODE_PORT=$(kubectl --namespace nginxssl get services -o jsonpath=\u0026quot;{.spec.ports[1].nodePort}\u0026quot; nginx-ingress-controller) export NODE_IP=$(kubectl --namespace nginxssl get nodes -o jsonpath=\u0026quot;{.items[0].status.addresses[1].address}\u0026quot;) echo \u0026quot;Visit http://$NODE_IP:$HTTP_NODE_PORT to access your application via HTTP.\u0026quot; echo \u0026quot;Visit https://$NODE_IP:$HTTPS_NODE_PORT to access your application via HTTPS.\u0026quot; An example Ingress that makes use of the controller: apiVersion: networking.k8s.io/v1 kind: Ingress metadata: annotations: kubernetes.io/ingress.class: nginx name: example namespace: foo spec: ingressClassName: example-class rules: - host: www.example.com http: paths: - path: / pathType: Prefix backend: serviceName: exampleService servicePort: 80 path: / # This section is only required if TLS is to be enabled for the Ingress tls: - hosts: - www.example.com secretName: example-tls If TLS is enabled for the Ingress, a Secret containing the certificate and key must also be provided: apiVersion: v1 kind: Secret metadata: name: example-tls namespace: foo data: tls.crt: \u0026lt;base64 encoded cert\u0026gt; tls.key: \u0026lt;base64 encoded key\u0026gt; type: kubernetes.io/tls b) Using LoadBalancer\n$ helm install nginx-ingress -n nginxssl --set controller.extraArgs.default-ssl-certificate=oigns/governancedomain-tls-cert --set controller.service.type=LoadBalancer --set controller.admissionWebhooks.enabled=false stable/ingress-nginx The output will look similar to the following:\nNAME: nginx-ingress LAST DEPLOYED: \u0026lt;DATE\u0026gt; NAMESPACE: nginxssl STATUS: deployed REVISION: 1 TEST SUITE: None NOTES: The ingress-nginx controller has been installed. It may take a few minutes for the LoadBalancer IP to be available. You can watch the status by running 'kubectl --namespace nginxssl get services -o wide -w nginx-ingress-ingress-nginx-controller' An example Ingress that makes use of the controller: apiVersion: networking.k8s.io/v1 kind: Ingress metadata: annotations: kubernetes.io/ingress.class: nginx name: example namespace: foo spec: rules: - host: www.example.com http: paths: - path: / pathType: Prefix backend: service: name: exampleService port: 80 # This section is only required if TLS is to be enabled for the Ingress tls: - hosts: - www.example.com secretName: example-tls If TLS is enabled for the Ingress, a Secret containing the certificate and key must also be provided: apiVersion: v1 kind: Secret metadata: name: example-tls namespace: foo data: tls.crt: \u0026lt;base64 encoded cert\u0026gt; tls.key: \u0026lt;base64 encoded key\u0026gt; type: kubernetes.io/tls Setup routing rules for the domain Setup routing rules by running the following commands:\n$ cd $WORKDIR/kubernetes/charts/ingress-per-domain Edit values.yaml and change the domainUID parameter to match your domainUID, for example domainUID: governancedomain. Change sslType to SSL. The file should look as follows:\n# Load balancer type. Supported values are: NGINX type: NGINX # SSL configuration Type. Supported Values are : NONSSL,SSL sslType: SSL # domainType. 
Supported values are: oim domainType: oim #WLS domain as backend to the load balancer wlsDomain: domainUID: governancedomain adminServerName: AdminServer adminServerPort: 7001 adminServerSSLPort: soaClusterName: soa_cluster soaManagedServerPort: 8001 soaManagedServerSSLPort: oimClusterName: oim_cluster oimManagedServerPort: 14000 oimManagedServerSSLPort: # Host specific values hostName: enabled: false admin: runtime: internal: # Ngnix specific values nginx: nginxTimeOut: 180 Create an ingress for the domain Create an Ingress for the domain (governancedomain-nginx), in the domain namespace by using the sample Helm chart:\n$ cd $WORKDIR $ helm install governancedomain-nginx kubernetes/charts/ingress-per-domain --namespace oigns --values kubernetes/charts/ingress-per-domain/values.yaml Note: The $WORKDIR/kubernetes/charts/ingress-per-domain/templates/nginx-ingress-ssl.yaml has nginx.ingress.kubernetes.io/enable-access-log set to false. If you want to enable access logs then set this value to true before executing the command. Enabling access-logs can cause issues with disk space if not regularly maintained.\nFor example:\n$ cd $WORKDIR $ helm install governancedomain-nginx kubernetes/charts/ingress-per-domain --namespace oigns --values kubernetes/charts/ingress-per-domain/values.yaml The output will look similar to the following:\nNAME: governancedomain-nginx LAST DEPLOYED: \u0026lt;DATE\u0026gt; NAMESPACE: oigns STATUS: deployed REVISION: 1 TEST SUITE: None Run the following command to show the ingress is created successfully:\n$ kubectl get ing -n \u0026lt;namespace\u0026gt; For example:\n$ kubectl get ing -n oigns The output will look similar to the following:\nNAME CLASS HOSTS ADDRESS PORTS AGE governancedomain-nginx \u0026lt;none\u0026gt; * x.x.x.x 80 49s Find the node port of NGINX using the following command:\n$ kubectl get services -n nginxssl -o jsonpath=\u0026#34;{.spec.ports[1].nodePort}\u0026#34; nginx-ingress-ingress-nginx-controller The output will look similar to the following:\n32033 Run the following command to check the ingress:\n$ kubectl describe ing governancedomain-nginx -n \u0026lt;namespace\u0026gt; For example:\n$ kubectl describe ing governancedomain-nginx -n oigns The output will look similar to the following:\nName: governancedomain-nginx Namespace: oigns Address: 10.111.175.104 Default backend: default-http-backend:80 (\u0026lt;error: endpoints \u0026quot;default-http-backend\u0026quot; not found\u0026gt;) Rules: Host Path Backends ---- ---- -------- * /console governancedomain-adminserver:7001 (10.244.2.50:7001) /consolehelp governancedomain-adminserver:7001 (10.244.2.50:7001) /em governancedomain-adminserver:7001 (10.244.2.50:7001) /ws_utc governancedomain-cluster-soa-cluster:8001 (10.244.2.51:8001) /soa governancedomain-cluster-soa-cluster:8001 (10.244.2.51:8001) /integration governancedomain-cluster-soa-cluster:8001 (10.244.2.51:8001) /soa-infra governancedomain-cluster-soa-cluster:8001 (10.244.2.51:8001) /identity governancedomain-cluster-oim-cluster:14000 (10.244.2.52:14000) /admin governancedomain-cluster-oim-cluster:14000 (10.244.2.52:14000) /oim governancedomain-cluster-oim-cluster:14000 (10.244.2.52:14000) /sysadmin governancedomain-cluster-oim-cluster:14000 (10.244.2.52:14000) /workflowservice governancedomain-cluster-oim-cluster:14000 (10.244.2.52:14000) /callbackResponseService governancedomain-cluster-oim-cluster:14000 (10.244.2.52:14000) /spml-xsd governancedomain-cluster-oim-cluster:14000 (10.244.2.52:14000) /HTTPClnt 
governancedomain-cluster-oim-cluster:14000 (10.244.2.52:14000) /reqsvc governancedomain-cluster-oim-cluster:14000 (10.244.2.52:14000) /iam governancedomain-cluster-oim-cluster:14000 (10.244.2.52:14000) /provisioning-callback governancedomain-cluster-oim-cluster:14000 (10.244.2.52:14000) /CertificationCallbackService governancedomain-cluster-oim-cluster:14000 (10.244.2.52:14000) /ucs governancedomain-cluster-oim-cluster:14000 (10.244.2.52:14000) /FacadeWebApp governancedomain-cluster-oim-cluster:14000 (10.244.2.52:14000) /OIGUI governancedomain-cluster-oim-cluster:14000 (10.244.2.52:14000) /weblogic governancedomain-cluster-oim-cluster:14000 (10.244.2.52:14000) Annotations: kubernetes.io/ingress.class: nginx meta.helm.sh/release-name: governancedomain-nginx meta.helm.sh/release-namespace: oigns nginx.ingress.kubernetes.io/affinity: cookie nginx.ingress.kubernetes.io/affinity-mode: persistent nginx.ingress.kubernetes.io/configuration-snippet: more_clear_input_headers \u0026quot;WL-Proxy-Client-IP\u0026quot; \u0026quot;WL-Proxy-SSL\u0026quot;; more_set_input_headers \u0026quot;X-Forwarded-Proto: https\u0026quot;; more_set_input_headers \u0026quot;WL-Proxy-SSL: true\u0026quot;; nginx.ingress.kubernetes.io/enable-access-log: false nginx.ingress.kubernetes.io/ingress.allow-http: false nginx.ingress.kubernetes.io/proxy-buffer-size: 2000k nginx.ingress.kubernetes.io/proxy-read-timeout: 180 nginx.ingress.kubernetes.io/proxy-send-timeout: 180 nginx.ingress.kubernetes.io/session-cookie-name: sticky Events: Type Reason Age From Message ---- ------ ---- ---- ------- Normal Sync 18s (x2 over 38s) nginx-ingress-controller Scheduled for sync To confirm that the new Ingress is successfully routing to the domain\u0026rsquo;s server pods, run the following command to send a request to the URL for the WebLogic ReadyApp framework:\nNote: If using a load balancer for your ingress replace ${MASTERNODE-HOSTNAME}:${MASTERNODE-PORT} with ${LOADBALANCER-HOSTNAME}:${LOADBALANCER-PORT}.\n$ curl -v -k https://${MASTERNODE-HOSTNAME}:${MASTERNODE-PORT}/weblogic/ready For example:\n$ curl -v -k https://masternode.example.com:32033/weblogic/ready The output will look similar to the following:\n$ curl -v -k https://masternode.example.com:32033/weblogic/ready * About to connect() to X.X.X.X port 32033 (#0) * Trying X.X.X.X... * Connected to masternode.example.com (X.X.X.X) port 32033 (#0) * Initializing NSS with certpath: sql:/etc/pki/nssdb * skipping SSL peer certificate verification * SSL connection using TLS_ECDHE_RSA_WITH_AES_128_GCM_SHA256 * Server certificate: * subject: CN=masternode.example.com * start date: \u0026lt;DATE\u0026gt; * expire date: \u0026lt;DATE\u0026gt; * common name: masternode.example.com * issuer: CN=masternode.example.com \u0026gt; GET /weblogic/ready HTTP/1.1 \u0026gt; User-Agent: curl/7.29.0 \u0026gt; Host: X.X.X.X:32033 \u0026gt; Accept: */* \u0026gt; \u0026lt; HTTP/1.1 200 OK \u0026lt; Server: nginx/1.19.1 \u0026lt; Date: \u0026lt;DATE\u0026gt; \u0026lt; Content-Length: 0 \u0026lt; Connection: keep-alive \u0026lt; Strict-Transport-Security: max-age=15724800; includeSubDomains \u0026lt; * Connection #0 to host X.X.X.X left intact Verify that you can access the domain URL After setting up the NGINX ingress, verify that the domain applications are accessible through the NGINX ingress port (for example 32033) as per Validate Domain URLs \n" +}, +{ + "uri": "/fmw-kubernetes/23.4.1/idm-products/oam/manage-oam-domains/wlst-admin-operations/", + "title": "b. 
WLST Administration Operations", + "tags": [], + "description": "Describes the steps for WLST administration using helper pod running in the same Kubernetes Cluster as OAM Domain.", + "content": "To use WLST to administer the OAM domain, use the helper pod in the same Kubernetes cluster as the OAM Domain.\n Check to see if the helper pod exists by running:\n$ kubectl get pods -n \u0026lt;domain_namespace\u0026gt; | grep helper For example:\n$ kubectl get pods -n oamns | grep helper The output should look similar to the following:\nhelper 1/1 Running 0 26h If the helper pod doesn\u0026rsquo;t exist then see Step 1 in Prepare your environment to create it.\n Run the following command to start a bash shell in the helper pod:\n$ kubectl exec -it helper -n \u0026lt;domain_namespace\u0026gt; -- /bin/bash For example:\n$ kubectl exec -it helper -n oamns -- /bin/bash This will take you into a bash shell in the running helper pod:\n[oracle@helper ~]$ Connect to WLST using the following command:\n$ cd $ORACLE_HOME/oracle_common/common/bin $ ./wlst.sh The output will look similar to the following:\nInitializing WebLogic Scripting Tool (WLST) ... Jython scans all the jar files it can find at first startup. Depending on the system, this process may take a few minutes to complete, and WLST may not return a prompt right away. Welcome to WebLogic Server Administration Scripting Shell Type help() for help on available commands wls:/offline\u0026gt; To access t3 for the Administration Server connect as follows:\nwls:/offline\u0026gt; connect(\u0026#39;weblogic\u0026#39;,\u0026#39;\u0026lt;password\u0026gt;\u0026#39;,\u0026#39;t3://accessdomain-adminserver:7001\u0026#39;) The output will look similar to the following:\nConnecting to t3://accessdomain-adminserver:7001 with userid weblogic ... Successfully connected to Admin Server \u0026quot;AdminServer\u0026quot; that belongs to domain \u0026quot;accessdomain\u0026quot;. Warning: An insecure protocol was used to connect to the server. To ensure on-the-wire security, the SSL port or Admin port should be used instead. wls:/accessdomain/serverConfig/\u0026gt; Or to access t3 for the OAM Cluster service, connect as follows:\nconnect(\u0026#39;weblogic\u0026#39;,\u0026#39;\u0026lt;password\u0026gt;\u0026#39;,\u0026#39;t3://accessdomain-cluster-oam-cluster:14100\u0026#39;) The output will look similar to the following:\nConnecting to t3://accessdomain-cluster-oam-cluster:14100 with userid weblogic ... Successfully connected to managed Server \u0026quot;oam_server1\u0026quot; that belongs to domain \u0026quot;accessdomain\u0026quot;. Warning: An insecure protocol was used to connect to the server. To ensure on-the-wire security, the SSL port or Admin port should be used instead. wls:/accessdomain/serverConfig/\u0026gt; Sample operations For a full list of WLST operations refer to WebLogic Server WLST Online and Offline Command Reference.\nDisplay servers wls:/accessdomain/serverConfig/\u0026gt; cd(\u0026#39;/Servers\u0026#39;) wls:/accessdomain/serverConfig/Servers\u0026gt; ls() dr-- AdminServer dr-- oam_policy_mgr1 dr-- oam_policy_mgr2 dr-- oam_policy_mgr3 dr-- oam_policy_mgr4 dr-- oam_policy_mgr5 dr-- oam_server1 dr-- oam_server2 dr-- oam_server3 dr-- oam_server4 dr-- oam_server5 wls:/accessdomain/serverConfig/Servers\u0026gt; Configure logging for managed servers Connect to the Administration Server and run the following:\nwls:/accessdomain/serverConfig/\u0026gt; domainRuntime() Location changed to domainRuntime tree. 
This is a read-only tree with DomainMBean as the root MBean. For more help, use help(\u0026#39;domainRuntime\u0026#39;) wls:/accessdomain/domainRuntime/\u0026gt; wls:/accessdomain/domainRuntime/\u0026gt; listLoggers(pattern=\u0026#34;oracle.oam.*\u0026#34;,target=\u0026#34;oam_server1\u0026#34;) ------------------------------------------+----------------- Logger | Level ------------------------------------------+----------------- oracle.oam | \u0026lt;Inherited\u0026gt; oracle.oam.admin.foundation.configuration | \u0026lt;Inherited\u0026gt; oracle.oam.admin.service.config | \u0026lt;Inherited\u0026gt; oracle.oam.agent | \u0026lt;Inherited\u0026gt; oracle.oam.agent-default | \u0026lt;Inherited\u0026gt; oracle.oam.audit | \u0026lt;Inherited\u0026gt; oracle.oam.binding | \u0026lt;Inherited\u0026gt; oracle.oam.certvalidation | \u0026lt;Inherited\u0026gt; oracle.oam.certvalidation.mbeans | \u0026lt;Inherited\u0026gt; oracle.oam.common.healthcheck | \u0026lt;Inherited\u0026gt; oracle.oam.common.runtimeent | \u0026lt;Inherited\u0026gt; oracle.oam.commonutil | \u0026lt;Inherited\u0026gt; oracle.oam.config | \u0026lt;Inherited\u0026gt; oracle.oam.controller | \u0026lt;Inherited\u0026gt; oracle.oam.default | \u0026lt;Inherited\u0026gt; oracle.oam.diagnostic | \u0026lt;Inherited\u0026gt; oracle.oam.engine.authn | \u0026lt;Inherited\u0026gt; oracle.oam.engine.authz | \u0026lt;Inherited\u0026gt; oracle.oam.engine.policy | \u0026lt;Inherited\u0026gt; oracle.oam.engine.ptmetadata | \u0026lt;Inherited\u0026gt; oracle.oam.engine.session | \u0026lt;Inherited\u0026gt; oracle.oam.engine.sso | \u0026lt;Inherited\u0026gt; oracle.oam.esso | \u0026lt;Inherited\u0026gt; oracle.oam.extensibility.lifecycle | \u0026lt;Inherited\u0026gt; oracle.oam.foundation.access | \u0026lt;Inherited\u0026gt; oracle.oam.idm | \u0026lt;Inherited\u0026gt; oracle.oam.install | \u0026lt;Inherited\u0026gt; oracle.oam.install.bootstrap | \u0026lt;Inherited\u0026gt; oracle.oam.install.mbeans | \u0026lt;Inherited\u0026gt; oracle.oam.ipf.rest.api | \u0026lt;Inherited\u0026gt; oracle.oam.oauth | \u0026lt;Inherited\u0026gt; oracle.oam.plugin | \u0026lt;Inherited\u0026gt; oracle.oam.proxy.oam | \u0026lt;Inherited\u0026gt; oracle.oam.proxy.oam.workmanager | \u0026lt;Inherited\u0026gt; oracle.oam.proxy.opensso | \u0026lt;Inherited\u0026gt; oracle.oam.pswd.service.provider | \u0026lt;Inherited\u0026gt; oracle.oam.replication | \u0026lt;Inherited\u0026gt; oracle.oam.user.identity.provider | \u0026lt;Inherited\u0026gt; wls:/accessdomain/domainRuntime/\u0026gt; Set the log level to TRACE:32:\nwls:/accessdomain/domainRuntime/\u0026gt; setLogLevel(target=\u0026#39;oam_server1\u0026#39;,logger=\u0026#39;oracle.oam\u0026#39;,level=\u0026#39;TRACE:32\u0026#39;,persist=\u0026#34;1\u0026#34;,addLogger=1) wls:/accessdomain/domainRuntime/\u0026gt; wls:/accessdomain/domainRuntime/\u0026gt; listLoggers(pattern=\u0026#34;oracle.oam.*\u0026#34;,target=\u0026#34;oam_server1\u0026#34;) ------------------------------------------+----------------- Logger | Level ------------------------------------------+----------------- oracle.oam | TRACE:32 oracle.oam.admin.foundation.configuration | \u0026lt;Inherited\u0026gt; oracle.oam.admin.service.config | \u0026lt;Inherited\u0026gt; oracle.oam.agent | \u0026lt;Inherited\u0026gt; oracle.oam.agent-default | \u0026lt;Inherited\u0026gt; oracle.oam.audit | \u0026lt;Inherited\u0026gt; oracle.oam.binding | \u0026lt;Inherited\u0026gt; oracle.oam.certvalidation | \u0026lt;Inherited\u0026gt; oracle.oam.certvalidation.mbeans | 
\u0026lt;Inherited\u0026gt; oracle.oam.common.healthcheck | \u0026lt;Inherited\u0026gt; oracle.oam.common.runtimeent | \u0026lt;Inherited\u0026gt; oracle.oam.commonutil | \u0026lt;Inherited\u0026gt; oracle.oam.config | \u0026lt;Inherited\u0026gt; oracle.oam.controller | \u0026lt;Inherited\u0026gt; oracle.oam.default | \u0026lt;Inherited\u0026gt; oracle.oam.diagnostic | \u0026lt;Inherited\u0026gt; oracle.oam.engine.authn | \u0026lt;Inherited\u0026gt; oracle.oam.engine.authz | \u0026lt;Inherited\u0026gt; oracle.oam.engine.policy | \u0026lt;Inherited\u0026gt; oracle.oam.engine.ptmetadata | \u0026lt;Inherited\u0026gt; oracle.oam.engine.session | \u0026lt;Inherited\u0026gt; oracle.oam.engine.sso | \u0026lt;Inherited\u0026gt; oracle.oam.esso | \u0026lt;Inherited\u0026gt; oracle.oam.extensibility.lifecycle | \u0026lt;Inherited\u0026gt; oracle.oam.foundation.access | \u0026lt;Inherited\u0026gt; oracle.oam.idm | \u0026lt;Inherited\u0026gt; oracle.oam.install | \u0026lt;Inherited\u0026gt; oracle.oam.install.bootstrap | \u0026lt;Inherited\u0026gt; oracle.oam.install.mbeans | \u0026lt;Inherited\u0026gt; oracle.oam.ipf.rest.api | \u0026lt;Inherited\u0026gt; oracle.oam.oauth | \u0026lt;Inherited\u0026gt; oracle.oam.plugin | \u0026lt;Inherited\u0026gt; oracle.oam.proxy.oam | \u0026lt;Inherited\u0026gt; oracle.oam.proxy.oam.workmanager | \u0026lt;Inherited\u0026gt; oracle.oam.proxy.opensso | \u0026lt;Inherited\u0026gt; oracle.oam.pswd.service.provider | \u0026lt;Inherited\u0026gt; oracle.oam.replication | \u0026lt;Inherited\u0026gt; oracle.oam.user.identity.provider | \u0026lt;Inherited\u0026gt; wls:/accessdomain/domainRuntime/\u0026gt; Verify that TRACE:32 log level is set by connecting to the Administration Server and viewing the logs:\n$ kubectl exec -it accessdomain-adminserver -n oamns -- /bin/bash [oracle@accessdomain-adminserver oracle]$ [oracle@accessdomain-adminserver oracle]$ cd /u01/oracle/user_projects/domains/accessdomain/servers/oam_server1/logs [oracle@accessdomain-adminserver logs]$ tail oam_server1-diagnostic.log [\u0026lt;DATE\u0026gt;] [oam_server1] [TRACE:32] [] [oracle.oam.config] [tid: Configuration Store Observer] [userId: \u0026lt;anonymous\u0026gt;] [ecid: 8b3ac37b-c7cf-46dd-aeee-5ed67886be21-0000000b,0:1795] [APP: oam_server] [partition-name: DOMAIN] [tenant-name: GLOBAL] [SRC_CLASS: oracle.security.am.admin.config.util.observable.ObservableConfigStore$StoreWatcher] [SRC_METHOD: run] Start of run before start of detection at 1,635,848,774,793. Detector: oracle.security.am.admin.config.util.observable.DbStoreChangeDetector:Database configuration store:DSN:jdbc/oamds. 
Monitor: { StoreMonitor: { disabled: \u0026#39;false\u0026#39; } } [\u0026lt;DATE\u0026gt;] [oam_server1] [TRACE] [] [oracle.oam.config] [tid: Configuration Store Observer] [userId: \u0026lt;anonymous\u0026gt;] [ecid: 8b3ac37b-c7cf-46dd-aeee-5ed67886be21-0000000b,0:1795] [APP: oam_server] [partition-name: DOMAIN] [tenant-name: GLOBAL] [SRC_CLASS: oracle.security.am.admin.config.util.store.StoreUtil] [SRC_METHOD: getContainerProperty] Configuration property CONFIG_HISTORY not specified [\u0026lt;DATE\u0026gt;] [oam_server1] [TRACE] [] [oracle.oam.config] [tid: Configuration Store Observer] [userId: \u0026lt;anonymous\u0026gt;] [ecid: 8b3ac37b-c7cf-46dd-aeee-5ed67886be21-0000000b,0:1795] [APP: oam_server] [partition-name: DOMAIN] [tenant-name: GLOBAL] [SRC_CLASS: oracle.security.am.admin.config.util.store.StoreUtil] [SRC_METHOD: getContainerProperty] Configuration property CONFIG not specified [\u0026lt;DATE\u0026gt;] [oam_server1] [TRACE:32] [] [oracle.oam.config] [tid: Configuration Store Observer] [userId: \u0026lt;anonymous\u0026gt;] [ecid: 8b3ac37b-c7cf-46dd-aeee-5ed67886be21-0000000b,0:1795] [APP: oam_server] [partition-name: DOMAIN] [tenant-name: GLOBAL] [SRC_CLASS: oracle.security.am.admin.config.util.store.DbStore] [SRC_METHOD: getSelectSQL] SELECT SQL:SELECT version from IDM_OBJECT_STORE where id = ? and version = (select max(version) from IDM_OBJECT_STORE where id = ?) [\u0026lt;DATE\u0026gt;] [oam_server1] [TRACE] [] [oracle.oam.config] [tid: Configuration Store Observer] [userId: \u0026lt;anonymous\u0026gt;] [ecid: 8b3ac37b-c7cf-46dd-aeee-5ed67886be21-0000000b,0:1795] [APP: oam_server] [partition-name: DOMAIN] [tenant-name: GLOBAL] [SRC_CLASS: oracle.security.am.admin.config.util.store.DbStore] [SRC_METHOD: load] Time (ms) to load key CONFIG:-1{FIELD_TYPES=INT, SELECT_FIELDS=SELECT version from IDM_OBJECT_STORE }:4 Performing WLST Administration via SSL By default the SSL port is not enabled for the Administration Server or OAM Managed Servers. To configure the SSL port for the Administration Server and Managed Servers login to WebLogic Administration console https://${MASTERNODE-HOSTNAME}:${MASTERNODE-PORT}/console and navigate to Lock \u0026amp; Edit -\u0026gt; Environment -\u0026gt;Servers -\u0026gt; server_name -\u0026gt;Configuration -\u0026gt; General -\u0026gt; SSL Listen Port Enabled -\u0026gt; Provide SSL Port ( For Administration Server: 7002 and for OAM Managed Server (oam_server1): 14101) - \u0026gt; Save -\u0026gt; Activate Changes.\nNote: If configuring the OAM Managed Servers for SSL you must enable SSL on the same port for all servers (oam_server1 through oam_server5)\n Create a myscripts directory as follows:\n$ cd $WORKDIR/kubernetes/ $ mkdir myscripts $ cd myscripts For example:\n$ cd $WORKDIR/kubernetes/ $ mkdir myscripts $ cd myscripts Create a sample yaml template file in the myscripts directory called \u0026lt;domain_uid\u0026gt;-adminserver-ssl.yaml to create a Kubernetes service for the Administration Server:\nNote: Update the domainName, domainUID and namespace based on your environment. 
For example:\napiVersion: v1 kind: Service metadata: labels: serviceType: SERVER weblogic.domainName: accessdomain weblogic.domainUID: accessdomain weblogic.resourceVersion: domain-v2 weblogic.serverName: AdminServer name: accessdomain-adminserverssl namespace: oamns spec: clusterIP: None ports: - name: default port: 7002 protocol: TCP targetPort: 7002 selector: weblogic.createdByOperator: \u0026quot;true\u0026quot; weblogic.domainUID: accessdomain weblogic.serverName: AdminServer type: ClusterIP and the following sample yaml template file \u0026lt;domain_uid\u0026gt;-oamcluster-ssl.yaml for the OAM Managed Server:\napiVersion: v1 kind: Service metadata: labels: serviceType: SERVER weblogic.domainName: accessdomain weblogic.domainUID: accessdomain weblogic.resourceVersion: domain-v2 name: accessdomain-oamcluster-ssl namespace: oamns spec: clusterIP: None ports: - name: default port: 14101 protocol: TCP targetPort: 14101 selector: weblogic.clusterName: oam_cluster weblogic.createdByOperator: \u0026quot;true\u0026quot; weblogic.domainUID: accessdomain type: ClusterIP Apply the template using the following command for the AdminServer:\n$ kubectl apply -f \u0026lt;domain_uid\u0026gt;-adminserver-ssl.yaml For example:\n$ kubectl apply -f accessdomain-adminserver-ssl.yaml service/accessdomain-adminserverssl created and using the following command for the OAM Managed Server:\n$ kubectl apply -f \u0026lt;domain_uid\u0026gt;-oamcluster-ssl.yaml For example:\n$ kubectl apply -f accessdomain-oamcluster-ssl.yaml service/accessdomain-oamcluster-ssl created Validate that the Kubernetes Services to access SSL ports are created successfully:\n$ kubectl get svc -n \u0026lt;domain_namespace\u0026gt; |grep ssl For example:\n$ kubectl get svc -n oamns |grep ssl The output will look similar to the following:\naccessdomain-adminserverssl ClusterIP None \u0026lt;none\u0026gt; 7002/TCP 102s accessdomain-oamcluster-ssl ClusterIP None \u0026lt;none\u0026gt; 14101/TCP 35s Inside the bash shell of the running helper pod, run the following:\n[oracle@helper bin]$ export WLST_PROPERTIES=\u0026#34;-Dweblogic.security.SSL.ignoreHostnameVerification=true -Dweblogic.security.TrustKeyStore=DemoTrust\u0026#34; [oracle@helper bin]$ cd /u01/oracle/oracle_common/common/bin [oracle@helper bin]$ ./wlst.sh Initializing WebLogic Scripting Tool (WLST) ... Welcome to WebLogic Server Administration Scripting Shell Type help() for help on available commands wls:/offline\u0026gt; To connect to the Administration Server t3s service:\nwls:/offline\u0026gt; connect(\u0026#39;weblogic\u0026#39;,\u0026#39;\u0026lt;password\u0026gt;\u0026#39;,\u0026#39;t3s://accessdomain-adminserverssl:7002\u0026#39;) Connecting to t3s://accessdomain-adminserverssl:7002 with userid weblogic ... \u0026lt;\u0026lt;DATE\u0026gt;\u0026gt; \u0026lt;Info\u0026gt; \u0026lt;Security\u0026gt; \u0026lt;BEA-090905\u0026gt; \u0026lt;Disabling the CryptoJ JCE Provider self-integrity check for better startup performance. To enable this check, specify -Dweblogic.security.allowCryptoJDefaultJCEVerification=true.\u0026gt; \u0026lt;\u0026lt;DATE\u0026gt;\u0026gt; \u0026lt;Info\u0026gt; \u0026lt;Security\u0026gt; \u0026lt;BEA-090906\u0026gt; \u0026lt;Changing the default Random Number Generator in RSA CryptoJ from ECDRBG128 to HMACDRBG. 
To disable this change, specify -Dweblogic.security.allowCryptoJDefaultPRNG=true.\u0026gt; \u0026lt;\u0026lt;DATE\u0026gt;\u0026gt; \u0026lt;Info\u0026gt; \u0026lt;Security\u0026gt; \u0026lt;BEA-090909\u0026gt; \u0026lt;Using the configured custom SSL Hostname Verifier implementation: weblogic.security.utils.SSLWLSHostnameVerifier$NullHostnameVerifier.\u0026gt; Successfully connected to Admin Server \u0026#34;AdminServer\u0026#34; that belongs to domain \u0026#34;accessdomain\u0026#34;. wls:/accessdomain/serverConfig/\u0026gt; To connect to the OAM Managed Server t3s service:\nwls:/offline\u0026gt; connect(\u0026#39;weblogic\u0026#39;,\u0026#39;\u0026lt;password\u0026gt;\u0026#39;,\u0026#39;t3s://accessdomain-oamcluster-ssl:14101\u0026#39;) Connecting to t3s://accessdomain-oamcluster-ssl:14101 with userid weblogic ... \u0026lt;\u0026lt;DATE\u0026gt;\u0026gt; \u0026lt;Info\u0026gt; \u0026lt;Security\u0026gt; \u0026lt;BEA-090905\u0026gt; \u0026lt;Disabling the CryptoJ JCE Provider self-integrity check for better startup performance. To enable this check, specify -Dweblogic.security.allowCryptoJDefaultJCEVerification=true.\u0026gt; \u0026lt;\u0026lt;DATE\u0026gt;\u0026gt; \u0026lt;Info\u0026gt; \u0026lt;Security\u0026gt; \u0026lt;BEA-090906\u0026gt; \u0026lt;Changing the default Random Number Generator in RSA CryptoJ from ECDRBG128 to HMACDRBG. To disable this change, specify -Dweblogic.security.allowCryptoJDefaultPRNG=true.\u0026gt; \u0026lt;\u0026lt;DATE\u0026gt;\u0026gt; \u0026lt;Info\u0026gt; \u0026lt;Security\u0026gt; \u0026lt;BEA-090909\u0026gt; \u0026lt;Using the configured custom SSL Hostname Verifier implementation: weblogic.security.utils.SSLWLSHostnameVerifier$NullHostnameVerifier.\u0026gt; Successfully connected to managed Server \u0026#34;oam_server1\u0026#34; that belongs to domain \u0026#34;accessdomain\u0026#34;. " +}, +{ + "uri": "/fmw-kubernetes/23.4.1/idm-products/oig/manage-oig-domains/wlst-admin-operations/", + "title": "b. WLST administration operations", + "tags": [], + "description": "Describes the steps for WLST administration using helper pod running in the same Kubernetes Cluster as OIG Domain.", + "content": "Invoke WLST and access Administration Server To use WLST to administer the OIG domain, use a helper pod in the same Kubernetes cluster as the OIG Domain.\n Check to see if the helper pod exists by running:\n$ kubectl get pods -n \u0026lt;domain_namespace\u0026gt; | grep helper For example:\n$ kubectl get pods -n oigns | grep helper The output should look similar to the following:\nhelper 1/1 Running 0 26h If the helper pod doesn\u0026rsquo;t exist then see Step 1 in Prepare your environment to create it.\n Run the following command to start a bash shell in the helper pod:\n$ kubectl exec -it helper -n \u0026lt;domain_namespace\u0026gt; -- /bin/bash For example:\n$ kubectl exec -it helper -n oigns -- /bin/bash This will take you into a bash shell in the running helper pod:\n[oracle@helper ~]$ Connect to WLST using the following commands:\n[oracle@helper ~]$ cd $ORACLE_HOME/oracle_common/common/bin [oracle@helper ~]$ ./wlst.sh The output will look similar to the following:\nInitializing WebLogic Scripting Tool (WLST) ... Jython scans all the jar files it can find at first startup. Depending on the system, this process may take a few minutes to complete, and WLST may not return a prompt right away. 
Welcome to WebLogic Server Administration Scripting Shell Type help() for help on available commands wls:/offline\u0026gt; To access t3 for the Administration Server connect as follows:\nwls:/offline\u0026gt; connect(\u0026#39;weblogic\u0026#39;,\u0026#39;\u0026lt;password\u0026gt;\u0026#39;,\u0026#39;t3://governancedomain-adminserver:7001\u0026#39;) The output will look similar to the following:\nConnecting to t3://governancedomain-adminserver:7001 with userid weblogic ... Successfully connected to Admin Server \u0026quot;AdminServer\u0026quot; that belongs to domain \u0026quot;governancedomain\u0026quot;. Warning: An insecure protocol was used to connect to the server. To ensure on-the-wire security, the SSL port or Admin port should be used instead. wls:/governancedomain/serverConfig/\u0026gt; Or to access t3 for the OIG Cluster service, connect as follows:\nwls:/offline\u0026gt; connect(\u0026#39;weblogic\u0026#39;,\u0026#39;\u0026lt;password\u0026gt;\u0026#39;,\u0026#39;t3://governancedomain-cluster-oim-cluster:14000\u0026#39;) The output will look similar to the following:\nConnecting to t3://governancedomain-cluster-oim-cluster:14000 with userid weblogic ... Successfully connected to managed Server \u0026quot;oim_server1\u0026quot; that belongs to domain \u0026quot;governancedomain\u0026quot;. Warning: An insecure protocol was used to connect to the server. To ensure on-the-wire security, the SSL port or Admin port should be used instead. wls:/governancedomain/serverConfig/\u0026gt; Sample operations For a full list of WLST operations refer to WebLogic Server WLST Online and Offline Command Reference.\nDisplay servers wls:/governancedomain/serverConfig/\u0026gt; cd('/Servers') wls:/governancedomain/serverConfig/Servers\u0026gt; ls () dr-- AdminServer dr-- oim_server1 dr-- oim_server2 dr-- oim_server3 dr-- oim_server4 dr-- oim_server5 dr-- soa_server1 dr-- soa_server2 dr-- soa_server3 dr-- soa_server4 dr-- soa_server5 wls:/governancedomain/serverConfig/Servers\u0026gt; Performing WLST administration via SSL By default the SSL port is not enabled for the Administration Server or OIG Managed Servers. 
To configure the SSL port for the Administration Server and Managed Servers login to WebLogic Administration console https://${MASTERNODE-HOSTNAME}:${MASTERNODE-PORT}/console and navigate to Lock \u0026amp; Edit -\u0026gt; Environment -\u0026gt;Servers -\u0026gt; server_name -\u0026gt;Configuration -\u0026gt; General -\u0026gt; SSL Listen Port Enabled -\u0026gt; Provide SSL Port ( For Administration Server: 7002 and for OIG Managed Server (oim_server1): 14101) - \u0026gt; Save -\u0026gt; Activate Changes.\nNote: If configuring the OIG Managed Servers for SSL you must enable SSL on the same port for all servers (oim_server1 through oim_server4)\n Create a myscripts directory as follows:\n$ cd $WORKDIR/kubernetes $ mkdir myscripts $ cd myscripts Create a sample yaml template file in the myscripts directory called \u0026lt;domain_uid\u0026gt;-adminserver-ssl.yaml to create a Kubernetes service for the Administration Server:\nNote: Update the domainName, domainUID and namespace based on your environment.\napiVersion: v1 kind: Service metadata: labels: serviceType: SERVER weblogic.domainName: governancedomain weblogic.domainUID: governancedomain weblogic.resourceVersion: domain-v2 weblogic.serverName: AdminServer name: governancedomain-adminserver-ssl namespace: oigns spec: clusterIP: None ports: - name: default port: 7002 protocol: TCP targetPort: 7002 selector: weblogic.createdByOperator: \u0026quot;true\u0026quot; weblogic.domainUID: governancedomain weblogic.serverName: AdminServer type: ClusterIP and create the following sample yaml template file \u0026lt;domain_uid\u0026gt;-oim-cluster-ssl.yaml for the OIG Managed Server:\napiVersion: v1 kind: Service metadata: labels: serviceType: SERVER weblogic.domainName: governancedomain weblogic.domainUID: governancedomain weblogic.resourceVersion: domain-v2 name: governancedomain-cluster-oim-cluster-ssl namespace: oigns spec: clusterIP: None ports: - name: default port: 14101 protocol: TCP targetPort: 14101 selector: weblogic.clusterName: oim_cluster weblogic.createdByOperator: \u0026quot;true\u0026quot; weblogic.domainUID: governancedomain type: ClusterIP Apply the template using the following command for the Administration Server:\n$ kubectl apply -f governancedomain-adminserver-ssl.yaml service/governancedomain-adminserver-ssl created or using the following command for the OIG Managed Server:\n$ kubectl apply -f governancedomain-oim-cluster-ssl.yaml service/governancedomain-cluster-oim-cluster-ssl created Validate that the Kubernetes Services to access SSL ports are created successfully:\n$ kubectl get svc -n \u0026lt;domain_namespace\u0026gt; |grep ssl For example:\n$ kubectl get svc -n oigns |grep ssl The output will look similar to the following:\ngovernancedomain-adminserver-ssl ClusterIP None \u0026lt;none\u0026gt; 7002/TCP 74s governancedomain-cluster-oim-cluster-ssl ClusterIP None \u0026lt;none\u0026gt; 14101/TCP 21s Connect to a bash shell of the helper pod:\n$ kubectl exec -it helper -n oigns -- /bin/bash In the bash shell run the following:\n[oracle@helper bin]$ export WLST_PROPERTIES=\u0026#34;-Dweblogic.security.SSL.ignoreHostnameVerification=true -Dweblogic.security.TrustKeyStore=DemoTrust\u0026#34; [oracle@helper bin]$ cd /u01/oracle/oracle_common/common/bin [oracle@helper bin]$ ./wlst.sh Initializing WebLogic Scripting Tool (WLST) ... 
Welcome to WebLogic Server Administration Scripting Shell Type help() for help on available commands wls:/offline\u0026gt; Connect to the Administration Server t3s service:\nwls:/offline\u0026gt; connect(\u0026#39;weblogic\u0026#39;,\u0026#39;\u0026lt;password\u0026gt;\u0026#39;,\u0026#39;t3s://governancedomain-adminserver-ssl:7002\u0026#39;) Connecting to t3s://governancedomain-adminserver-ssl:7002 with userid weblogic ... \u0026lt;DATE\u0026gt; \u0026lt;Info\u0026gt; \u0026lt;Security\u0026gt; \u0026lt;BEA-090905\u0026gt; \u0026lt;Disabling the CryptoJ JCE Provider self-integrity check for better startup performance. To enable this check, specify -Dweblogic.security.allowCryptoJDefaultJCEVerification=true.\u0026gt; \u0026lt;DATE\u0026gt; \u0026lt;Info\u0026gt; \u0026lt;Security\u0026gt; \u0026lt;BEA-090906\u0026gt; \u0026lt;Changing the default Random Number Generator in RSA CryptoJ from ECDRBG128 to HMACDRBG. To disable this change, specify -Dweblogic.security.allowCryptoJDefaultPRNG=true.\u0026gt; \u0026lt;DATE\u0026gt; \u0026lt;Info\u0026gt; \u0026lt;Security\u0026gt; \u0026lt;BEA-090909\u0026gt; \u0026lt;Using the configured custom SSL Hostname Verifier implementation: weblogic.security.utils.SSLWLSHostnameVerifier$NullHostnameVerifier.\u0026gt; Successfully connected to Admin Server \u0026#34;AdminServer\u0026#34; that belongs to domain \u0026#34;governancedomain\u0026#34;. wls:/governancedomain/serverConfig/\u0026gt; To connect to the OIG Managed Server t3s service:\nwls:/offline\u0026gt; connect(\u0026#39;weblogic\u0026#39;,\u0026#39;\u0026lt;password\u0026gt;\u0026#39;,\u0026#39;t3s://governancedomain-cluster-oim-cluster-ssl:14101\u0026#39;) Connecting to t3s://governancedomain-cluster-oim-cluster-ssl:14101 with userid weblogic ... \u0026lt;DATE\u0026gt; \u0026lt;Info\u0026gt; \u0026lt;Security\u0026gt; \u0026lt;BEA-090905\u0026gt; \u0026lt;Disabling the CryptoJ JCE Provider self-integrity check for better startup performance. To enable this check, specify -Dweblogic.security.allowCryptoJDefaultJCEVerification=true.\u0026gt; \u0026lt;DATE\u0026gt; \u0026lt;Info\u0026gt; \u0026lt;Security\u0026gt; \u0026lt;BEA-090906\u0026gt; \u0026lt;Changing the default Random Number Generator in RSA CryptoJ from ECDRBG128 to HMACDRBG. To disable this change, specify -Dweblogic.security.allowCryptoJDefaultPRNG=true.\u0026gt; \u0026lt;DATE\u0026gt; \u0026lt;Info\u0026gt; \u0026lt;Security\u0026gt; \u0026lt;BEA-090909\u0026gt; \u0026lt;Using the configured custom SSL Hostname Verifier implementation: weblogic.security.utils.SSLWLSHostnameVerifier$NullHostnameVerifier.\u0026gt; Successfully connected to managed Server \u0026#34;oim_server1\u0026#34; that belongs to domain \u0026#34;governancedomain\u0026#34;. 
wls:/governancedomain/serverConfig/\u0026gt; " +}, +{ + "uri": "/fmw-kubernetes/23.4.1/idm-products/oud/manage-oud-containers/monitoring-oud-instance/", + "title": "c) Monitoring an Oracle Unified Directory Instance", + "tags": [], + "description": "Describes the steps for Monitoring the Oracle Unified Directory environment.", + "content": " Introduction Install Prometheus and Grafana Create a Kubernetes namespace Add Prometheus and Grafana Helm repositories Install the Prometheus operator View Prometheus and Grafana objects created Add the NodePort Verify using Grafana GUI Introduction After the Oracle Unified Directory instance (OUD) is set up you can monitor it using Prometheus and Grafana.\nInstall Prometheus and Grafana Create a Kubernetes namespace Create a Kubernetes namespace to provide a scope for Prometheus and Grafana objects such as pods and services that you create in the environment. To create your namespace issue the following command:\n$ kubectl create namespace \u0026lt;namespace\u0026gt; For example:\n$ kubectl create namespace monitoring The output will look similar to the following:\nnamespace/monitoring created Add Prometheus and Grafana Helm repositories Add the Prometheus and Grafana Helm repositories by issuing the following command:\n$ helm repo add prometheus https://prometheus-community.github.io/helm-charts The output will look similar to the following:\n\u0026#34;prometheus\u0026#34; has been added to your repositories Run the following command to update the repositories:\n$ helm repo update The output will look similar to the following:\nHang tight while we grab the latest from your chart repositories... ...Successfully got an update from the \u0026#34;stable\u0026#34; chart repository ...Successfully got an update from the \u0026#34;prometheus\u0026#34; chart repository ...Successfully got an update from the \u0026#34;prometheus-community\u0026#34; chart repository Update Complete. Happy Helming! Install the Prometheus operator Install the Prometheus operator using the helm command:\n$ helm install \u0026lt;release_name\u0026gt; prometheus/kube-prometheus-stack -n \u0026lt;namespace\u0026gt; For example:\n$ helm install monitoring prometheus/kube-prometheus-stack -n monitoring The output should look similar to the following:\nNAME: monitoring LAST DEPLOYED: \u0026lt;DATE\u0026gt; NAMESPACE: monitoring STATUS: deployed REVISION: 1 NOTES: kube-prometheus-stack has been installed. Check its status by running: kubectl --namespace monitoring get pods -l \u0026#34;release=monitoring\u0026#34; Visit https://github.com/prometheus-operator/kube-prometheus for instructions on how to create \u0026amp; configure Alertmanager and Prometheus instances using the Operator. Note: If your cluster does not have access to the internet to pull external images, such as prometheus or grafana, you must load the images in a local container registry. 
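For example, if your local registry is container-registry.example.com (the registry name used in the helm command below), the grafana image referenced by the chart could be staged with commands similar to the following. This is a sketch only; repeat the same steps for any other chart images, such as prometheus or alertmanager, that your cluster cannot pull directly:\n$ docker pull grafana/grafana:8.4.2\n$ docker tag grafana/grafana:8.4.2 container-registry.example.com/grafana:8.4.2\n$ docker push container-registry.example.com/grafana:8.4.2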
You must then install as follows:\nhelm install --set grafana.image.repository==container-registry.example.com/grafana --set grafana.image.tag=8.4.2 monitoring prometheus/kube-prometheus-stack -n monitoring View Prometheus and Grafana Objects created View the objects created for Prometheus and Grafana by issuing the following command:\n$ kubectl get all,service,pod -o wide -n \u0026lt;namespace\u0026gt; For example:\n$ kubectl get all,service,pod -o wide -n monitoring The output will look similar to the following:\nNAME READY STATUS RESTARTS AGE IP NODE NOMINATED NODE READINESS GATES pod/alertmanager-monitoring-kube-prometheus-alertmanager-0 2/2 Running 0 36s 10.244.1.78 \u0026lt;worker-node\u0026gt; \u0026lt;none\u0026gt; \u0026lt;none\u0026gt; pod/monitoring-grafana-578f79599c-qc9gd 3/3 Running 0 47s 10.244.2.200 \u0026lt;worker-node\u0026gt; \u0026lt;none\u0026gt; \u0026lt;none\u0026gt; pod/monitoring-kube-prometheus-operator-65cdf7995-kndgg 1/1 Running 0 47s 10.244.2.199 \u0026lt;worker-node\u0026gt; \u0026lt;none\u0026gt; \u0026lt;none\u0026gt; pod/monitoring-kube-state-metrics-56bfd4f44f-85l4p 1/1 Running 0 47s 10.244.1.76 \u0026lt;worker-node\u0026gt; \u0026lt;none\u0026gt; \u0026lt;none\u0026gt; pod/monitoring-prometheus-node-exporter-g2x9g 1/1 Running 0 47s 100.102.48.121 \u0026lt;master-node\u0026gt; \u0026lt;none\u0026gt; \u0026lt;none\u0026gt; pod/monitoring-prometheus-node-exporter-p9kkq 1/1 Running 0 47s 100.102.48.84 \u0026lt;worker-node\u0026gt; \u0026lt;none\u0026gt; \u0026lt;none\u0026gt; pod/monitoring-prometheus-node-exporter-rzhrd 1/1 Running 0 47s 100.102.48.28 \u0026lt;worker-node\u0026gt; \u0026lt;none\u0026gt; \u0026lt;none\u0026gt; pod/prometheus-monitoring-kube-prometheus-prometheus-0 2/2 Running 0 35s 10.244.1.79 \u0026lt;worker-node\u0026gt; \u0026lt;none\u0026gt; \u0026lt;none\u0026gt; NAME TYPE CLUSTER-IP EXTERNAL-IP PORT(S) AGE SELECTOR service/alertmanager-operated ClusterIP None \u0026lt;none\u0026gt; 9093/TCP,9094/TCP,9094/UDP 36s app.kubernetes.io/name=alertmanager service/monitoring-grafana ClusterIP 10.110.193.30 \u0026lt;none\u0026gt; 80/TCP 47s app.kubernetes.io/instance=monitoring,app.kubernetes.io/name=grafana service/monitoring-kube-prometheus-alertmanager ClusterIP 10.104.2.37 \u0026lt;none\u0026gt; 9093/TCP 47s alertmanager=monitoring-kube-prometheus-alertmanager,app.kubernetes.io/name=alertmanager service/monitoring-kube-prometheus-operator ClusterIP 10.99.162.229 \u0026lt;none\u0026gt; 443/TCP 47s app=kube-prometheus-stack-operator,release=monitoring service/monitoring-kube-prometheus-prometheus ClusterIP 10.108.161.46 \u0026lt;none\u0026gt; 9090/TCP 47s app.kubernetes.io/name=prometheus,prometheus=monitoring-kube-prometheus-prometheus service/monitoring-kube-state-metrics ClusterIP 10.111.162.185 \u0026lt;none\u0026gt; 8080/TCP 47s app.kubernetes.io/instance=monitoring,app.kubernetes.io/name=kube-state-metrics service/monitoring-prometheus-node-exporter ClusterIP 10.109.21.136 \u0026lt;none\u0026gt; 9100/TCP 47s app=prometheus-node-exporter,release=monitoring service/prometheus-operated ClusterIP None \u0026lt;none\u0026gt; 9090/TCP 35s app.kubernetes.io/name=prometheus NAME DESIRED CURRENT READY UP-TO-DATE AVAILABLE NODE SELECTOR AGE CONTAINERS IMAGES SELECTOR daemonset.apps/monitoring-prometheus-node-exporter 3 3 3 3 3 \u0026lt;none\u0026gt; 47s node-exporter quay.io/prometheus/node-exporter:v1.3.1 app=prometheus-node-exporter,release=monitoring NAME READY UP-TO-DATE AVAILABLE AGE CONTAINERS IMAGES SELECTOR 
deployment.apps/monitoring-grafana 1/1 1 1 47s grafana-sc-dashboard,grafana-sc-datasources,grafana quay.io/kiwigrid/k8s-sidecar:1.15.6,quay.io/kiwigrid/k8s-sidecar:1.15.6,grafana/grafana:8.4.2 app.kubernetes.io/instance=monitoring,app.kubernetes.io/name=grafana deployment.apps/monitoring-kube-prometheus-operator 1/1 1 1 47s kube-prometheus-stack quay.io/prometheus-operator/prometheus-operator:v0.55.0 app=kube-prometheus-stack-operator,release=monitoring deployment.apps/monitoring-kube-state-metrics 1/1 1 1 47s kube-state-metrics k8s.gcr.io/kube-state-metrics/kube-state-metrics:v2.4.1 app.kubernetes.io/instance=monitoring,app.kubernetes.io/name=kube-state-metrics NAME DESIRED CURRENT READY AGE CONTAINERS IMAGES SELECTOR replicaset.apps/monitoring-grafana-578f79599c 1 1 1 47s grafana-sc-dashboard,grafana-sc-datasources,grafana quay.io/kiwigrid/k8s-sidecar:1.15.6,quay.io/kiwigrid/k8s-sidecar:1.15.6,grafana/grafana:8.4.2 app.kubernetes.io/instance=monitoring,app.kubernetes.io/name=grafana,pod-template-hash=578f79599c replicaset.apps/monitoring-kube-prometheus-operator-65cdf7995 1 1 1 47s kube-prometheus-stack quay.io/prometheus-operator/prometheus-operator:v0.55.0 app=kube-prometheus-stack-operator,pod-template-hash=65cdf7995,release=monitoring replicaset.apps/monitoring-kube-state-metrics-56bfd4f44f 1 1 1 47s kube-state-metrics k8s.gcr.io/kube-state-metrics/kube-state-metrics:v2.4.1 app.kubernetes.io/instance=monitoring,app.kubernetes.io/name=kube-state-metrics,pod-template-hash=56bfd4f44f NAME READY AGE CONTAINERS IMAGES statefulset.apps/alertmanager-monitoring-kube-prometheus-alertmanager 1/1 36s alertmanager,config-reloader quay.io/prometheus/alertmanager:v0.23.0,quay.io/prometheus-operator/prometheus-config-reloader:v0.55.0 statefulset.apps/prometheus-monitoring-kube-prometheus-prometheus 1/1 35s prometheus,config-reloader quay.io/prometheus/prometheus:v2.33.5,quay.io/prometheus-operator/prometheus-config-reloader:v0.55.0 Add the NodePort Edit the grafana service to add the NodePort:\n$ kubectl edit service/\u0026lt;deployment_name\u0026gt;-grafana -n \u0026lt;namespace\u0026gt; For example:\n$ kubectl edit service/monitoring-grafana -n monitoring Note: This opens an edit session for the domain where parameters can be changed using standard vi commands.\nChange the ports entry and add nodePort: 30091 and type: NodePort:\n ports: - name: http-web nodePort: 30091 port: 80 protocol: TCP targetPort: 3000 selector: app.kubernetes.io/instance: monitoring app.kubernetes.io/name: grafana sessionAffinity: None type: NodePort Save the file and exit (:wq).\n Verify Using Grafana GUI Access the Grafana GUI using http://\u0026lt;HostIP\u0026gt;:\u0026lt;nodeport\u0026gt; and login with admin/prom-operator. Change the password when prompted.\n Download the K8 Cluster Detail Dashboard json file from: https://grafana.com/grafana/dashboards/10856.\n Import the Grafana dashboard by navigating on the left hand menu to Dashboards \u0026gt; Import. Click Upload JSON file and select the json downloaded file. In the Prometheus drop down box select Prometheus. Click Import. 
The dashboard should be displayed.\n Verify your installation by viewing some of the customized dashboard views.\n " +}, +{ + "uri": "/fmw-kubernetes/23.4.1/idm-products/oudsm/manage-oudsm-containers/monitoring-oudsm-instance/", + "title": "c) Monitoring an Oracle Unified Directory Services Manager Instance", + "tags": [], + "description": "Describes the steps for Monitoring the Oracle Unified Directory Services Manager environment.", + "content": " Introduction Install Prometheus and Grafana Create a Kubernetes namespace Add Prometheus and Grafana Helm repositories Install the Prometheus operator View Prometheus and Grafana Objects Created Add the NodePort Verify Using Grafana GUI Introduction After the Oracle Unified Directory Services Manager instance is set up you can monitor it using Prometheus and Grafana.\nInstall Prometheus and Grafana Create a Kubernetes namespace Create a Kubernetes namespace to provide a scope for Prometheus and Grafana objects such as pods and services that you create in the environment. To create your namespace issue the following command:\n$ kubectl create namespace \u0026lt;namespace\u0026gt; For example:\n$ kubectl create namespace monitoring The output will look similar to the following:\nnamespace/monitoring created Add Prometheus and Grafana Helm repositories Add the Prometheus and Grafana Helm repositories by issuing the following command:\n$ helm repo add prometheus https://prometheus-community.github.io/helm-charts The output will look similar to the following:\n\u0026#34;prometheus\u0026#34; has been added to your repositories Run the following command to update the repositories:\n$ helm repo update The output will look similar to the following:\nHang tight while we grab the latest from your chart repositories... ...Successfully got an update from the \u0026#34;stable\u0026#34; chart repository ...Successfully got an update from the \u0026#34;prometheus\u0026#34; chart repository ...Successfully got an update from the \u0026#34;prometheus-community\u0026#34; chart repository Update Complete. Happy Helming! Install the Prometheus operator Install the Prometheus operator using the helm command:\n$ helm install \u0026lt;release_name\u0026gt; prometheus/kube-prometheus-stack -n \u0026lt;namespace\u0026gt; For example:\n$ helm install monitoring prometheus/kube-prometheus-stack -n monitoring The output should look similar to the following:\nNAME: monitoring LAST DEPLOYED: \u0026lt;DATE\u0026gt; NAMESPACE: monitoring STATUS: deployed REVISION: 1 NOTES: kube-prometheus-stack has been installed. Check its status by running: kubectl --namespace monitoring get pods -l \u0026#34;release=monitoring\u0026#34; Visit https://github.com/prometheus-operator/kube-prometheus for instructions on how to create \u0026amp; configure Alertmanager and Prometheus instances using the Operator. Note: If your cluster does not have access to the internet to pull external images, such as prometheus or grafana, you must load the images in a local container registry. 
You must then set install as follows:\nhelm install --set grafana.image.repository==container-registry.example.com/grafana --set grafana.image.tag=8.4.2 monitoring prometheus/kube-prometheus-stack -n monitoring View Prometheus and Grafana Objects created View the objects created for Prometheus and Grafana by issuing the following command:\n$ kubectl get all,service,pod -o wide -n \u0026lt;namespace\u0026gt; For example:\n$ kubectl get all,service,pod -o wide -n monitoring The output will look similar to the following:\nNAME READY STATUS RESTARTS AGE IP NODE NOMINATED NODE READINESS GATES pod/alertmanager-monitoring-kube-prometheus-alertmanager-0 2/2 Running 0 27s 10.244.2.141 \u0026lt;worker-node\u0026gt; \u0026lt;none\u0026gt; \u0026lt;none\u0026gt; pod/monitoring-grafana-578f79599c-qqdfb 3/3 Running 0 34s 10.244.1.127 \u0026lt;worker-node\u0026gt; \u0026lt;none\u0026gt; \u0026lt;none\u0026gt; pod/monitoring-kube-prometheus-operator-65cdf7995-w6btr 1/1 Running 0 34s 10.244.1.126 \u0026lt;worker-node\u0026gt; \u0026lt;none\u0026gt; \u0026lt;none\u0026gt; pod/monitoring-kube-state-metrics-56bfd4f44f-5ls8t 1/1 Running 0 34s 10.244.2.139 \u0026lt;worker-node\u0026gt; \u0026lt;none\u0026gt; \u0026lt;none\u0026gt; pod/monitoring-prometheus-node-exporter-5b2f6 1/1 Running 0 34s 100.102.48.84 \u0026lt;worker-node\u0026gt; \u0026lt;none\u0026gt; \u0026lt;none\u0026gt; pod/monitoring-prometheus-node-exporter-fw9xh 1/1 Running 0 34s 100.102.48.28 \u0026lt;worker-node\u0026gt; \u0026lt;none\u0026gt; \u0026lt;none\u0026gt; pod/monitoring-prometheus-node-exporter-s5n9g 1/1 Running 0 34s 100.102.48.121 \u0026lt;master-node\u0026gt; \u0026lt;none\u0026gt; \u0026lt;none\u0026gt; pod/prometheus-monitoring-kube-prometheus-prometheus-0 2/2 Running 0 26s 10.244.1.128 \u0026lt;worker-node\u0026gt; \u0026lt;none\u0026gt; \u0026lt;none\u0026gt; NAME TYPE CLUSTER-IP EXTERNAL-IP PORT(S) AGE SELECTOR service/alertmanager-operated ClusterIP None \u0026lt;none\u0026gt; 9093/TCP,9094/TCP,9094/UDP 27s app.kubernetes.io/name=alertmanager service/monitoring-grafana ClusterIP 10.110.97.252 \u0026lt;none\u0026gt; 80/TCP 34s app.kubernetes.io/instance=monitoring,app.kubernetes.io/name=grafana service/monitoring-kube-prometheus-alertmanager ClusterIP 10.110.82.176 \u0026lt;none\u0026gt; 9093/TCP 34s alertmanager=monitoring-kube-prometheus-alertmanager,app.kubernetes.io/name=alertmanager service/monitoring-kube-prometheus-operator ClusterIP 10.104.147.173 \u0026lt;none\u0026gt; 443/TCP 34s app=kube-prometheus-stack-operator,release=monitoring service/monitoring-kube-prometheus-prometheus ClusterIP 10.110.109.245 \u0026lt;none\u0026gt; 9090/TCP 34s app.kubernetes.io/name=prometheus,prometheus=monitoring-kube-prometheus-prometheus service/monitoring-kube-state-metrics ClusterIP 10.107.111.214 \u0026lt;none\u0026gt; 8080/TCP 34s app.kubernetes.io/instance=monitoring,app.kubernetes.io/name=kube-state-metrics service/monitoring-prometheus-node-exporter ClusterIP 10.108.97.196 \u0026lt;none\u0026gt; 9100/TCP 34s app=prometheus-node-exporter,release=monitoring service/prometheus-operated ClusterIP None \u0026lt;none\u0026gt; 9090/TCP 26s app.kubernetes.io/name=prometheus NAME DESIRED CURRENT READY UP-TO-DATE AVAILABLE NODE SELECTOR AGE CONTAINERS IMAGES SELECTOR daemonset.apps/monitoring-prometheus-node-exporter 3 3 3 3 3 \u0026lt;none\u0026gt; 34s node-exporter quay.io/prometheus/node-exporter:v1.3.1 app=prometheus-node-exporter,release=monitoring NAME READY UP-TO-DATE AVAILABLE AGE CONTAINERS IMAGES SELECTOR 
deployment.apps/monitoring-grafana 0/1 1 0 34s grafana-sc-dashboard,grafana-sc-datasources,grafana quay.io/kiwigrid/k8s-sidecar:1.15.6,quay.io/kiwigrid/k8s-sidecar:1.15.6,grafana/grafana:8.4.2 app.kubernetes.io/instance=monitoring,app.kubernetes.io/name=grafana deployment.apps/monitoring-kube-prometheus-operator 1/1 1 1 34s kube-prometheus-stack quay.io/prometheus-operator/prometheus-operator:v0.55.0 app=kube-prometheus-stack-operator,release=monitoring deployment.apps/monitoring-kube-state-metrics 1/1 1 1 34s kube-state-metrics k8s.gcr.io/kube-state-metrics/kube-state-metrics:v2.4.1 app.kubernetes.io/instance=monitoring,app.kubernetes.io/name=kube-state-metrics NAME DESIRED CURRENT READY AGE CONTAINERS IMAGES SELECTOR replicaset.apps/monitoring-grafana-578f79599c 1 1 0 34s grafana-sc-dashboard,grafana-sc-datasources,grafana quay.io/kiwigrid/k8s-sidecar:1.15.6,quay.io/kiwigrid/k8s-sidecar:1.15.6,grafana/grafana:8.4.2 app.kubernetes.io/instance=monitoring,app.kubernetes.io/name=grafana,pod-template-hash=578f79599c replicaset.apps/monitoring-kube-prometheus-operator-65cdf7995 1 1 1 34s kube-prometheus-stack quay.io/prometheus-operator/prometheus-operator:v0.55.0 app=kube-prometheus-stack-operator,pod-template-hash=65cdf7995,release=monitoring replicaset.apps/monitoring-kube-state-metrics-56bfd4f44f 1 1 1 34s kube-state-metrics k8s.gcr.io/kube-state-metrics/kube-state-metrics:v2.4.1 app.kubernetes.io/instance=monitoring,app.kubernetes.io/name=kube-state-metrics,pod-template-hash=56bfd4f44f NAME READY AGE CONTAINERS IMAGES statefulset.apps/alertmanager-monitoring-kube-prometheus-alertmanager 1/1 27s alertmanager,config-reloader quay.io/prometheus/alertmanager:v0.23.0,quay.io/prometheus-operator/prometheus-config-reloader:v0.55.0 statefulset.apps/prometheus-monitoring-kube-prometheus-prometheus 1/1 26s prometheus,config-reloader quay.io/prometheus/prometheus:v2.33.5,quay.io/prometheus-operator/prometheus-config-reloader:v0.55.0 Add the NodePort Edit the grafana service to add the NodePort:\n$ kubectl edit service/\u0026lt;deployment_name\u0026gt;-grafana -n \u0026lt;namespace\u0026gt; For example:\n$ kubectl edit service/monitoring-grafana -n monitoring Note: This opens an edit session for the domain where parameters can be changed using standard vi commands.\nChange the ports entry and add nodePort: 30091 and type: NodePort:\n ports: - name: http-web nodePort: 30091 port: 80 protocol: TCP targetPort: 3000 selector: app.kubernetes.io/instance: monitoring app.kubernetes.io/name: grafana sessionAffinity: None type: NodePort Save the file and exit (:wq).\n Verify Using Grafana GUI Access the Grafana GUI using http://\u0026lt;HostIP\u0026gt;:\u0026lt;nodeport\u0026gt; and login with admin/prom-operator. Change the password when prompted.\n Download the K8 Cluster Detail Dashboard json file from: https://grafana.com/grafana/dashboards/10856.\n Import the Grafana dashboard by navigating on the left hand menu to Dashboards \u0026gt; Import. Click Upload JSON file and select the json downloaded file. In the Prometheus drop down box select Prometheus. Click Import. The dashboard should be displayed.\n Verify your installation by viewing some of the customized dashboard views.\n " +}, +{ + "uri": "/fmw-kubernetes/23.4.1/idm-products/oam/manage-oam-domains/logging-and-visualization/", + "title": "c. 
Logging and Visualization", + "tags": [], + "description": "Describes the steps for logging and visualization with Elasticsearch and Kibana.", + "content": "After the OAM domain is set up you can publish operator and WebLogic Server logs into Elasticsearch and interact with them in Kibana.\nInstall Elasticsearch stack and Kibana If you do not already have a centralized Elasticsearch (ELK) stack then you must configure this first. For details on how to configure the ELK stack, follow Installing Elasticsearch (ELK) Stack and Kibana\nCreate the logstash pod Variables used in this chapter In order to create the logstash pod, you must create several files. These files contain variables which you must substitute with variables applicable to your environment.\nMost of the values for the variables will be based on your ELK deployment as per Installing Elasticsearch (ELK) Stack and Kibana.\nThe table below outlines the variables and values you must set:\n Variable Sample Value Description \u0026lt;ELK_VER\u0026gt; 8.3.1 The version of logstash you want to install. \u0026lt;ELK_SSL\u0026gt; true If SSL is enabled for ELK set the value to true, or if NON-SSL set to false. This value must be lowercase. \u0026lt;ELK_HOSTS\u0026gt; https://elasticsearch.example.com:9200 The URL for sending logs to Elasticsearch. HTTP if NON-SSL is used. \u0026lt;ELKNS\u0026gt; oamns The domain namespace. \u0026lt;ELK_USER\u0026gt; logstash_internal The name of the user for logstash to access Elasticsearch. \u0026lt;ELK_PASSWORD\u0026gt; password The password for ELK_USER. \u0026lt;ELK_APIKEY\u0026gt; apikey The API key details. You will also need the BASE64 version of the Certificate Authority (CA) certificate(s) that signed the certificate of the Elasticsearch server. If using a self-signed certificate, this is the self signed certificate of the Elasticsearch server. See Copying the Elasticsearch Certificate for details on how to get the correct certificate. In the example below the certificate is called elk.crt.\nCreate Kubernetes secrets Create a Kubernetes secret for Elasticsearch using the API Key or Password.\na) If ELK uses an API Key for authentication:\n$ kubectl create secret generic elasticsearch-pw-elastic -n \u0026lt;domain_namespace\u0026gt; --from-literal password=\u0026lt;ELK_APIKEY\u0026gt; For example:\n$ kubectl create secret generic elasticsearch-pw-elastic -n oamns --from-literal password=\u0026lt;ELK_APIKEY\u0026gt; The output will look similar to the following:\nsecret/elasticsearch-pw-elastic created b) If ELK uses a password for authentication:\n$ kubectl create secret generic elasticsearch-pw-elastic -n \u0026lt;domain_namespace\u0026gt; --from-literal password=\u0026lt;ELK_PASSWORD\u0026gt; For example:\n$ kubectl create secret generic elasticsearch-pw-elastic -n oamns --from-literal password=\u0026lt;ELK_PASSWORD\u0026gt; The output will look similar to the following:\nsecret/elasticsearch-pw-elastic created Note: It is recommended that the ELK Stack is created with authentication enabled. 
If no authentication is enabled you may create a secret using the values above.\n Create a Kubernetes secret to access the required images on hub.docker.com:\nNote: Before executing the command below, you must first have a user account on hub.docker.com.\nkubectl create secret docker-registry \u0026#34;dockercred\u0026#34; --docker-server=\u0026#34;https://index.docker.io/v1/\u0026#34; \\ --docker-username=\u0026#34;\u0026lt;DOCKER_USER_NAME\u0026gt;\u0026#34; \\ --docker-password=\u0026lt;DOCKER_PASSWORD\u0026gt; --docker-email=\u0026lt;DOCKER_EMAIL_ID\u0026gt; \\ --namespace=\u0026lt;domain_namespace\u0026gt; For example,\nkubectl create secret docker-registry \u0026#34;dockercred\u0026#34; --docker-server=\u0026#34;https://index.docker.io/v1/\u0026#34; \\ --docker-username=\u0026#34;user@example.com\u0026#34; \\ --docker-password=password --docker-email=user@example.com \\ --namespace=oamns The output will look similar to the following:\nsecret/dockercred created Find the mountPath details Run the following command to get the mountPath of your domain:\n$ kubectl describe domains \u0026lt;domain_uid\u0026gt; -n \u0026lt;domain_namespace\u0026gt; | grep \u0026#34;Mount Path\u0026#34; For example:\n$ kubectl describe domains accessdomain -n oamns | grep \u0026#34;Mount Path\u0026#34; The output will look similar to the following:\nMount Path: /u01/oracle/user_projects/domains Find the persistentVolumeClaim details Run the following command to get the OAM domain persistence volume details:\n$ kubectl get pv -n \u0026lt;domain_namespace\u0026gt; For example:\n$ kubectl get pv -n oamns The output will look similar to the following:\nNAME CAPACITY ACCESS MODES RECLAIM POLICY STATUS CLAIM STORAGECLASS REASON AGE accessdomain-domain-pv 10Gi RWX Retain Bound oamns/accessdomain-domain-pvc accessdomain-domain-storage-class 23h Make note of the CLAIM value, for example in this case accessdomain-domain-pvc.\n Create the Configmap Copy the elk.crt file to the $WORKDIR/kubernetes/elasticsearch-and-kibana directory.\n Navigate to the $WORKDIR/kubernetes/elasticsearch-and-kibana directory and run the following:\nkubectl create configmap elk-cert --from-file=elk.crt -n \u0026lt;namespace\u0026gt; For example:\nkubectl create configmap elk-cert --from-file=elk.crt -n oamns The output will look similar to the following:\nconfigmap/elk-cert created Create a logstash_cm.yaml file in the $WORKDIR/kubernetes/elasticsearch-and-kibana directory as follows:\napiVersion: v1 kind: ConfigMap metadata: name: oam-logstash-configmap namespace: \u0026lt;ELKNS\u0026gt; data: logstash.yml: | #http.host: \u0026quot;0.0.0.0\u0026quot; logstash-config.conf: | input { file { path =\u0026gt; \u0026quot;/u01/oracle/user_projects/domains/logs/accessdomain/AdminServer*.log\u0026quot; tags =\u0026gt; \u0026quot;Adminserver_log\u0026quot; start_position =\u0026gt; beginning } file { path =\u0026gt; \u0026quot;/u01/oracle/user_projects/domains/logs/accessdomain/oam_policy_mgr*.log\u0026quot; tags =\u0026gt; \u0026quot;Policymanager_log\u0026quot; start_position =\u0026gt; beginning } file { path =\u0026gt; \u0026quot;/u01/oracle/user_projects/domains/logs/accessdomain/oam_server*.log\u0026quot; tags =\u0026gt; \u0026quot;Oamserver_log\u0026quot; start_position =\u0026gt; beginning } file { path =\u0026gt; \u0026quot;/u01/oracle/user_projects/domains/accessdomain/servers/AdminServer/logs/AdminServer-diagnostic.log\u0026quot; tags =\u0026gt; \u0026quot;Adminserver_diagnostic\u0026quot; start_position =\u0026gt; beginning } file { path 
=\u0026gt; \u0026quot;/u01/oracle/user_projects/domains/accessdomain/servers/**/logs/oam_policy_mgr*-diagnostic.log\u0026quot; tags =\u0026gt; \u0026quot;Policy_diagnostic\u0026quot; start_position =\u0026gt; beginning } file { path =\u0026gt; \u0026quot;/u01/oracle/user_projects/domains/accessdomain/servers/AdminServer/logs/auditlogs/OAM/audit.log\u0026quot; tags =\u0026gt; \u0026quot;Audit_logs\u0026quot; start_position =\u0026gt; beginning } } filter { grok { match =\u0026gt; [ \u0026quot;message\u0026quot;, \u0026quot;\u0026lt;%{DATA:log_timestamp}\u0026gt; \u0026lt;%{WORD:log_level}\u0026gt; \u0026lt;%{WORD:thread}\u0026gt; \u0026lt;%{HOSTNAME:hostname}\u0026gt; \u0026lt;%{HOSTNAME:servername}\u0026gt; \u0026lt;%{DATA:timer}\u0026gt; \u0026lt;\u0026lt;%{DATA:kernel}\u0026gt;\u0026gt; \u0026lt;\u0026gt; \u0026lt;%{DATA:uuid}\u0026gt; \u0026lt;%{NUMBER:timestamp}\u0026gt; \u0026lt;%{DATA:misc}\u0026gt; \u0026lt;%{DATA:log_number}\u0026gt; \u0026lt;%{DATA:log_message}\u0026gt;\u0026quot; ] } if \u0026quot;_grokparsefailure\u0026quot; in [tags] { mutate { remove_tag =\u0026gt; [ \u0026quot;_grokparsefailure\u0026quot; ] } } } output { elasticsearch { hosts =\u0026gt; [\u0026quot;\u0026lt;ELK_HOSTS\u0026gt;\u0026quot;] cacert =\u0026gt; '/usr/share/logstash/config/certs/elk.crt' index =\u0026gt; \u0026quot;oamlogs-000001\u0026quot; ssl =\u0026gt; true ssl_certificate_verification =\u0026gt; false user =\u0026gt; \u0026quot;\u0026lt;ELK_USER\u0026gt;\u0026quot; password =\u0026gt; \u0026quot;${ELASTICSEARCH_PASSWORD}\u0026quot; api_key =\u0026gt; \u0026quot;${ELASTICSEARCH_PASSWORD}\u0026quot; } } Change the values in the above file as follows:\n Change the \u0026lt;ELKNS\u0026gt;, \u0026lt;ELK_HOSTS\u0026gt;, \u0026lt;ELK_SSL\u0026gt;, and \u0026lt;ELK_USER\u0026gt; to match the values for your environment. Change /u01/oracle/user_projects/domains to match the mountPath returned earlier If your domainUID is anything other than accessdomain, change each instance of accessdomain to your domainUID. If using API KEY for your ELK authentication, delete the user and password lines. If using a password for ELK authentication, delete the api_key line. If no authentication is used for ELK, delete the user, password, and api_key lines. 
For example:\napiVersion: v1 kind: ConfigMap metadata: name: oam-logstash-configmap namespace: oamns data: logstash.yml: | #http.host: \u0026quot;0.0.0.0\u0026quot; logstash-config.conf: | input { file { path =\u0026gt; \u0026quot;/u01/oracle/user_projects/domains/logs/accessdomain/AdminServer*.log\u0026quot; tags =\u0026gt; \u0026quot;Adminserver_log\u0026quot; start_position =\u0026gt; beginning } file { path =\u0026gt; \u0026quot;/u01/oracle/user_projects/domains/logs/accessdomain/oam_policy_mgr*.log\u0026quot; tags =\u0026gt; \u0026quot;Policymanager_log\u0026quot; start_position =\u0026gt; beginning } file { path =\u0026gt; \u0026quot;/u01/oracle/user_projects/domains/logs/accessdomain/oam_server*.log\u0026quot; tags =\u0026gt; \u0026quot;Oamserver_log\u0026quot; start_position =\u0026gt; beginning } file { path =\u0026gt; \u0026quot;/u01/oracle/user_projects/domains/accessdomain/servers/AdminServer/logs/AdminServer-diagnostic.log\u0026quot; tags =\u0026gt; \u0026quot;Adminserver_diagnostic\u0026quot; start_position =\u0026gt; beginning } file { path =\u0026gt; \u0026quot;/u01/oracle/user_projects/domains/accessdomain/servers/**/logs/oam_policy_mgr*-diagnostic.log\u0026quot; tags =\u0026gt; \u0026quot;Policy_diagnostic\u0026quot; start_position =\u0026gt; beginning } file { path =\u0026gt; \u0026quot;/u01/oracle/user_projects/domains/accessdomain/servers/AdminServer/logs/auditlogs/OAM/audit.log\u0026quot; tags =\u0026gt; \u0026quot;Audit_logs\u0026quot; start_position =\u0026gt; beginning } } filter { grok { match =\u0026gt; [ \u0026quot;message\u0026quot;, \u0026quot;\u0026lt;%{DATA:log_timestamp}\u0026gt; \u0026lt;%{WORD:log_level}\u0026gt; \u0026lt;%{WORD:thread}\u0026gt; \u0026lt;%{HOSTNAME:hostname}\u0026gt; \u0026lt;%{HOSTNAME:servername}\u0026gt; \u0026lt;%{DATA:timer}\u0026gt; \u0026lt;\u0026lt;%{DATA:kernel}\u0026gt;\u0026gt; \u0026lt;\u0026gt; \u0026lt;%{DATA:uuid}\u0026gt; \u0026lt;%{NUMBER:timestamp}\u0026gt; \u0026lt;%{DATA:misc}\u0026gt; \u0026lt;%{DATA:log_number}\u0026gt; \u0026lt;%{DATA:log_message}\u0026gt;\u0026quot; ] } if \u0026quot;_grokparsefailure\u0026quot; in [tags] { mutate { remove_tag =\u0026gt; [ \u0026quot;_grokparsefailure\u0026quot; ] } } } output { elasticsearch { hosts =\u0026gt; [\u0026quot;https://elasticsearch.example.com:9200\u0026quot;] cacert =\u0026gt; '/usr/share/logstash/config/certs/elk.crt' index =\u0026gt; \u0026quot;oamlogs-000001\u0026quot; ssl =\u0026gt; true ssl_certificate_verification =\u0026gt; false user =\u0026gt; \u0026quot;logstash_internal\u0026quot; password =\u0026gt; \u0026quot;${ELASTICSEARCH_PASSWORD}\u0026quot; } } Run the following command to create the configmap:\n$ kubectl apply -f logstash_cm.yaml The output will look similar to the following:\nconfigmap/oam-logstash-configmap created Deploy the logstash pod Navigate to the $WORKDIR/kubernetes/elasticsearch-and-kibana directory and create a logstash.yaml file as follows:\napiVersion: apps/v1 kind: Deployment metadata: name: oam-logstash namespace: \u0026lt;ELKNS\u0026gt; spec: selector: matchLabels: k8s-app: logstash template: # create pods using pod definition in this template metadata: labels: k8s-app: logstash spec: imagePullSecrets: - name: dockercred containers: - command: - logstash image: logstash:\u0026lt;ELK_VER\u0026gt; imagePullPolicy: IfNotPresent name: oam-logstash env: - name: ELASTICSEARCH_PASSWORD valueFrom: secretKeyRef: name: elasticsearch-pw-elastic key: password resources: ports: - containerPort: 5044 name: logstash volumeMounts: - mountPath: 
/u01/oracle/user_projects name: weblogic-domain-storage-volume - name: shared-logs mountPath: /shared-logs - mountPath: /usr/share/logstash/pipeline/ name: oam-logstash-pipeline - mountPath: /usr/share/logstash/config/logstash.yml subPath: logstash.yml name: config-volume - mountPath: /usr/share/logstash/config/certs name: elk-cert volumes: - configMap: defaultMode: 420 items: - key: elk.crt path: elk.crt name: elk-cert name: elk-cert - configMap: defaultMode: 420 items: - key: logstash-config.conf path: logstash-config.conf name: oam-logstash-configmap name: oam-logstash-pipeline - configMap: defaultMode: 420 items: - key: logstash.yml path: logstash.yml name: oam-logstash-configmap name: config-volume - name: weblogic-domain-storage-volume persistentVolumeClaim: claimName: accessdomain-domain-pvc - name: shared-logs emptyDir: {} Change the \u0026lt;ELKNS\u0026gt;, \u0026lt;ELK_VER\u0026gt; to match the values for your environment. Change /u01/oracle/user_projects/domains to match the mountPath returned earlier Change the claimName value to match the claimName returned earlier If your Kubernetes environment does not allow access to the internet to pull the logstash image, you must load the logstash image in your own container registry and change image: logstash:\u0026lt;ELK_VER\u0026gt; to the location of the image in your container registry e.g: container-registry.example.com/logstash:8.3.1 For example:\napiVersion: apps/v1 kind: Deployment metadata: name: oam-logstash namespace: oamns spec: selector: matchLabels: k8s-app: logstash template: # create pods using pod definition in this template metadata: labels: k8s-app: logstash spec: imagePullSecrets: - name: dockercred containers: - command: - logstash image: logstash:8.3.1 imagePullPolicy: IfNotPresent name: oam-logstash env: - name: ELASTICSEARCH_PASSWORD valueFrom: secretKeyRef: name: elasticsearch-pw-elastic key: password resources: ports: - containerPort: 5044 name: logstash volumeMounts: - mountPath: /u01/oracle/user_projects/domains name: weblogic-domain-storage-volume - name: shared-logs mountPath: /shared-logs - mountPath: /usr/share/logstash/pipeline/ name: oam-logstash-pipeline - mountPath: /usr/share/logstash/config/logstash.yml subPath: logstash.yml name: config-volume - mountPath: /usr/share/logstash/config/certs name: elk-cert volumes: - configMap: defaultMode: 420 items: - key: elk.crt path: elk.crt name: elk-cert name: elk-cert - configMap: defaultMode: 420 items: - key: logstash-config.conf path: logstash-config.conf name: oam-logstash-configmap name: oam-logstash-pipeline - configMap: defaultMode: 420 items: - key: logstash.yml path: logstash.yml name: oam-logstash-configmap name: config-volume - name: weblogic-domain-storage-volume persistentVolumeClaim: claimName: accessdomain-domain-pvc - name: shared-logs emptyDir: {} Deploy the logstash pod by executing the following command:\n$ kubectl create -f $WORKDIR/kubernetes/elasticsearch-and-kibana/logstash.yaml The output will look similar to the following:\ndeployment.apps/oam-logstash created Run the following command to check the logstash pod is created correctly:\n$ kubectl get pods -n \u0026lt;namespace\u0026gt; For example:\n$ kubectl get pods -n oamns The output should look similar to the following:\nNAME READY STATUS RESTARTS AGE accessdomain-adminserver 1/1 Running 0 18h accessdomain-create-oam-infra-domain-job-7c9r9 0/1 Completed 0 23h accessdomain-oam-policy-mgr1 1/1 Running 0 18h accessdomain-oam-policy-mgr2 1/1 Running 0 18h accessdomain-oam-server1 1/1 
Running 1 18h accessdomain-oam-server2 1/1 Running 1 18h elasticsearch-f7b7c4c4-tb4pp 1/1 Running 0 5m helper 1/1 Running 0 23h nginx-ingress-ingress-nginx-controller-76fb7678f-k8rhq 1/1 Running 0 20h oam-logstash-bbbdf5876-85nkd 1/1 Running 0 4m23s Note: Wait a couple of minutes to make sure the pod has not had any failures or restarts. If the pod fails you can view the pod log using:\n$ kubectl logs -f oam-logstash-\u0026lt;pod\u0026gt; -n oamns Most errors occur due to misconfiguration of the logstash_cm.yaml or logstash.yaml. This is usually because of an incorrect value set, or the certificate was not pasted with the correct indentation.\nIf the pod has errors, delete the pod and configmap as follows:\n$ kubectl delete -f $WORKDIR/kubernetes/elasticsearch-and-kibana/logstash.yaml $ kubectl delete -f $WORKDIR/kubernetes/elasticsearch-and-kibana/logstash_cm.yaml Once you have resolved the issue in the yaml files, run the commands outlined earlier to recreate the configmap and logstash pod.\n Verify and access the Kibana console To access the Kibana console you will need the Kibana URL as per Installing Elasticsearch (ELK) Stack and Kibana.\nFor Kibana 7.7.x and below:\n Access the Kibana console with http://\u0026lt;hostname\u0026gt;:\u0026lt;port\u0026gt;/app/kibana and login with your username and password.\n From the Navigation menu, navigate to Management \u0026gt; Kibana \u0026gt; Index Patterns.\n In the Create Index Pattern page enter oamlogs* for the Index pattern and click Next Step.\n In the Configure settings page, from the Time Filter field name drop down menu select @timestamp and click Create index pattern.\n Once the index pattern is created click on Discover in the navigation menu to view the OAM logs.\n For Kibana version 7.8.X and above:\n Access the Kibana console with http://\u0026lt;hostname\u0026gt;:\u0026lt;port\u0026gt;/app/kibana and login with your username and password.\n From the Navigation menu, navigate to Management \u0026gt; Stack Management.\n Click Data Views in the Kibana section.\n Click Create Data View and enter the following information:\n Name: oamlogs* Timestamp: @timestamp Click Create Data View.\n From the Navigation menu, click Discover to view the log file entries.\n From the drop down menu, select oamlogs* to view the log file entries.\n " +}, +{ + "uri": "/fmw-kubernetes/23.4.1/idm-products/oig/manage-oig-domains/running-oig-utilities/", + "title": "c. Runnning OIG utilities", + "tags": [], + "description": "Describes the steps for running OIG utilities in Kubernetes.", + "content": "Run OIG utlities inside the OIG Kubernetes cluster.\nRun utilities in an interactive bash shell Access a bash shell inside the \u0026lt;domain_uid\u0026gt;-oim-server1 pod:\n$ kubectl -n oigns exec -it \u0026lt;domain_uid\u0026gt;-oim-server1 -- bash For example:\n$ kubectl -n oigns exec -it governancedomain-oim-server1 -- bash This will take you into a bash shell in the running \u0026lt;domain_uid\u0026gt;-oim-server1 pod:\n[oracle@governancedomain-oim-server1 oracle]$ Navigate to the /u01/oracle/idm/server/bin directory and execute the utility as required. 
For example:\n[oracle@governancedomain-oim-server1 oracle] cd /u01/oracle/idm/server/bin [oracle@governancedomain-oim-server1 bin]$ ./\u0026lt;filename\u0026gt;.sh Note: Some utilties such as PurgeCache.sh, GenerateSnapshot.sh etc, may prompt to enter the t3 URL, for example:\n[oracle@governancedomain-oim-server1 bin]$ sh GenerateSnapshot.sh For running the Utilities the following environment variables need to be set APP_SERVER is weblogic OIM_ORACLE_HOME is /u01/oracle/idm/ JAVA_HOME is /u01/jdk MW_HOME is /u01/oracle WL_HOME is /u01/oracle/wlserver DOMAIN_HOME is /u01/oracle/user_projects/domains/governancedomain Executing -Dweblogic.security.SSL.trustedCAKeyStore= in IPv4 mode [Enter Xellerate admin username :]xelsysadm [Enter password for xelsysadm :] [Threads to use [ 8 ]] [Enter serverURL :[t3://oimhostname:oimportno ]] To find the t3 URL run:\n$ kubectl get services -n oigns | grep oim-cluster The output will look similar to the following:\ngovernancedomain-cluster-oim-cluster ClusterIP 10.110.161.82 \u0026lt;none\u0026gt; 14002/TCP,14000/TCP 4d In this case the t3 URL is: t3://governancedomain-cluster-oim-cluster:14000.\n Passing inputs as a jar/xml file Copy the input file to pass to a directory of your choice.\n Run the following command to copy the input file to the running governancedomain-oim-server1 pod.\n$ kubectl -n oigns cp /\u0026lt;path\u0026gt;/\u0026lt;inputFile\u0026gt; governancedomain-oim-server1:/u01/oracle/idm/server/bin/ Access a bash shell inside the governancedomain-oim-server1 pod:\n$ kubectl -n oigns exec -it governancedomain-oim-server1 -- bash This will take you into a bash shell in the running governancedomain-oim-server1 pod:\n[oracle@governancedomain-oim-server1 oracle]$ Navigate to the /u01/oracle/idm/server/bin directory and execute the utility as required, passing the input file. For example:\n[oracle@governancedomain-oim-server1 oracle] cd /u01/oracle/idm/server/bin [oracle@governancedomain-oim-server1 bin]$ ./\u0026lt;filename\u0026gt;.sh -inputFile \u0026lt;inputFile\u0026gt; Note As pods are stateless the copied input file will remain until the pod restarts.\n Editing property/profile files To edit a property/profile file in the Kubernetes cluster:\n Copy the input file from the pod to a on the local system, for example:\n$ kubectl -n oigns cp governancedomain-oim-server1:/u01/oracle/idm/server/bin/\u0026lt;file.properties_profile\u0026gt; /\u0026lt;path\u0026gt;/\u0026lt;file.properties_profile\u0026gt; Note: If you see the message tar: Removing leading '/' from member names this can be ignored.\n Edit the \u0026lt;/path\u0026gt;/\u0026lt;file.properties_profile\u0026gt; in an editor of your choice.\n Copy the file back to the pod:\n$ kubectl -n oigns cp /\u0026lt;path\u0026gt;/\u0026lt;file.properties_profile\u0026gt; governancedomain-oim-server1:/u01/oracle/idm/server/bin/ Note: As pods are stateless the copied input file will remain until the pod restarts. Preserve a local copy in case you need to copy files back after pod restart.\n " +}, +{ + "uri": "/fmw-kubernetes/23.4.1/idm-products/oam/patch-and-upgrade/upgrade-an-ingress/", + "title": "c. 
Upgrade Ingress", + "tags": [], + "description": "Instructions on how to upgrade the ingress.", + "content": "This section shows how to upgrade the ingress.\nTo determine if this step is required for the version you are upgrading to, refer to the Release Notes.\nDownload the latest code repository Download the latest code repository as follows:\n Create a working directory to setup the source code.\n$ mkdir \u0026lt;workdir\u0026gt; For example:\n$ mkdir /scratch/OAMK8Slatest Download the latest OAM deployment scripts from the OAM repository.\n$ cd \u0026lt;workdir\u0026gt; $ git clone https://github.com/oracle/fmw-kubernetes.git --branch release/23.4.1 For example:\n$ cd /scratch/OAMK8Slatest $ git clone https://github.com/oracle/fmw-kubernetes.git --branch release/23.4.1 Set the $WORKDIR environment variable as follows:\n$ export WORKDIR=\u0026lt;workdir\u0026gt;/fmw-kubernetes/OracleAccessManagement For example:\n$ export WORKDIR=/scratch/OAMK8Slatest/fmw-kubernetes/OracleAccessManagement Upgrading the ingress To upgrade the existing ingress rules, follow the steps below:\n List the existing ingress:\n$ helm list -n oamns The output will look similar to the following:\nNAME NAMESPACE REVISION UPDATED STATUS CHART APP VERSION nginx-ingress oamns 1 \u0026lt;DATE\u0026gt; deployed ingress-nginx-4.3.0 1.4.0 oam-nginx oamns 1 \u0026lt;DATE\u0026gt; deployed ingress-per-domain-0.1.0 1.0 Edit the $WORKDIR/kubernetes/charts/ingress-per-domain/values.yaml and change the domainUID parameter to match your domainUID, for example domainUID: accessdomain. For example:\n# Load balancer type. Supported values are: NGINX type: NGINX # SSL configuration Type. Supported Values are : NONSSL,SSL sslType: SSL # domainType. Supported values are: oam domainType: oam #WLS domain as backend to the load balancer wlsDomain: domainUID: accessdomain adminServerName: AdminServer adminServerPort: 7001 adminServerSSLPort: oamClusterName: oam_cluster oamManagedServerPort: 14100 oamManagedServerSSLPort: policyClusterName: policy_cluster policyManagedServerPort: 15100 policyManagedServerSSLPort: # Host specific values hostName: enabled: false admin: runtime: Upgrade the oam-nginx with the following command:\n$ helm upgrade oam-nginx kubernetes/charts/ingress-per-domain/ --namespace oamns --values kubernetes/charts/ingress-per-domain/values.yaml --reuse-values The output will look similar to the following:\nRelease \u0026quot;oam-nginx\u0026quot; has been upgraded. Happy Helming! 
NAME: oam-nginx LAST DEPLOYED: \u0026lt;DATE\u0026gt; NAMESPACE: oamns STATUS: deployed REVISION: 2 TEST SUITE: None List the ingress:\n$ kubectl get ing -n oamns The output will look similar to the following:\nNAME CLASS HOSTS ADDRESS PORTS AGE accessdomain-nginx \u0026lt;none\u0026gt; * 10.99.189.61 80 18s Describe the ingress and make sure all the listed paths are accessible:\n$ kubectl describe ing accessdomain-nginx -n oamns The output will look similar to the following:\nName: accessdomain-nginx Labels: app.kubernetes.io/managed-by=Helm Namespace: oamns Address: 10.99.189.61 Default backend: default-http-backend:80 (\u0026lt;error: endpoints \u0026quot;default-http-backend\u0026quot; not found\u0026gt;) Rules: Host Path Backends ---- ---- -------- * /console accessdomain-adminserver:7001 (10.244.1.224:7001) /consolehelp accessdomain-adminserver:7001 (10.244.1.224:7001) /rreg/rreg accessdomain-adminserver:7001 (10.244.1.224:7001) /em accessdomain-adminserver:7001 (10.244.1.224:7001) /oamconsole accessdomain-adminserver:7001 (10.244.1.224:7001) /dms accessdomain-adminserver:7001 (10.244.1.224:7001) /oam/services/rest accessdomain-adminserver:7001 (10.244.1.224:7001) /iam/admin/config accessdomain-adminserver:7001 (10.244.1.224:7001) /iam/admin/diag accessdomain-adminserver:7001 (10.244.1.224:7001) /iam/access accessdomain-cluster-oam-cluster:14100 (10.244.1.225:14100) /oam/admin/api accessdomain-adminserver:7001 (10.244.1.224:7001) /oam/services/rest/access/api accessdomain-cluster-oam-cluster:14100 (10.244.1.225:14100) /access accessdomain-cluster-policy-cluster:15100 (10.244.1.226:15100) / accessdomain-cluster-oam-cluster:14100 (10.244.1.225:14100) Annotations: kubernetes.io/ingress.class: nginx meta.helm.sh/release-name: oam-nginx meta.helm.sh/release-namespace: oamns nginx.ingress.kubernetes.io/configuration-snippet: more_clear_input_headers \u0026quot;WL-Proxy-Client-IP\u0026quot; \u0026quot;WL-Proxy-SSL\u0026quot;; more_set_input_headers \u0026quot;X-Forwarded-Proto: https\u0026quot;; more_set_input_headers \u0026quot;WL-Proxy-SSL: true\u0026quot;; nginx.ingress.kubernetes.io/enable-access-log: false nginx.ingress.kubernetes.io/ingress.allow-http: false nginx.ingress.kubernetes.io/proxy-buffer-size: 2000k Events: Type Reason Age From Message ---- ------ ---- ---- ------- Normal Sync 55s (x2 over 63s) nginx-ingress-controller Scheduled for sync " +}, +{ + "uri": "/fmw-kubernetes/23.4.1/idm-products/oig/patch-and-upgrade/upgrade-an-ingress/", + "title": "c. Upgrade Ingress", + "tags": [], + "description": "Instructions on how to upgrade the ingress.", + "content": "This section shows how to upgrade the ingress.\nTo determine if this step is required for the version you are upgrading to, refer to the Release Notes.\nUpgrading the ingress To upgrade the existing ingress rules, follow the steps below:\n List the existing ingress:\n$ helm list -n \u0026lt;domain_namespace\u0026gt; For example:\n$ helm list -n oigns The output will look similar to the following:\nNAME NAMESPACE REVISION UPDATED STATUS CHART APP VERSION governancedomain-nginx oigns 1 \u0026lt;DATE\u0026gt; deployed ingress-per-domain-0.1.0 1.0 Make sure you have downloaded the latest code as per Download the latest code repository.\n Edit the $WORKDIR/kubernetes/charts/ingress-per-domain/values.yaml and change the domainUID parameter to match your domainUID, for example domainUID: governancedomain. Change sslType to NONSSL or SSL depending on your existing configuration. For example:\n# Load balancer type. 
Supported values are: NGINX type: NGINX # SSL configuration Type. Supported Values are : NONSSL,SSL sslType: SSL # domainType. Supported values are: oim domainType: oim #WLS domain as backend to the load balancer wlsDomain: domainUID: governancedomain adminServerName: AdminServer adminServerPort: 7001 adminServerSSLPort: soaClusterName: soa_cluster soaManagedServerPort: 8001 soaManagedServerSSLPort: oimClusterName: oim_cluster oimManagedServerPort: 14000 oimManagedServerSSLPort: # Host specific values hostName: enabled: false admin: runtime: internal: # Ngnix specific values nginx: nginxTimeOut: 180 Upgrade the governancedomain-nginx with the following command:\n$ cd $WORKDIR $ helm upgrade \u0026lt;ingress\u0026gt; kubernetes/charts/ingress-per-domain/ --namespace \u0026lt;domain_namespace\u0026gt; --values kubernetes/charts/ingress-per-domain/values.yaml --reuse-values For example:\n$ cd $WORKDIR $ helm upgrade governancedomain-nginx kubernetes/charts/ingress-per-domain/ --namespace oigns --values kubernetes/charts/ingress-per-domain/values.yaml --reuse-values The output will look similar to the following:\nRelease \u0026quot;governancedomain-nginx\u0026quot; has been upgraded. Happy Helming! NAME: governancedomain-nginx LAST DEPLOYED: \u0026lt;DATE\u0026gt; NAMESPACE: oigns STATUS: deployed REVISION: 2 TEST SUITE: None List the ingress:\n$ kubectl get ing -n oigns The output will look similar to the following:\nNAME CLASS HOSTS ADDRESS PORTS AGE governancedomain-nginx \u0026lt;none\u0026gt; * 10.107.182.40 80 18s Describe the ingress and make sure all the listed paths are accessible:\n$ kubectl describe ing governancedomain-nginx -n oigns The output will look similar to the following:\nName: governancedomain-nginx Namespace: oigns Address: 10.107.182.40 Default backend: default-http-backend:80 (\u0026lt;error: endpoints \u0026quot;default-http-backend\u0026quot; not found\u0026gt;) Rules: Host Path Backends ---- ---- -------- * /console governancedomain-adminserver:7001 (10.244.4.240:7001) /consolehelp governancedomain-adminserver:7001 (10.244.4.240:7001) /em governancedomain-adminserver:7001 (10.244.4.240:7001) /ws_utc governancedomain-cluster-soa-cluster:8001 (10.244.4.242:8001) /soa governancedomain-cluster-soa-cluster:8001 (10.244.4.242:8001) /integration governancedomain-cluster-soa-cluster:8001 (10.244.4.242:8001) /soa-infra governancedomain-cluster-soa-cluster:8001 (10.244.4.242:8001) /identity governancedomain-cluster-oim-cluster:14000 (10.244.4.241:14000) /admin governancedomain-cluster-oim-cluster:14000 (10.244.4.241:14000) /oim governancedomain-cluster-oim-cluster:14000 (10.244.4.241:14000) /sysadmin governancedomain-cluster-oim-cluster:14000 (10.244.4.241:14000) /workflowservice governancedomain-cluster-oim-cluster:14000 (10.244.4.241:14000) /callbackResponseService governancedomain-cluster-oim-cluster:14000 (10.244.4.241:14000) /spml-xsd governancedomain-cluster-oim-cluster:14000 (10.244.4.241:14000) /HTTPClnt governancedomain-cluster-oim-cluster:14000 (10.244.4.241:14000) /reqsvc governancedomain-cluster-oim-cluster:14000 (10.244.4.241:14000) /iam governancedomain-cluster-oim-cluster:14000 (10.244.4.241:14000) /provisioning-callback governancedomain-cluster-oim-cluster:14000 (10.244.4.241:14000) /CertificationCallbackService governancedomain-cluster-oim-cluster:14000 (10.244.4.241:14000) /ucs governancedomain-cluster-oim-cluster:14000 (10.244.4.241:14000) /FacadeWebApp governancedomain-cluster-oim-cluster:14000 (10.244.4.241:14000) /OIGUI 
governancedomain-cluster-oim-cluster:14000 (10.244.4.241:14000) /weblogic governancedomain-cluster-oim-cluster:14000 (10.244.4.241:14000) Annotations: kubernetes.io/ingress.class: nginx meta.helm.sh/release-name: governancedomain-nginx meta.helm.sh/release-namespace: oigns nginx.ingress.kubernetes.io/affinity: cookie nginx.ingress.kubernetes.io/affinity-mode: persistent nginx.ingress.kubernetes.io/configuration-snippet: more_clear_input_headers \u0026quot;WL-Proxy-Client-IP\u0026quot; \u0026quot;WL-Proxy-SSL\u0026quot;; more_set_input_headers \u0026quot;X-Forwarded-Proto: https\u0026quot;; more_set_input_headers \u0026quot;WL-Proxy-SSL: true\u0026quot;; nginx.ingress.kubernetes.io/enable-access-log: false nginx.ingress.kubernetes.io/ingress.allow-http: false nginx.ingress.kubernetes.io/proxy-buffer-size: 2000k nginx.ingress.kubernetes.io/proxy-read-timeout: 180 nginx.ingress.kubernetes.io/proxy-send-timeout: 180 nginx.ingress.kubernetes.io/session-cookie-name: sticky Events: Type Reason Age From Message ---- ------ ---- ---- ------- Normal Sync 51m (x3 over 54m) nginx-ingress-controller Scheduled for sync " +}, +{ + "uri": "/fmw-kubernetes/23.4.1/categories/", + "title": "Categories", + "tags": [], + "description": "", + "content": "" +}, +{ + "uri": "/fmw-kubernetes/23.4.1/idm-products/oud/manage-oud-containers/hpa/", + "title": "d. Kubernetes Horizontal Pod Autoscaler", + "tags": [], + "description": "Describes the steps for implementing the Horizontal Pod Autoscaler.", + "content": " Prerequisite configuration Deploy the Kubernetes Metrics Server Troubleshooting Deploy HPA Testing HPA Delete the HPA Other considerations Kubernetes Horizontal Pod Autoscaler (HPA) allows automatic scaling (up and down) of the OUD servers. If load increases then extra OUD servers will be started as required. Similarly, if load decreases, OUD servers will be automatically shutdown.\nFor more information on HPA, see Horizontal Pod Autoscaling.\nThe instructions below show you how to configure and run an HPA to scale OUD servers, based on CPU utilization or memory resource metrics.\nNote: If you enable HPA and then decide you want to start/stop/scale OUD servers manually as per Scaling Up/Down OUD Pods, it is recommended to delete HPA beforehand as per Delete the HPA.\nPrerequisite configuration In order to use HPA, OUD must have been created with the required resources parameter as per Create OUD instances. For example:\noudConfig: # memory, cpu parameters for both requests and limits for oud instances resources: limits: cpu: \u0026quot;1\u0026quot; memory: \u0026quot;8Gi\u0026quot; requests: cpu: \u0026quot;500m\u0026quot; memory: \u0026quot;4Gi\u0026quot; If you created the OUD servers at any point since July 22 (22.3.1) then these values are the defaults. You can check using the following command:\n$ helm show values oud-ds-rs -n oudns The output will look similar to the following:\n... # memory, cpu parameters for both requests and limits for oud instances resources: requests: memory: \u0026quot;4Gi\u0026quot; cpu: \u0026quot;500m\u0026quot; limits: memory: \u0026quot;8Gi\u0026quot; cpu: \u0026quot;2\u0026quot; ... 
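You can also confirm the resource values actually in effect on a running OUD pod. For example (a sketch, assuming the default statefulset name oud-ds-rs and the oudns namespace used elsewhere in this chapter):\n$ kubectl describe pod oud-ds-rs-0 -n oudns | grep -A6 Limits Note: If no Limits and Requests are reported, the HPA has no CPU or memory request to measure utilization against, and the target value shown later by kubectl get hpa will be reported as unknown.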
Deploy the Kubernetes Metrics Server Before deploying HPA you must deploy the Kubernetes Metrics Server.\n Check to see if the Kubernetes Metrics Server is already deployed:\n$ kubectl get pods -n kube-system | grep metric If a row is returned as follows, then Kubernetes Metric Server is deployed and you can move to Deploy HPA.\nmetrics-server-d9694457-mf69d 1/1 Running 0 5m13s If no rows are returned by the previous command, then the Kubernetes Metric Server needs to be deployed. Run the following commands to get the components.yaml:\n$ mkdir $WORKDIR/kubernetes/hpa $ cd $WORKDIR/kubernetes/hpa $ wget https://github.com/kubernetes-sigs/metrics-server/releases/latest/download/components.yaml Deploy the Kubernetes Metrics Server by running the following command:\n$ kubectl apply -f components.yaml The output will look similar to the following:\nserviceaccount/metrics-server created clusterrole.rbac.authorization.k8s.io/system:aggregated-metrics-reader created clusterrole.rbac.authorization.k8s.io/system:metrics-server created rolebinding.rbac.authorization.k8s.io/metrics-server-auth-reader created clusterrolebinding.rbac.authorization.k8s.io/metrics-server:system:auth-delegator created clusterrolebinding.rbac.authorization.k8s.io/system:metrics-server created service/metrics-server created deployment.apps/metrics-server created apiservice.apiregistration.k8s.io/v1beta1.metrics.k8s.io created Run the following command to check Kubernetes Metric Server is running:\n$ kubectl get pods -n kube-system | grep metric Make sure the pod has a READY status of 1/1:\nmetrics-server-d9694457-mf69d 1/1 Running 0 39s Troubleshooting If the Kubernetes Metric Server does not reach the READY 1/1 state, run the following commands:\n$ kubectl describe pod \u0026lt;metrics-server-pod\u0026gt; -n kube-system $ kubectl logs \u0026lt;metrics-server-pod\u0026gt; -n kube-system If you see errors such as:\nReadiness probe failed: HTTP probe failed with statuscode: 500 and:\nE0907 13:07:50.937308 1 scraper.go:140] \u0026quot;Failed to scrape node\u0026quot; err=\u0026quot;Get \\\u0026quot;https://X.X.X.X:10250/metrics/resource\\\u0026quot;: x509: cannot validate certificate for 100.105.18.113 because it doesn't contain any IP SANs\u0026quot; node=\u0026quot;worker-node1\u0026quot; then you may need to install a valid cluster certificate for your Kubernetes cluster.\nFor testing purposes, you can resolve this issue by:\n Delete the Kubernetes Metrics Server by running the following command:\n$ kubectl delete -f $WORKDIR/kubernetes/hpa/components.yaml Edit the $WORKDIR/hpa/components.yaml and locate the args: section. Add kubelet-insecure-tls to the arguments. For example:\nspec: containers: - args: - --cert-dir=/tmp - --secure-port=4443 - --kubelet-preferred-address-types=InternalIP,ExternalIP,Hostname - --kubelet-use-node-status-port - --kubelet-insecure-tls - --metric-resolution=15s image: registry.k8s.io/metrics-server/metrics-server:v0.6.4 ... 
Deploy the Kubernetes Metrics Server using the command:\n$ kubectl apply -f components.yaml Run the following and make sure the READY status shows 1/1:\n$ kubectl get pods -n kube-system | grep metric The output should look similar to the following:\nmetrics-server-d9694457-mf69d 1/1 Running 0 40s Deploy HPA The steps below show how to configure and run an HPA to scale OUD, based on the CPU or memory utilization resource metrics.\nAssuming the example OUD configuration in Create OUD instances, three OUD servers are started by default (oud-ds-rs-0, oud-ds-rs-1, oud-ds-rs-2).\nIn the following example an HPA resource is created, targeted at the statefulset oud-ds-rs. This resource will autoscale OUD servers from a minimum of 3 OUD servers up to 5 OUD servers. Scaling up will occur when the average CPU is consistently over 70%. Scaling down will occur when the average CPU is consistently below 70%.\n Navigate to the $WORKDIR/kubernetes/hpa directory and create an autoscalehpa.yaml file that contains the following.\n# apiVersion: autoscaling/v2 kind: HorizontalPodAutoscaler metadata: name: oud-sts-hpa namespace: oudns spec: scaleTargetRef: apiVersion: apps/v1 kind: StatefulSet name: oud-ds-rs #statefulset name of oud behavior: scaleDown: stabilizationWindowSeconds: 60 scaleUp: stabilizationWindowSeconds: 60 minReplicas: 3 maxReplicas: 5 metrics: - type: Resource resource: name: cpu target: type: Utilization averageUtilization: 70 Note: minReplicas should match the number of OUD servers started by default. Set maxReplicas to the maximum number of OUD servers that can be started.\nNote: To find the statefulset name, in this example oud-ds-rs, run \u0026ldquo;kubectl get statefulset -n oudns\u0026rdquo;.\nNote: For setting HPA based on Memory Metrics, update the metrics block with the following content. 
Please note we recommend using only CPU or Memory, not both.\nmetrics: - type: Resource resource: name: memory target: type: Utilization averageUtilization: 70 Run the following command to create the autoscaler:\n$ kubectl apply -f autoscalehpa.yaml The output will look similar to the following:\nhorizontalpodautoscaler.autoscaling/oud-sts-hpa created Verify the status of the autoscaler by running the following:\n$ kubectl get hpa -n oudns The output will look similar to the following:\nNAME REFERENCE TARGETS MINPODS MAXPODS REPLICAS AGE oud-sts-hpa StatefulSet/oud-ds-rs 5%/70% 3 5 3 33s In the example above, this shows that CPU is currently running at 5% for the oud-sts-hpa.\n Testing HPA Check the current status of the OUD servers:\n$ kubectl get pods -n oudns The output will look similar to the following:\nNAME READY STATUS RESTARTS AGE oud-ds-rs-0 1/1 Running 0 5h15m oud-ds-rs-1 1/1 Running 0 5h9m oud-ds-rs-2 1/1 Running 0 5h2m oud-pod-cron-job-28242120-bwtcz 0/1 Completed 0 61m oud-pod-cron-job-28242150-qf8fg 0/1 Completed 0 31m oud-pod-cron-job-28242180-q69lm 0/1 Completed 0 92s In the above output, oud-ds-rs-0, oud-ds-rs-1, and oud-ds-rs-2 are running.\n To test that HPA can scale up the OUD servers, run the following commands:\n$ kubectl exec --stdin --tty oud-ds-rs-0 -n oudns -- /bin/bash This will take you inside a bash shell inside the oud-ds-rs-0 pod:\n[oracle@oud-ds-rs-0 oracle]$ Inside the bash shell, run the following command to increase the load on the CPU:\n[oracle@oud-ds-rs-0 oracle]$ dd if=/dev/zero of=/dev/null This command will continue to run in the foreground.\n Repeat the step above for the oud-ds-rs-1 pod:\n$ kubectl exec --stdin --tty oud-ds-rs-1 -n oudns -- /bin/bash [oracle@oud-ds-rs-1 oracle]$ [oracle@oud-ds-rs-1 oracle]$ dd if=/dev/zero of=/dev/null In a command window outside the bash shells, run the following command to view the current CPU usage:\n$ kubectl get hpa -n oudns The output will look similar to the following:\nNAME REFERENCE TARGETS MINPODS MAXPODS REPLICAS AGE oud-sts-hpa StatefulSet/oud-ds-rs 125%/70% 3 5 3 5m15s In the above example the CPU has increased to 125%. As this is above the 70% limit, the autoscaler increases the replicas by starting additional OUD servers.\n Run the following to see if any more OUD servers are started:\n$ kubectl get pods -n oudns The output will look similar to the following:\nNAME READY STATUS RESTARTS AGE oud-ds-rs-0 1/1 Running 0 5h50m oud-ds-rs-1 1/1 Running 0 5h44m oud-ds-rs-2 1/1 Running 0 5h37m oud-ds-rs-3 1/1 Running 0 9m29s oud-ds-rs-4 1/1 Running 0 5m17s oud-pod-cron-job-28242150-qf8fg 0/1 Completed 0 66m oud-pod-cron-job-28242180-q69lm 0/1 Completed 0 36m oud-pod-cron-job-28242210-kn7sv 0/1 Completed 0 6m28s In the example above, two more OUD servers have started (oud-ds-rs-3 and oud-ds-rs-4).\nNote: It may take some time for the servers to appear and start. Once a server is at READY status of 1/1, it is started.\n To stop the load on the CPU, in both bash shells, press Ctrl-C, and then exit the bash shell:\n[oracle@oud-ds-rs-0 oracle]$ dd if=/dev/zero of=/dev/null ^C [oracle@oud-ds-rs-0 oracle]$ exit Run the following command to view the current CPU usage:\n$ kubectl get hpa -n oudns The output will look similar to the following:\nNAME REFERENCE TARGETS MINPODS MAXPODS REPLICAS AGE oud-sts-hpa StatefulSet/oud-ds-rs 4%/70% 3 5 5 40m In the above example CPU has dropped to 4%. 
As this is below the 70% threshold, you should see the autoscaler scale down the servers:\n$ kubectl get pods -n oudns The output will look similar to the following:\nNAME READY STATUS RESTARTS AGE oud-ds-rs-0 1/1 Running 0 5h54m oud-ds-rs-1 1/1 Running 0 5h48m oud-ds-rs-2 1/1 Running 0 5h41m oud-ds-rs-3 1/1 Running 0 13m oud-ds-rs-4 1/1 Terminating 0 8m27s oud-pod-cron-job-28242150-qf8fg 0/1 Completed 0 70m oud-pod-cron-job-28242180-q69lm 0/1 Completed 0 40m oud-pod-cron-job-28242210-kn7sv 0/1 Completed 0 10m Eventually, the extra server will disappear:\nNAME READY STATUS RESTARTS AGE oud-ds-rs-0 1/1 Running 0 5h57m oud-ds-rs-1 1/1 Running 0 5h51m oud-ds-rs-2 1/1 Running 0 5h44m oud-ds-rs-3 1/1 Running 0 16m oud-pod-cron-job-28242150-qf8fg 0/1 Completed 0 73m oud-pod-cron-job-28242180-q69lm 0/1 Completed 0 43m oud-pod-cron-job-28242210-kn7sv 0/1 Completed 0 13m Delete the HPA If you need to delete the HPA, you can do so by running the following command:\n$ cd $WORKDIR/kubernetes/hpa $ kubectl delete -f autoscalehpa.yaml Other considerations If HPA is deployed and you need to upgrade the OUD image, then you must delete the HPA before upgrading. Once the upgrade is successful you can deploy HPA again. If you choose to scale up or scale down an OUD server manually as per Scaling Up/Down OUD Pods, then it is recommended to delete the HPA before doing so. " +}, +{ + "uri": "/fmw-kubernetes/23.4.1/idm-products/oig/manage-oig-domains/logging-and-visualization/", + "title": "d. Logging and visualization", + "tags": [], + "description": "Describes the steps for logging and visualization with Elasticsearch and Kibana.", + "content": "After the OIG domain is set up you can publish operator and WebLogic Server logs into Elasticsearch and interact with them in Kibana.\nInstall Elasticsearch and Kibana If you do not already have a centralized Elasticsearch (ELK) stack then you must configure this first. For details on how to configure the ELK stack, follow Installing Elasticsearch (ELK) Stack and Kibana\nCreate the logstash pod Variables used in this chapter In order to create the logstash pod, you must create several files. These files contain variables which you must substitute with variables applicable to your environment.\nMost of the values for the variables will be based on your ELK deployment as per Installing Elasticsearch (ELK) Stack and Kibana.\nThe table below outlines the variables and values you must set:\n Variable Sample Value Description \u0026lt;ELK_VER\u0026gt; 8.3.1 The version of logstash you want to install. \u0026lt;ELK_SSL\u0026gt; true If SSL is enabled for ELK set the value to true, or if NON-SSL set to false. This value must be lowercase. \u0026lt;ELK_HOSTS\u0026gt; https://elasticsearch.example.com:9200 The URL for sending logs to Elasticsearch. HTTP if NON-SSL is used. \u0026lt;ELKNS\u0026gt; oigns The domain namespace. \u0026lt;ELK_USER\u0026gt; logstash_internal The name of the user for logstash to access Elasticsearch. \u0026lt;ELK_PASSWORD\u0026gt; password The password for ELK_USER. \u0026lt;ELK_APIKEY\u0026gt; apikey The API key details. You will also need the BASE64 version of the Certificate Authority (CA) certificate(s) that signed the certificate of the Elasticsearch server. If using a self-signed certificate, this is the self signed certificate of the Elasticsearch server. See Copying the Elasticsearch Certificate for details on how to get the correct certificate. 
In the example below the certificate is called elk.crt.\nCreate Kubernetes secrets Create a Kubernetes secret for Elasticsearch using the API Key or Password.\na) If ELK uses an API Key for authentication:\n$ kubectl create secret generic elasticsearch-pw-elastic -n \u0026lt;domain_namespace\u0026gt; --from-literal password=\u0026lt;ELK_APIKEY\u0026gt; For example:\n$ kubectl create secret generic elasticsearch-pw-elastic -n oigns --from-literal password=\u0026lt;ELK_APIKEY\u0026gt; The output will look similar to the following:\nsecret/elasticsearch-pw-elastic created b) If ELK uses a password for authentication:\n$ kubectl create secret generic elasticsearch-pw-elastic -n \u0026lt;domain_namespace\u0026gt; --from-literal password=\u0026lt;ELK_PASSWORD\u0026gt; For example:\n$ kubectl create secret generic elasticsearch-pw-elastic -n oigns --from-literal password=\u0026lt;ELK_PASSWORD\u0026gt; The output will look similar to the following:\nsecret/elasticsearch-pw-elastic created Note: It is recommended that the ELK Stack is created with authentication enabled. If no authentication is enabled you may create a secret using the values above.\n Create a Kubernetes secret to access the required images on hub.docker.com:\nNote: Before executing the command below, you must first have a user account on hub.docker.com.\nkubectl create secret docker-registry \u0026#34;dockercred\u0026#34; --docker-server=\u0026#34;https://index.docker.io/v1/\u0026#34; \\ --docker-username=\u0026#34;\u0026lt;DOCKER_USER_NAME\u0026gt;\u0026#34; \\ --docker-password=\u0026lt;DOCKER_PASSWORD\u0026gt; --docker-email=\u0026lt;DOCKER_EMAIL_ID\u0026gt; \\ --namespace=\u0026lt;domain_namespace\u0026gt; For example,\nkubectl create secret docker-registry \u0026#34;dockercred\u0026#34; --docker-server=\u0026#34;https://index.docker.io/v1/\u0026#34; \\ --docker-username=\u0026#34;user@example.com\u0026#34; \\ --docker-password=password --docker-email=user@example.com \\ --namespace=oigns The output will look similar to the following:\nsecret/dockercred created Find the mountPath details Run the following command to get the mountPath of your domain:\n$ kubectl describe domains \u0026lt;domain_uid\u0026gt; -n \u0026lt;domain_namespace\u0026gt; | grep \u0026#34;Mount Path\u0026#34; For example:\n$ kubectl describe domains governancedomain -n oigns | grep \u0026#34;Mount Path\u0026#34; The output will look similar to the following:\nMount Path: /u01/oracle/user_projects/domains Find the persistentVolumeClaim details Run the following command to get the OIG domain persistent volume details:\n$ kubectl get pv -n \u0026lt;domain_namespace\u0026gt; For example:\n$ kubectl get pv -n oigns The output will look similar to the following:\nNAME CAPACITY ACCESS MODES RECLAIM POLICY STATUS CLAIM STORAGECLASS REASON AGE governancedomain-domain-pv 10Gi RWX Retain Bound oigns/governancedomain-domain-pvc governancedomain-oim-storage-class 28h Make note of the CLAIM value, for example in this case governancedomain-domain-pvc.\n Create the Configmap Copy the elk.crt file to the $WORKDIR/kubernetes/elasticsearch-and-kibana directory.\n Navigate to the $WORKDIR/kubernetes/elasticsearch-and-kibana directory and run the following:\nkubectl create configmap elk-cert --from-file=elk.crt -n \u0026lt;namespace\u0026gt; For example:\nkubectl create configmap elk-cert --from-file=elk.crt -n oigns The output will look similar to the following:\nconfigmap/elk-cert created Create a logstash_cm.yaml file in the $WORKDIR/kubernetes/elasticsearch-and-kibana 
directory as follows:\napiVersion: v1 kind: ConfigMap metadata: name: oig-logstash-configmap namespace: \u0026lt;ELKNS\u0026gt; data: logstash.yml: | #http.host: \u0026quot;0.0.0.0\u0026quot; logstash-config.conf: | input { file { path =\u0026gt; \u0026quot;/u01/oracle/user_projects/domains/logs/governancedomain/AdminServer*.log\u0026quot; tags =\u0026gt; \u0026quot;Adminserver_log\u0026quot; start_position =\u0026gt; beginning } file { path =\u0026gt; \u0026quot;/u01/oracle/user_projects/domains/logs/governancedomain/soa_server*.log\u0026quot; tags =\u0026gt; \u0026quot;soaserver_log\u0026quot; start_position =\u0026gt; beginning } file { path =\u0026gt; \u0026quot;/u01/oracle/user_projects/domains/logs/governancedomain/oim_server*.log\u0026quot; tags =\u0026gt; \u0026quot;Oimserver_log\u0026quot; start_position =\u0026gt; beginning } file { path =\u0026gt; \u0026quot;/u01/oracle/user_projects/domains/governancedomain/servers/AdminServer/logs/AdminServer-diagnostic.log\u0026quot; tags =\u0026gt; \u0026quot;Adminserver_diagnostic\u0026quot; start_position =\u0026gt; beginning } file { path =\u0026gt; \u0026quot;/u01/oracle/user_projects/domains/governancedomain/servers/**/logs/soa_server*-diagnostic.log\u0026quot; tags =\u0026gt; \u0026quot;Soa_diagnostic\u0026quot; start_position =\u0026gt; beginning } file { path =\u0026gt; \u0026quot;/u01/oracle/user_projects/domains/governancedomain/servers/**/logs/oim_server*-diagnostic.log\u0026quot; tags =\u0026gt; \u0026quot;Oimserver_diagnostic\u0026quot; start_position =\u0026gt; beginning } file { path =\u0026gt; \u0026quot;/u01/oracle/user_projects/domains/governancedomain/servers/**/logs/access*.log\u0026quot; tags =\u0026gt; \u0026quot;Access_logs\u0026quot; start_position =\u0026gt; beginning } } filter { grok { match =\u0026gt; [ \u0026quot;message\u0026quot;, \u0026quot;\u0026lt;%{DATA:log_timestamp}\u0026gt; \u0026lt;%{WORD:log_level}\u0026gt; \u0026lt;%{WORD:thread}\u0026gt; \u0026lt;%{HOSTNAME:hostname}\u0026gt; \u0026lt;%{HOSTNAME:servername}\u0026gt; \u0026lt;%{DATA:timer}\u0026gt; \u0026lt;\u0026lt;%{DATA:kernel}\u0026gt;\u0026gt; \u0026lt;\u0026gt; \u0026lt;%{DATA:uuid}\u0026gt; \u0026lt;%{NUMBER:timestamp}\u0026gt; \u0026lt;%{DATA:misc} \u0026gt; \u0026lt;%{DATA:log_number}\u0026gt; \u0026lt;%{DATA:log_message}\u0026gt;\u0026quot; ] } if \u0026quot;_grokparsefailure\u0026quot; in [tags] { mutate { remove_tag =\u0026gt; [ \u0026quot;_grokparsefailure\u0026quot; ] } } } output { elasticsearch { hosts =\u0026gt; [\u0026quot;\u0026lt;ELK_HOSTS\u0026gt;\u0026quot;] cacert =\u0026gt; '/usr/share/logstash/config/certs/elk.crt' index =\u0026gt; \u0026quot;oiglogs-000001\u0026quot; ssl =\u0026gt; \u0026lt;ELK_SSL\u0026gt; ssl_certificate_verification =\u0026gt; false user =\u0026gt; \u0026quot;\u0026lt;ELK_USER\u0026gt;\u0026quot; password =\u0026gt; \u0026quot;${ELASTICSEARCH_PASSWORD}\u0026quot; api_key =\u0026gt; \u0026quot;${ELASTICSEARCH_PASSWORD}\u0026quot; } } Change the values in the above file as follows:\n Change the \u0026lt;ELKNS\u0026gt;, \u0026lt;ELK_HOSTS\u0026gt;, \u0026lt;ELK_SSL\u0026gt;, and \u0026lt;ELK_USER\u0026gt; to match the values for your environment. Change /u01/oracle/user_projects/domains to match the mountPath returned earlier. If your domainUID is anything other than governancedomain, change each instance of governancedomain to your domainUID. If using API KEY for your ELK authentication, delete the user and password lines. If using a password for ELK authentication, delete the api_key line. 
If no authentication is used for ELK, delete the user, password, and api_key lines. For example:\napiVersion: v1 kind: ConfigMap metadata: name: oig-logstash-configmap namespace: oigns data: logstash.yml: | #http.host: \u0026quot;0.0.0.0\u0026quot; logstash-config.conf: | input { file { path =\u0026gt; \u0026quot;/u01/oracle/user_projects/domains/logs/governancedomain/AdminServer*.log\u0026quot; tags =\u0026gt; \u0026quot;Adminserver_log\u0026quot; start_position =\u0026gt; beginning } file { path =\u0026gt; \u0026quot;/u01/oracle/user_projects/domains/logs/governancedomain/soa_server*.log\u0026quot; tags =\u0026gt; \u0026quot;soaserver_log\u0026quot; start_position =\u0026gt; beginning } file { path =\u0026gt; \u0026quot;/u01/oracle/user_projects/domains/logs/governancedomain/oim_server*.log\u0026quot; tags =\u0026gt; \u0026quot;Oimserver_log\u0026quot; start_position =\u0026gt; beginning } file { path =\u0026gt; \u0026quot;/u01/oracle/user_projects/domains/governancedomain/servers/AdminServer/logs/AdminServer-diagnostic.log\u0026quot; tags =\u0026gt; \u0026quot;Adminserver_diagnostic\u0026quot; start_position =\u0026gt; beginning } file { path =\u0026gt; \u0026quot;/u01/oracle/user_projects/domains/governancedomain/servers/**/logs/soa_server*-diagnostic.log\u0026quot; tags =\u0026gt; \u0026quot;Soa_diagnostic\u0026quot; start_position =\u0026gt; beginning } file { path =\u0026gt; \u0026quot;/u01/oracle/user_projects/domains/governancedomain/servers/**/logs/oim_server*-diagnostic.log\u0026quot; tags =\u0026gt; \u0026quot;Oimserver_diagnostic\u0026quot; start_position =\u0026gt; beginning } file { path =\u0026gt; \u0026quot;/u01/oracle/user_projects/domains/governancedomain/servers/**/logs/access*.log\u0026quot; tags =\u0026gt; \u0026quot;Access_logs\u0026quot; start_position =\u0026gt; beginning } } filter { grok { match =\u0026gt; [ \u0026quot;message\u0026quot;, \u0026quot;\u0026lt;%{DATA:log_timestamp}\u0026gt; \u0026lt;%{WORD:log_level}\u0026gt; \u0026lt;%{WORD:thread}\u0026gt; \u0026lt;%{HOSTNAME:hostname}\u0026gt; \u0026lt;%{HOSTNAME:servername}\u0026gt; \u0026lt;%{DATA:timer}\u0026gt; \u0026lt;\u0026lt;%{DATA:kernel}\u0026gt;\u0026gt; \u0026lt;\u0026gt; \u0026lt;%{DATA:uuid}\u0026gt; \u0026lt;%{NUMBER:timestamp}\u0026gt; \u0026lt;%{DATA:misc} \u0026gt; \u0026lt;%{DATA:log_number}\u0026gt; \u0026lt;%{DATA:log_message}\u0026gt;\u0026quot; ] } if \u0026quot;_grokparsefailure\u0026quot; in [tags] { mutate { remove_tag =\u0026gt; [ \u0026quot;_grokparsefailure\u0026quot; ] } } } output { elasticsearch { hosts =\u0026gt; [\u0026quot;https://elasticsearch.example.com:9200\u0026quot;] cacert =\u0026gt; '/usr/share/logstash/config/certs/elk.crt' index =\u0026gt; \u0026quot;oiglogs-000001\u0026quot; ssl =\u0026gt; true ssl_certificate_verification =\u0026gt; false user =\u0026gt; \u0026quot;logstash_internal\u0026quot; password =\u0026gt; \u0026quot;${ELASTICSEARCH_PASSWORD}\u0026quot; } } Run the following command to create the configmap:\n$ kubectl apply -f logstash_cm.yaml The output will look similar to the following:\nconfigmap/oig-logstash-configmap created Deploy the logstash pod Navigate to the $WORKDIR/kubernetes/elasticsearch-and-kibana directory and create a logstash.yaml file as follows:\napiVersion: apps/v1 kind: Deployment metadata: name: oig-logstash namespace: \u0026lt;ELKNS\u0026gt; spec: selector: matchLabels: k8s-app: logstash template: # create pods using pod definition in this template metadata: labels: k8s-app: logstash spec: imagePullSecrets: - name: dockercred 
containers: - command: - logstash image: logstash:\u0026lt;ELK_VER\u0026gt; imagePullPolicy: IfNotPresent name: oig-logstash env: - name: ELASTICSEARCH_PASSWORD valueFrom: secretKeyRef: name: elasticsearch-pw-elastic key: password resources: ports: - containerPort: 5044 name: logstash volumeMounts: - mountPath: /u01/oracle/user_projects/domains name: weblogic-domain-storage-volume - name: shared-logs mountPath: /shared-logs - mountPath: /usr/share/logstash/pipeline/ name: oig-logstash-pipeline - mountPath: /usr/share/logstash/config/logstash.yml subPath: logstash.yml name: config-volume - mountPath: /usr/share/logstash/config/certs name: elk-cert volumes: - configMap: defaultMode: 420 items: - key: elk.crt path: elk.crt name: elk-cert name: elk-cert - configMap: defaultMode: 420 items: - key: logstash-config.conf path: logstash-config.conf name: oig-logstash-configmap name: oig-logstash-pipeline - configMap: defaultMode: 420 items: - key: logstash.yml path: logstash.yml name: oig-logstash-configmap name: config-volume - name: weblogic-domain-storage-volume persistentVolumeClaim: claimName: governancedomain-domain-pvc - name: shared-logs emptyDir: {} Change the \u0026lt;ELKNS\u0026gt; and \u0026lt;ELK_VER\u0026gt; to match the values for your environment. Change /u01/oracle/user_projects/domains to match the mountPath returned earlier. Change the claimName value to match the claimName returned earlier. If your Kubernetes environment does not allow access to the internet to pull the logstash image, you must load the logstash image in your own container registry and change image: logstash:\u0026lt;ELK_VER\u0026gt; to the location of the image in your container registry, e.g. container-registry.example.com/logstash:8.3.1. For example:\napiVersion: apps/v1 kind: Deployment metadata: name: oig-logstash namespace: oigns spec: selector: matchLabels: k8s-app: logstash template: # create pods using pod definition in this template metadata: labels: k8s-app: logstash spec: imagePullSecrets: - name: dockercred containers: - command: - logstash image: logstash:8.3.1 imagePullPolicy: IfNotPresent name: oig-logstash env: - name: ELASTICSEARCH_PASSWORD valueFrom: secretKeyRef: name: elasticsearch-pw-elastic key: password resources: ports: - containerPort: 5044 name: logstash volumeMounts: - mountPath: /u01/oracle/user_projects/domains name: weblogic-domain-storage-volume - name: shared-logs mountPath: /shared-logs - mountPath: /usr/share/logstash/pipeline/ name: oig-logstash-pipeline - mountPath: /usr/share/logstash/config/logstash.yml subPath: logstash.yml name: config-volume - mountPath: /usr/share/logstash/config/certs name: elk-cert volumes: - configMap: defaultMode: 420 items: - key: elk.crt path: elk.crt name: elk-cert name: elk-cert - configMap: defaultMode: 420 items: - key: logstash-config.conf path: logstash-config.conf name: oig-logstash-configmap name: oig-logstash-pipeline - configMap: defaultMode: 420 items: - key: logstash.yml path: logstash.yml name: oig-logstash-configmap name: config-volume - name: weblogic-domain-storage-volume persistentVolumeClaim: claimName: governancedomain-domain-pvc - name: shared-logs emptyDir: {} Deploy the logstash pod by executing the following command:\n$ kubectl create -f $WORKDIR/kubernetes/elasticsearch-and-kibana/logstash.yaml The output will look similar to the following:\ndeployment.apps/oig-logstash created Run the following command to check that the logstash pod is created correctly:\n$ kubectl get pods -n 
\u0026lt;namespace\u0026gt; For example:\n$ kubectl get pods -n oigns The output should look similar to the following:\nNAME READY STATUS RESTARTS AGE governancedomain-adminserver 1/1 Running 0 90m governancedomain-create-fmw-infra-sample-domain-job-fqgnr 0/1 Completed 0 2d19h governancedomain-oim-server1 1/1 Running 0 88m governancedomain-soa-server1 1/1 Running 0 88m helper 1/1 Running 0 2d20h oig-logstash-77fbbc66f8-lsvcw 1/1 Running 0 3m25s Note: Wait a couple of minutes to make sure the pod has not had any failures or restarts. If the pod fails you can view the pod log using:\n$ kubectl logs -f oig-logstash-\u0026lt;pod\u0026gt; -n oigns Most errors occur due to misconfiguration of the logstash_cm.yaml or logstash.yaml. This is usually because of an incorrect value set, or the certificate was not pasted with the correct indentation.\nIf the pod has errors, delete the pod and configmap as follows:\n$ kubectl delete -f $WORKDIR/kubernetes/elasticsearch-and-kibana/logstash.yaml $ kubectl delete -f $WORKDIR/kubernetes/elasticsearch-and-kibana/logstash_cm.yaml Once you have resolved the issue in the yaml files, run the commands outlined earlier to recreate the configmap and logstash pod.\n Verify and access the Kibana console To access the Kibana console you will need the Kibana URL as per Installing Elasticsearch (ELK) Stack and Kibana.\nFor Kibana 7.7.x and below:\n Access the Kibana console with http://\u0026lt;hostname\u0026gt;:\u0026lt;port\u0026gt;/app/kibana and login with your username and password.\n From the Navigation menu, navigate to Management \u0026gt; Kibana \u0026gt; Index Patterns.\n In the Create Index Pattern page enter oiglogs* for the Index pattern and click Next Step.\n In the Configure settings page, from the Time Filter field name drop down menu select @timestamp and click Create index pattern.\n Once the index pattern is created click on Discover in the navigation menu to view the OIG logs.\n For Kibana version 7.8.X and above:\n Access the Kibana console with http://\u0026lt;hostname\u0026gt;:\u0026lt;port\u0026gt;/app/kibana and login with your username and password.\n From the Navigation menu, navigate to Management \u0026gt; Stack Management.\n Click Data Views in the Kibana section.\n Click Create Data View and enter the following information:\n Name: oiglogs* Timestamp: @timestamp Click Create Data View.\n From the Navigation menu, click Discover to view the log file entries.\n From the drop down menu, select oiglogs* to view the log file entries.\n " +}, +{ + "uri": "/fmw-kubernetes/23.4.1/idm-products/oam/manage-oam-domains/monitoring-oam-domains/", + "title": "d. Monitoring an OAM domain", + "tags": [], + "description": "Describes the steps for Monitoring the OAM domain.", + "content": "After the OAM domain is set up you can monitor the OAM instance using Prometheus and Grafana. See Monitoring a domain.\nThe WebLogic Monitoring Exporter uses the WLS RESTful Management API to scrape runtime information and then exports Prometheus-compatible metrics. It is deployed as a web application in a WebLogic Server (WLS) instance, version 12.2.1 or later, typically, in the instance from which you want to get metrics.\nThere are two ways to setup monitoring and you should choose one method or the other:\n Setup automatically using setup-monitoring.sh Setup using manual configuration Setup automatically using setup-monitoring.sh The $WORKDIR/kubernetes/monitoring-service/setup-monitoring.sh sets up the monitoring for the OAM domain. 
It installs Prometheus, Grafana, WebLogic Monitoring Exporter and deploys the web applications to the OAM domain. It also deploys the WebLogic Server Grafana dashboard.\nFor usage details execute ./setup-monitoring.sh -h.\n Edit the $WORKDIR/kubernetes/monitoring-service/monitoring-inputs.yaml and change the domainUID, domainNamespace, and weblogicCredentialsSecretName to correspond to your deployment. Also change wlsMonitoringExporterTooamCluster, wlsMonitoringExporterTopolicyCluster, exposeMonitoringNodePort to true. For example:\nversion: create-accessdomain-monitoring-inputs-v1 # Unique ID identifying your domain. # This ID must not contain an underscope (\u0026quot;_\u0026quot;), and must be lowercase and unique across all domains in a Kubernetes cluster. domainUID: accessdomain # Name of the domain namespace domainNamespace: oamns # Boolean value indicating whether to install kube-prometheus-stack setupKubePrometheusStack: true # Additional parameters for helm install kube-prometheus-stack # Refer https://github.com/prometheus-community/helm-charts/blob/main/charts/kube-prometheus-stack/values.yaml for additional parameters # Sample : # additionalParamForKubePrometheusStack: --set nodeExporter.enabled=false --set prometheusOperator.tls.enabled=false --set prometheusOperator.admissionWebhooks.enabled=false additionalParamForKubePrometheusStack: # Name of the monitoring namespace monitoringNamespace: monitoring # Name of the Admin Server adminServerName: AdminServer # # Port number for admin server adminServerPort: 7001 # Cluster name oamClusterName: oam_cluster # Port number for managed server oamManagedServerPort: 14100 # WebLogic Monitoring Exporter to Cluster wlsMonitoringExporterTooamCluster: true # Cluster name policyClusterName: policy_cluster # Port number for managed server policyManagedServerPort: 15100 # WebLogic Monitoring Exporter to Cluster wlsMonitoringExporterTopolicyCluster: true # Boolean to indicate if the adminNodePort will be exposed exposeMonitoringNodePort: true # NodePort to expose Prometheus prometheusNodePort: 32101 # NodePort to expose Grafana grafanaNodePort: 32100 # NodePort to expose Alertmanager alertmanagerNodePort: 32102 # Name of the Kubernetes secret for the Admin Server's username and password weblogicCredentialsSecretName: accessdomain-credentials Note: If your cluster does not have access to the internet to pull external images, such as grafana or prometheus, you must load the images in a local container registry. 
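As an illustration only, assuming docker is available on a host with internet access and your local registry is container-registry.example.com, the grafana image could be mirrored as follows:\n$ docker pull grafana/grafana:8.3.4 $ docker tag grafana/grafana:8.3.4 container-registry.example.com/grafana:8.3.4 $ docker push container-registry.example.com/grafana:8.3.4 Repeat this for any other images your registry is missing. 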
You must then set additionalParamForKubePrometheusStack to set the location of the image in your local container registry, for example:\n# Additional parameters for helm install kube-prometheus-stack # Refer https://github.com/prometheus-community/helm-charts/blob/main/charts/kube-prometheus-stack/values.yaml for additional parameters # Sample : # additionalParamForKubePrometheusStack: --set nodeExporter.enabled=false --set prometheusOperator.tls.enabled=false --set prometheusOperator.admissionWebhooks.enabled=false additionalParamForKubePrometheusStack: --set grafana.image.repository=container-registry.example.com/grafana --set grafana.image.tag=8.3.4 Run the following command to setup monitoring.\n$ cd $WORKDIR/kubernetes/monitoring-service $ ./setup-monitoring.sh -i monitoring-inputs.yaml The output should be similar to the following:\nMonitoring setup in monitoring in progress node/worker-node1 not labeled node/worker-node2 not labeled node/master-node not labeled Setup prometheus-community/kube-prometheus-stack started \u0026quot;prometheus-community\u0026quot; has been added to your repositories Hang tight while we grab the latest from your chart repositories... ...Successfully got an update from the \u0026quot;stable\u0026quot; chart repository ...Successfully got an update from the \u0026quot;prometheus\u0026quot; chart repository ...Successfully got an update from the \u0026quot;prometheus-community\u0026quot; chart repository ...Successfully got an update from the \u0026quot;appscode\u0026quot; chart repository Update Complete. ⎈ Happy Helming!⎈ Setup prometheus-community/kube-prometheus-stack in progress W0320 \u0026lt;DATE\u0026gt; 19121 warnings.go:70] policy/v1beta1 PodSecurityPolicy is deprecated in v1.21+, unavailable in v1.25+ W0320 \u0026lt;DATE\u0026gt; 19121 warnings.go:70] policy/v1beta1 PodSecurityPolicy is deprecated in v1.21+, unavailable in v1.25+ W0320 \u0026lt;DATE\u0026gt; 19121 warnings.go:70] policy/v1beta1 PodSecurityPolicy is deprecated in v1.21+, unavailable in v1.25+ .. W0320 \u0026lt;DATE\u0026gt; 19121 warnings.go:70] policy/v1beta1 PodSecurityPolicy is deprecated in v1.21+, unavailable in v1.25+ NAME: monitoring LAST DEPLOYED: \u0026lt;DATE\u0026gt; NAMESPACE: monitoring STATUS: deployed REVISION: 1 NOTES: kube-prometheus-stack has been installed. Check its status by running: kubectl --namespace monitoring get pods -l \u0026quot;release=monitoring\u0026quot; Visit https://github.com/prometheus-operator/kube-prometheus for instructions on how to create \u0026amp; configure Alertmanager and Prometheus instances using the Operator. 
Setup prometheus-community/kube-prometheus-stack completed Deploy WebLogic Monitoring Exporter started Deploying WebLogic Monitoring Exporter with domainNamespace[oamns], domainUID[accessdomain], adminServerPodName[accessdomain-adminserver] % Total % Received % Xferd Average Speed Time Time Time Current Dload Upload Total Spent Left Speed 100 655 100 655 0 0 1564 0 --:--:-- --:--:-- --:--:-- 1566 100 2196k 100 2196k 0 0 2025k 0 0:00:01 0:00:01 --:--:-- 5951k created $WORKDIR/kubernetes/monitoring-service/scripts/wls-exporter-deploy dir created /tmp/ci-EHhB7bP847 /tmp/ci-EHhB7bP847 $WORKDIR/kubernetes/monitoring-service in temp dir adding: WEB-INF/weblogic.xml (deflated 61%) adding: config.yml (deflated 60%) $WORKDIR/kubernetes/monitoring-service created /tmp/ci-e7wPrlLlud 14:26 /tmp/ci-e7wPrlLlud $WORKDIR/kubernetes/monitoring-service in temp dir adding: WEB-INF/weblogic.xml (deflated 61%) adding: config.yml (deflated 60%) $WORKDIR/kubernetes/monitoring-service created /tmp/ci-U38XXs6d06 /tmp/ci-U38XXs6d06 $WORKDIR/kubernetes/monitoring-service in temp dir adding: WEB-INF/weblogic.xml (deflated 61%) adding: config.yml (deflated 60%) $WORKDIR/kubernetes/monitoring-service Initializing WebLogic Scripting Tool (WLST) ... Welcome to WebLogic Server Administration Scripting Shell Type help() for help on available commands Connecting to t3://accessdomain-adminserver:7001 with userid weblogic ... Successfully connected to Admin Server \u0026quot;AdminServer\u0026quot; that belongs to domain \u0026quot;accessdomain\u0026quot;. Warning: An insecure protocol was used to connect to the server. To ensure on-the-wire security, the SSL port or Admin port should be used instead. Deploying ......... Deploying application from /u01/oracle/wls-exporter-deploy/wls-exporter-adminserver.war to targets AdminServer (upload=true) ... \u0026lt;DATE\u0026gt; \u0026lt;Info\u0026gt; \u0026lt;J2EE Deployment SPI\u0026gt; \u0026lt;BEA-260121\u0026gt; \u0026lt;Initiating deploy operation for application, wls-exporter-adminserver [archive: /u01/oracle/wls-exporter-deploy/wls-exporter-adminserver.war], to AdminServer .\u0026gt; .Completed the deployment of Application with status completed Current Status of your Deployment: Deployment command type: deploy Deployment State : completed Deployment Message : no message Starting application wls-exporter-adminserver. \u0026lt;DATE\u0026gt; \u0026lt;Info\u0026gt; \u0026lt;J2EE Deployment SPI\u0026gt; \u0026lt;BEA-260121\u0026gt; \u0026lt;Initiating start operation for application, wls-exporter-adminserver [archive: null], to AdminServer .\u0026gt; Completed the start of Application with status completed Current Status of your Deployment: Deployment command type: start Deployment State : completed 14:27 Deployment command type: start Deployment State : completed Deployment Message : no message Deploying ......... Deploying application from /u01/oracle/wls-exporter-deploy/wls-exporter-oam.war to targets oam_cluster (upload=true) ... \u0026lt;DATE\u0026gt; \u0026lt;Info\u0026gt; \u0026lt;J2EE Deployment SPI\u0026gt; \u0026lt;BEA-260121\u0026gt; \u0026lt;Initiating deploy operation for application, wls-exporter-oam [archive: /u01/oracle/wls-exporter-deploy/wls-exporter-oam.war], to oam_cluster .\u0026gt; .Completed the deployment of Application with status completed Current Status of your Deployment: Deployment command type: deploy Deployment State : completed Deployment Message : no message Starting application wls-exporter-oam. 
\u0026lt;DATE\u0026gt; \u0026lt;Info\u0026gt; \u0026lt;J2EE Deployment SPI\u0026gt; \u0026lt;BEA-260121\u0026gt; \u0026lt;Initiating start operation for application, wls-exporter-oam [archive: null], to oam_cluster .\u0026gt; .Completed the start of Application with status completed Current Status of your Deployment: Deployment command type: start Deployment State : completed Deployment Message : no message Deploying ......... Deploying application from /u01/oracle/wls-exporter-deploy/wls-exporter-policy.war to targets policy_cluster (upload=true) ... \u0026lt;DATE\u0026gt; \u0026lt;Info\u0026gt; \u0026lt;J2EE Deployment SPI\u0026gt; \u0026lt;BEA-260121\u0026gt; \u0026lt;Initiating deploy operation for application, wls-exporter-policy [archive: /u01/oracle/wls-exporter-deploy/wls-exporter-policy.war], to policy_cluster .\u0026gt; .Completed the deployment of Application with status completed Current Status of your Deployment: Deployment command type: deploy Deployment State : completed Deployment Message : no message Starting application wls-exporter-policy. \u0026lt;DATE\u0026gt; \u0026lt;Info\u0026gt; \u0026lt;J2EE Deployment SPI\u0026gt; \u0026lt;BEA-260121\u0026gt; \u0026lt;Initiating start operation for application, wls-exporter-policy [archive: null], to policy_cluster .\u0026gt; .Completed the start of Application with status completed Current Status of your Deployment: Deployment command type: start Deployment State : completed Deployment Message : no message Disconnected from weblogic server: AdminServer Exiting WebLogic Scripting Tool. \u0026lt;DATE\u0026gt; \u0026lt;Warning\u0026gt; \u0026lt;JNDI\u0026gt; \u0026lt;BEA-050001\u0026gt; \u0026lt;WLContext.close() was called in a different thread than the one in which it was created.\u0026gt; 14:27 Deploy WebLogic Monitoring Exporter completed secret/basic-auth created servicemonitor.monitoring.coreos.com/wls-exporter created Deploying WebLogic Server Grafana Dashboard.... {\u0026quot;id\u0026quot;:25,\u0026quot;slug\u0026quot;:\u0026quot;weblogic-server-dashboard\u0026quot;,\u0026quot;status\u0026quot;:\u0026quot;success\u0026quot;,\u0026quot;uid\u0026quot;:\u0026quot;5yUwzbZWz\u0026quot;,\u0026quot;url\u0026quot;:\u0026quot;/d/5yUwzbZWz/weblogic-server-dashboard\u0026quot;,\u0026quot;version\u0026quot;:1} Deployed WebLogic Server Grafana Dashboard successfully Grafana is available at NodePort: 32100 Prometheus is available at NodePort: 32101 Altermanager is available at NodePort: 32102 ============================================================== Prometheus service discovery After the ServiceMonitor is deployed, the wls-exporter should be discovered by Prometheus and be able to collect metrics.\n Access the following URL to view Prometheus service discovery: http://${MASTERNODE-HOSTNAME}:32101/service-discovery\n Click on serviceMonitor/oamns/wls-exporter/0 and then show more. Verify all the targets are mentioned.\n Note : It may take several minutes for serviceMonitor/oamns/wls-exporter/0 to appear, so refresh the page until it does.\nGrafana dashboard Access the Grafana dashboard with the following URL: http://${MASTERNODE-HOSTNAME}:32100 and login with admin/admin. Change your password when prompted.\n In the Dashboards panel, click on WebLogic Server Dashboard. The dashboard for your OAM domain should be displayed. 
If it is not displayed, click the Search icon in the left hand menu and search for WebLogic Server Dashboard.\n Cleanup To uninstall Prometheus, Grafana, the WebLogic Monitoring Exporter and the deployments, you can run the $WORKDIR/kubernetes/monitoring-service/delete-monitoring.sh script. For usage details execute ./delete-monitoring.sh -h.\n To uninstall, run the following commands. For example:\n$ cd $WORKDIR/kubernetes/monitoring-service $ ./delete-monitoring.sh -i monitoring-inputs.yaml $ kubectl delete namespace monitoring Setup using manual configuration Install Prometheus, Grafana and WebLogic Monitoring Exporter manually. Create the web applications and deploy to the OAM domain.\nDeploy the Prometheus operator Kube-Prometheus requires all nodes to be labelled with kubernetes.io/os=linux. To check if your nodes are labelled, run the following:\n$ kubectl get nodes --show-labels If the nodes are labelled the output will look similar to the following:\nNAME STATUS ROLES AGE VERSION LABELS worker-node1 Ready \u0026lt;none\u0026gt; 42d v1.20.10 beta.kubernetes.io/arch=amd64,beta.kubernetes.io/os=linux,kubernetes.io/arch=amd64,kubernetes.io/hostname=worker-node1,kubernetes.io/os=linux worker-node2 Ready \u0026lt;none\u0026gt; 42d v1.20.10 beta.kubernetes.io/arch=amd64,beta.kubernetes.io/os=linux,kubernetes.io/arch=amd64,kubernetes.io/hostname=worker-node2,kubernetes.io/os=linux master-node Ready master 42d v1.20.10 beta.kubernetes.io/arch=amd64,beta.kubernetes.io/os=linux,kubernetes.io/arch=amd64,kubernetes.io/hostname=master-node,kubernetes.io/os=linux,node-role.kubernetes.io/master= If the nodes are not labelled, run the following command:\n$ kubectl label nodes --all kubernetes.io/os=linux Clone the kube-prometheus repository by running the following commands:\n$ cd $WORKDIR/kubernetes/monitoring-service $ git clone https://github.com/coreos/kube-prometheus.git -b v0.7.0 Note: Please refer to the compatibility matrix of Kube Prometheus. 
Please download the release of the repository according to the Kubernetes version of your cluster.\n If your cluster does not have access to the internet to pull external images, such as grafana, you must load the images in a local container registry.\nFor grafana, edit the $WORKDIR/kubernetes/monitoring-service/kube-prometheus/manifests/grafana-deployment.yaml and change image: grafana/grafana:7.3.4 to your local container registry image location, for example image: container-registry.example.com/grafana/grafana:8.3.4.\nFor any other images check the $WORKDIR/kubernetes/monitoring-service/kube-prometheus/manifests/*deployment.yaml files.\n Run the following command to create the namespace and custom resource definitions:\n$ cd $WORKDIR/kubernetes/monitoring-service/kube-prometheus $ kubectl create -f manifests/setup The output will look similar to the following:\nnamespace/monitoring created customresourcedefinition.apiextensions.k8s.io/alertmanagerconfigs.monitoring.coreos.com created customresourcedefinition.apiextensions.k8s.io/alertmanagers.monitoring.coreos.com created customresourcedefinition.apiextensions.k8s.io/podmonitors.monitoring.coreos.com created customresourcedefinition.apiextensions.k8s.io/probes.monitoring.coreos.com created customresourcedefinition.apiextensions.k8s.io/prometheuses.monitoring.coreos.com created customresourcedefinition.apiextensions.k8s.io/prometheusrules.monitoring.coreos.com created customresourcedefinition.apiextensions.k8s.io/servicemonitors.monitoring.coreos.com created customresourcedefinition.apiextensions.k8s.io/thanosrulers.monitoring.coreos.com created clusterrole.rbac.authorization.k8s.io/prometheus-operator created clusterrolebinding.rbac.authorization.k8s.io/prometheus-operator created deployment.apps/prometheus-operator created service/prometheus-operator created serviceaccount/prometheus-operator created Run the following command to created the rest of the resources:\n$ kubectl create -f manifests/ The output will look similar to the following:\nalertmanager.monitoring.coreos.com/main created networkpolicy.networking.k8s.io/alertmanager-main created poddisruptionbudget.policy/alertmanager-main created prometheusrule.monitoring.coreos.com/alertmanager-main-rules created secret/alertmanager-main created service/alertmanager-main created serviceaccount/alertmanager-main created servicemonitor.monitoring.coreos.com/alertmanager-main created clusterrole.rbac.authorization.k8s.io/blackbox-exporter created clusterrolebinding.rbac.authorization.k8s.io/blackbox-exporter created configmap/blackbox-exporter-configuration created deployment.apps/blackbox-exporter created networkpolicy.networking.k8s.io/blackbox-exporter created service/blackbox-exporter created serviceaccount/blackbox-exporter created servicemonitor.monitoring.coreos.com/blackbox-exporter created secret/grafana-config created secret/grafana-datasources created configmap/grafana-dashboard-alertmanager-overview created configmap/grafana-dashboard-apiserver created configmap/grafana-dashboard-cluster-total created configmap/grafana-dashboard-controller-manager created configmap/grafana-dashboard-grafana-overview created configmap/grafana-dashboard-k8s-resources-cluster created configmap/grafana-dashboard-k8s-resources-namespace created configmap/grafana-dashboard-k8s-resources-node created configmap/grafana-dashboard-k8s-resources-pod created configmap/grafana-dashboard-k8s-resources-workload created configmap/grafana-dashboard-k8s-resources-workloads-namespace created 
configmap/grafana-dashboard-kubelet created configmap/grafana-dashboard-namespace-by-pod created configmap/grafana-dashboard-namespace-by-workload created configmap/grafana-dashboard-node-cluster-rsrc-use created configmap/grafana-dashboard-node-rsrc-use created configmap/grafana-dashboard-nodes-darwin created configmap/grafana-dashboard-nodes created configmap/grafana-dashboard-persistentvolumesusage created configmap/grafana-dashboard-pod-total created configmap/grafana-dashboard-prometheus-remote-write created configmap/grafana-dashboard-prometheus created configmap/grafana-dashboard-proxy created configmap/grafana-dashboard-scheduler created configmap/grafana-dashboard-workload-total created configmap/grafana-dashboards created deployment.apps/grafana created networkpolicy.networking.k8s.io/grafana created prometheusrule.monitoring.coreos.com/grafana-rules created service/grafana created serviceaccount/grafana created servicemonitor.monitoring.coreos.com/grafana created prometheusrule.monitoring.coreos.com/kube-prometheus-rules created clusterrole.rbac.authorization.k8s.io/kube-state-metrics created clusterrolebinding.rbac.authorization.k8s.io/kube-state-metrics created deployment.apps/kube-state-metrics created networkpolicy.networking.k8s.io/kube-state-metrics created prometheusrule.monitoring.coreos.com/kube-state-metrics-rules created service/kube-state-metrics created serviceaccount/kube-state-metrics created servicemonitor.monitoring.coreos.com/kube-state-metrics created prometheusrule.monitoring.coreos.com/kubernetes-monitoring-rules created servicemonitor.monitoring.coreos.com/kube-apiserver created servicemonitor.monitoring.coreos.com/coredns created servicemonitor.monitoring.coreos.com/kube-controller-manager created servicemonitor.monitoring.coreos.com/kube-scheduler created servicemonitor.monitoring.coreos.com/kubelet created clusterrole.rbac.authorization.k8s.io/node-exporter created clusterrolebinding.rbac.authorization.k8s.io/node-exporter created daemonset.apps/node-exporter created networkpolicy.networking.k8s.io/node-exporter created prometheusrule.monitoring.coreos.com/node-exporter-rules created service/node-exporter created serviceaccount/node-exporter created servicemonitor.monitoring.coreos.com/node-exporter created clusterrole.rbac.authorization.k8s.io/prometheus-k8s created clusterrolebinding.rbac.authorization.k8s.io/prometheus-k8s created networkpolicy.networking.k8s.io/prometheus-k8s created poddisruptionbudget.policy/prometheus-k8s created prometheus.monitoring.coreos.com/k8s created prometheusrule.monitoring.coreos.com/prometheus-k8s-prometheus-rules created rolebinding.rbac.authorization.k8s.io/prometheus-k8s-config created rolebinding.rbac.authorization.k8s.io/prometheus-k8s created rolebinding.rbac.authorization.k8s.io/prometheus-k8s created rolebinding.rbac.authorization.k8s.io/prometheus-k8s created role.rbac.authorization.k8s.io/prometheus-k8s-config created role.rbac.authorization.k8s.io/prometheus-k8s created role.rbac.authorization.k8s.io/prometheus-k8s created role.rbac.authorization.k8s.io/prometheus-k8s created service/prometheus-k8s created serviceaccount/prometheus-k8s created servicemonitor.monitoring.coreos.com/prometheus-k8s created apiservice.apiregistration.k8s.io/v1beta1.metrics.k8s.io created clusterrole.rbac.authorization.k8s.io/prometheus-adapter created clusterrole.rbac.authorization.k8s.io/system:aggregated-metrics-reader created clusterrolebinding.rbac.authorization.k8s.io/prometheus-adapter created 
clusterrolebinding.rbac.authorization.k8s.io/resource-metrics:system:auth-delegator created clusterrole.rbac.authorization.k8s.io/resource-metrics-server-resources created configmap/adapter-config created deployment.apps/prometheus-adapter created networkpolicy.networking.k8s.io/prometheus-adapter created poddisruptionbudget.policy/prometheus-adapter created rolebinding.rbac.authorization.k8s.io/resource-metrics-auth-reader created service/prometheus-adapter created serviceaccount/prometheus-adapter created servicemonitor.monitoring.coreos.com/prometheus-adapter created clusterrole.rbac.authorization.k8s.io/prometheus-operator created clusterrolebinding.rbac.authorization.k8s.io/prometheus-operator created deployment.apps/prometheus-operator created networkpolicy.networking.k8s.io/prometheus-operator created prometheusrule.monitoring.coreos.com/prometheus-operator-rules created service/prometheus-operator created serviceaccount/prometheus-operator created servicemonitor.monitoring.coreos.com/prometheus-operator created Provide external access for Grafana, Prometheus, and Alertmanager, by running the following commands:\n$ kubectl patch svc grafana -n monitoring --type=json -p \u0026#39;[{\u0026#34;op\u0026#34;: \u0026#34;replace\u0026#34;, \u0026#34;path\u0026#34;: \u0026#34;/spec/type\u0026#34;, \u0026#34;value\u0026#34;: \u0026#34;NodePort\u0026#34; },{\u0026#34;op\u0026#34;: \u0026#34;replace\u0026#34;, \u0026#34;path\u0026#34;: \u0026#34;/spec/ports/0/nodePort\u0026#34;, \u0026#34;value\u0026#34;: 32100 }]\u0026#39; $ kubectl patch svc prometheus-k8s -n monitoring --type=json -p \u0026#39;[{\u0026#34;op\u0026#34;: \u0026#34;replace\u0026#34;, \u0026#34;path\u0026#34;: \u0026#34;/spec/type\u0026#34;, \u0026#34;value\u0026#34;: \u0026#34;NodePort\u0026#34; },{\u0026#34;op\u0026#34;: \u0026#34;replace\u0026#34;, \u0026#34;path\u0026#34;: \u0026#34;/spec/ports/0/nodePort\u0026#34;, \u0026#34;value\u0026#34;: 32101 }]\u0026#39; $ kubectl patch svc alertmanager-main -n monitoring --type=json -p \u0026#39;[{\u0026#34;op\u0026#34;: \u0026#34;replace\u0026#34;, \u0026#34;path\u0026#34;: \u0026#34;/spec/type\u0026#34;, \u0026#34;value\u0026#34;: \u0026#34;NodePort\u0026#34; },{\u0026#34;op\u0026#34;: \u0026#34;replace\u0026#34;, \u0026#34;path\u0026#34;: \u0026#34;/spec/ports/0/nodePort\u0026#34;, \u0026#34;value\u0026#34;: 32102 }]\u0026#39; Note: This assigns port 32100 to Grafana, 32101 to Prometheus, and 32102 to Alertmanager.\nThe output will look similar to the following:\nservice/grafana patched service/prometheus-k8s patched service/alertmanager-main patched Verify that the Prometheus, Grafana, and Alertmanager pods are running in the monitoring namespace and the respective services have the exports configured correctly:\n$ kubectl get pods,services -o wide -n monitoring The output should look similar to the following:\nNAME READY STATUS RESTARTS AGE IP NODE NOMINATED NODE READINESS GATES pod/alertmanager-main-0 2/2 Running 0 67s 10.244.1.7 worker-node1 \u0026lt;none\u0026gt; \u0026lt;none\u0026gt; pod/alertmanager-main-1 2/2 Running 0 67s 10.244.2.26 worker-node2 \u0026lt;none\u0026gt; \u0026lt;none\u0026gt; pod/alertmanager-main-2 2/2 Running 0 67s 10.244.1.8 worker-node1 \u0026lt;none\u0026gt; \u0026lt;none\u0026gt; pod/grafana-f8cd57fcf-tmlqt 1/1 Running 0 65s 10.244.2.28 worker-node2 \u0026lt;none\u0026gt; \u0026lt;none\u0026gt; pod/kube-state-metrics-587bfd4f97-l8knh 3/3 Running 0 65s 10.244.1.9 worker-node1 \u0026lt;none\u0026gt; \u0026lt;none\u0026gt; pod/node-exporter-2ztpd 2/2 
Running 0 65s 10.247.95.26 worker-node1 \u0026lt;none\u0026gt; \u0026lt;none\u0026gt; pod/node-exporter-92sxb 2/2 Running 0 65s 10.250.40.59 worker-node2 \u0026lt;none\u0026gt; \u0026lt;none\u0026gt; pod/node-exporter-d77tl 2/2 Running 0 65s 10.196.54.36 master-node \u0026lt;none\u0026gt; \u0026lt;none\u0026gt; pod/prometheus-adapter-69b8496df6-6gqrz 1/1 Running 0 65s 10.244.2.29 worker-node2 \u0026lt;none\u0026gt; \u0026lt;none\u0026gt; pod/prometheus-k8s-0 2/2 Running 1 66s 10.244.2.27 worker-node2 \u0026lt;none\u0026gt; \u0026lt;none\u0026gt; pod/prometheus-k8s-1 2/2 Running 1 66s 10.244.1.10 worker-node1 \u0026lt;none\u0026gt; \u0026lt;none\u0026gt; pod/prometheus-operator-7649c7454f-9p747 2/2 Running 0 2m 10.244.2.25 worker-node2 \u0026lt;none\u0026gt; \u0026lt;none\u0026gt; NAME TYPE CLUSTER-IP EXTERNAL-IP PORT(S) AGE SELECTOR service/alertmanager-main NodePort 10.104.92.62 \u0026lt;none\u0026gt; 9093:32102/TCP 67s alertmanager=main,app=alertmanager service/alertmanager-operated ClusterIP None \u0026lt;none\u0026gt; 9093/TCP,9094/TCP,9094/UDP 67s app=alertmanager service/grafana NodePort 10.100.171.3 \u0026lt;none\u0026gt; 3000:32100/TCP 66s app=grafana service/kube-state-metrics ClusterIP None \u0026lt;none\u0026gt; 8443/TCP,9443/TCP 66s app.kubernetes.io/name=kube-state-metrics service/node-exporter ClusterIP None \u0026lt;none\u0026gt; 9100/TCP 66s app.kubernetes.io/name=node-exporter service/prometheus-adapter ClusterIP 10.109.248.92 \u0026lt;none\u0026gt; 443/TCP 66s name=prometheus-adapter service/prometheus-k8s NodePort 10.98.212.247 \u0026lt;none\u0026gt; 9090:32101/TCP 66s app=prometheus,prometheus=k8s service/prometheus-operated ClusterIP None \u0026lt;none\u0026gt; 9090/TCP 66s app=prometheus service/prometheus-operator ClusterIP None \u0026lt;none\u0026gt; 8443/TCP 2m1s app.kubernetes.io/component=controller,app.kubernetes.io/name=prometheus-operator Deploy WebLogic Monitoring Exporter Generate the WebLogic Monitoring Exporter deployment package. The wls-exporter.war package need to be updated and created for each listening port (Administration Server and Managed Servers) in the domain. 
Set the below environment values and run the script get-wls-exporter.sh to generate the required WAR files at ${WORKDIR}/kubernetes/monitoring-service/scripts/wls-exporter-deploy:\n$ cd $WORKDIR/kubernetes/monitoring-service/scripts $ export adminServerPort=7001 $ export wlsMonitoringExporterTopolicyCluster=true $ export policyManagedServerPort=15100 $ export wlsMonitoringExporterTooamCluster=true $ export oamManagedServerPort=14100 $ sh get-wls-exporter.sh The output will look similar to the following:\n % Total % Received % Xferd Average Speed Time Time Time Current Dload Upload Total Spent Left Speed 100 655 100 655 0 0 1107 0 --:--:-- --:--:-- --:--:-- 1108 100 2196k 100 2196k 0 0 1787k 0 0:00:01 0:00:01 --:--:-- 9248k created $WORKDIR/kubernetes/monitoring-service/scripts/wls-exporter-deploy dir domainNamespace is empty, setting to default oamns domainUID is empty, setting to default accessdomain weblogicCredentialsSecretName is empty, setting to default \u0026quot;accessdomain-domain-credentials\u0026quot; adminServerName is empty, setting to default \u0026quot;AdminServer\u0026quot; oamClusterName is empty, setting to default \u0026quot;oam_cluster\u0026quot; policyClusterName is empty, setting to default \u0026quot;policy_cluster\u0026quot; created /tmp/ci-Bu74rCBxwu /tmp/ci-Bu74rCBxwu $WORKDIR/kubernetes/monitoring-service/scripts in temp dir adding: WEB-INF/weblogic.xml (deflated 61%) adding: config.yml (deflated 60%) $WORKDIR/kubernetes/monitoring-service/scripts created /tmp/ci-RQv3rLbLsX /tmp/ci-RQv3rLbLsX $WORKDIR/kubernetes/monitoring-service/scripts in temp dir adding: WEB-INF/weblogic.xml (deflated 61%) adding: config.yml (deflated 60%) $WORKDIR/kubernetes/monitoring-service/scripts created /tmp/ci-DWIYlocP5e /tmp/ci-DWIYlocP5e $WORKDIR/kubernetes/monitoring-service/scripts in temp dir adding: WEB-INF/weblogic.xml (deflated 61%) adding: config.yml (deflated 60%) $WORKDIR/kubernetes/monitoring-service/scripts Deploy the WebLogic Monitoring Exporter WAR files into the Oracle Access Management domain:\n$ cd $WORKDIR/kubernetes/monitoring-service/scripts $ kubectl cp wls-exporter-deploy \u0026lt;domain_namespace\u0026gt;/\u0026lt;domain_uid\u0026gt;-adminserver:/u01/oracle $ kubectl cp deploy-weblogic-monitoring-exporter.py \u0026lt;domain_namespace\u0026gt;/\u0026lt;domain_uid\u0026gt;-adminserver:/u01/oracle/wls-exporter-deploy $ kubectl exec -it -n \u0026lt;domain_namespace\u0026gt; \u0026lt;domain_uid\u0026gt;-adminserver -- /u01/oracle/oracle_common/common/bin/wlst.sh /u01/oracle/wls-exporter-deploy/deploy-weblogic-monitoring-exporter.py -domainName \u0026lt;domain_uid\u0026gt; -adminServerName AdminServer -adminURL \u0026lt;domain_uid\u0026gt;-adminserver:7001 -username weblogic -password \u0026lt;password\u0026gt; -oamClusterName oam_cluster -wlsMonitoringExporterTooamCluster true -policyClusterName policy_cluster -wlsMonitoringExporterTopolicyCluster true For example:\n$ cd $WORKDIR/kubernetes/monitoring-service/scripts $ kubectl cp wls-exporter-deploy oamns/accessdomain-adminserver:/u01/oracle $ kubectl cp deploy-weblogic-monitoring-exporter.py oamns/accessdomain-adminserver:/u01/oracle/wls-exporter-deploy $ kubectl exec -it -n oamns accessdomain-adminserver -- /u01/oracle/oracle_common/common/bin/wlst.sh /u01/oracle/wls-exporter-deploy/deploy-weblogic-monitoring-exporter.py -domainName accessdomain -adminServerName AdminServer -adminURL accessdomain-adminserver:7001 -username weblogic -password \u0026lt;password\u0026gt; -oamClusterName oam_cluster 
-wlsMonitoringExporterTooamCluster true -policyClusterName policy_cluster -wlsMonitoringExporterTopolicyCluster true The output will look similar to the following:\nInitializing WebLogic Scripting Tool (WLST) ... Welcome to WebLogic Server Administration Scripting Shell Type help() for help on available commands Connecting to t3://accessdomain-adminserver:7001 with userid weblogic ... Successfully connected to Admin Server \u0026quot;AdminServer\u0026quot; that belongs to domain \u0026quot;accessdomain\u0026quot;. Warning: An insecure protocol was used to connect to the server. To ensure on-the-wire security, the SSL port or Admin port should be used instead. Deploying ......... Deploying application from /u01/oracle/wls-exporter-deploy/wls-exporter-adminserver.war to targets AdminServer (upload=true) ... \u0026lt;DATE\u0026gt; \u0026lt;Info\u0026gt; \u0026lt;J2EE Deployment SPI\u0026gt; \u0026lt;BEA-260121\u0026gt; \u0026lt;Initiating deploy operation for application, wls-exporter-adminserver [archive: /u01/oracle/wls-exporter-deploy/wls-exporter-adminserver.war], to AdminServer .\u0026gt; ..Completed the deployment of Application with status completed Current Status of your Deployment: Deployment command type: deploy Deployment State : completed Deployment Message : no message Starting application wls-exporter-adminserver. \u0026lt;DATE\u0026gt; \u0026lt;Info\u0026gt; \u0026lt;J2EE Deployment SPI\u0026gt; \u0026lt;BEA-260121\u0026gt; \u0026lt;Initiating start operation for application, wls-exporter-adminserver [archive: null], to AdminServer .\u0026gt; .Completed the start of Application with status completed Current Status of your Deployment: Deployment command type: start Deployment State : completed Deployment Message : no message Deploying ......... Deploying application from /u01/oracle/wls-exporter-deploy/wls-exporter-oam.war to targets oam_cluster (upload=true) ... \u0026lt;DATE\u0026gt; \u0026lt;Info\u0026gt; \u0026lt;J2EE Deployment SPI\u0026gt; \u0026lt;BEA-260121\u0026gt; \u0026lt;Initiating deploy operation for application, wls-exporter-oam [archive: /u01/oracle/wls-exporter-deploy/wls-exporter-oam.war], to oam_cluster .\u0026gt; .Completed the deployment of Application with status completed Current Status of your Deployment: Deployment command type: deploy Deployment State : completed Deployment Message : no message Starting application wls-exporter-oam. \u0026lt;DATE\u0026gt; \u0026lt;Info\u0026gt; \u0026lt;J2EE Deployment SPI\u0026gt; \u0026lt;BEA-260121\u0026gt; \u0026lt;Initiating start operation for application, wls-exporter-oam [archive: null], to oam_cluster .\u0026gt; .Completed the start of Application with status completed Current Status of your Deployment: Deployment command type: start Deployment State : completed Deployment Message : no message Deploying ......... Deploying application from /u01/oracle/wls-exporter-deploy/wls-exporter-policy.war to targets policy_cluster (upload=true) ... \u0026lt;DATE\u0026gt; \u0026lt;Info\u0026gt; \u0026lt;J2EE Deployment SPI\u0026gt; \u0026lt;BEA-260121\u0026gt; \u0026lt;Initiating deploy operation for application, wls-exporter-policy [archive: /u01/oracle/wls-exporter-deploy/wls-exporter-policy.war], to policy_cluster .\u0026gt; .Completed the deployment of Application with status completed Current Status of your Deployment: Deployment command type: deploy Deployment State : completed Deployment Message : no message Starting application wls-exporter-policy. 
\u0026lt;DATE\u0026gt; \u0026lt;Info\u0026gt; \u0026lt;J2EE Deployment SPI\u0026gt; \u0026lt;BEA-260121\u0026gt; \u0026lt;Initiating start operation for application, wls-exporter-policy [archive: null], to policy_cluster .\u0026gt; .Completed the start of Application with status completed Current Status of your Deployment: Deployment command type: start Deployment State : completed Deployment Message : no message Disconnected from weblogic server: AdminServer Exiting WebLogic Scripting Tool. \u0026lt;DATE\u0026gt; \u0026lt;Warning\u0026gt; \u0026lt;JNDI\u0026gt; \u0026lt;BEA-050001\u0026gt; \u0026lt;WLContext.close() was called in a different thread than the one in which it was created.\u0026gt; Configure Prometheus Operator Prometheus enables you to collect metrics from the WebLogic Monitoring Exporter. The Prometheus Operator identifies the targets using service discovery. To get the WebLogic Monitoring Exporter end point discovered as a target, you must create a service monitor pointing to the service.\nThe exporting of metrics from wls-exporter requires basicAuth, so a Kubernetes Secret is created with the user name and password that are base64 encoded. This Secret is used in the ServiceMonitor deployment. The wls-exporter-ServiceMonitor.yaml has basicAuth with credentials as username: weblogic and password: \u0026lt;password\u0026gt; in base64 encoded.\n Run the following command to get the base64 encoded version of the weblogic password:\n$ echo -n \u0026#34;\u0026lt;password\u0026gt;\u0026#34; | base64 The output will look similar to the following:\nV2VsY29tZTE= Update the $WORKDIR/kubernetes/monitoring-service/manifests/wls-exporter-ServiceMonitor.yaml and change the password: value to the value returned above. Also change the namespace: and weblogic.domainName: values to match your OAM namespace and domain name:\napiVersion: v1 kind: Secret metadata: name: basic-auth namespace: oamns data: password: V2VsY29tZTE= user: d2VibG9naWM= type: Opaque --- apiVersion: monitoring.coreos.com/v1 kind: ServiceMonitor metadata: name: wls-exporter namespace: oamns labels: k8s-app: wls-exporter release: monitoring spec: namespaceSelector: matchNames: - oamns selector: matchLabels: weblogic.domainName: accessdomain endpoints: - basicAuth: password: name: basic-auth key: password username: name: basic-auth key: user port: default relabelings: - action: labelmap regex: __meta_kubernetes_service_label_(.+) interval: 10s honorLabels: true path: /wls-exporter/metrics Update the $WORKDIR/kubernetes/monitoring-service/manifests/prometheus-roleSpecific-domain-namespace.yaml and change the namespace to match your OAM namespace. For example:\napiVersion: rbac.authorization.k8s.io/v1 items: - apiVersion: rbac.authorization.k8s.io/v1 kind: Role metadata: name: prometheus-k8s namespace: oamns rules: - apiGroups: - \u0026quot;\u0026quot; resources: - services - endpoints - pods verbs: - get - list - watch kind: RoleList Update the $WORKDIR/kubernetes/monitoring-service/manifests/prometheus-roleBinding-domain-namespace.yaml and change the namespace to match your OAM namespace. 
For example:\napiVersion: rbac.authorization.k8s.io/v1 items: - apiVersion: rbac.authorization.k8s.io/v1 kind: RoleBinding metadata: name: prometheus-k8s namespace: oamns roleRef: apiGroup: rbac.authorization.k8s.io kind: Role name: prometheus-k8s subjects: - kind: ServiceAccount name: prometheus-k8s namespace: monitoring kind: RoleBindingList Run the following command to enable Prometheus:\n$ cd $WORKDIR/kubernetes/monitoring-service/manifests $ kubectl apply -f . The output will look similar to the following:\nrolebinding.rbac.authorization.k8s.io/prometheus-k8s created role.rbac.authorization.k8s.io/prometheus-k8s created secret/basic-auth created servicemonitor.monitoring.coreos.com/wls-exporter created Prometheus Service Discovery After the ServiceMonitor is deployed, the wls-exporter should be discovered by Prometheus and be able to collect metrics.\n Access the following URL to view Prometheus service discovery: http://${MASTERNODE-HOSTNAME}:32101/service-discovery\n Click on oamns/wls-exporter/0 and then show more. Verify all the targets are mentioned.\n Grafana Dashboard Access the Grafana dashboard with the following URL: http://${MASTERNODE-HOSTNAME}:32100 and login with admin/admin. Change your password when prompted.\n Import the Grafana dashboard by navigating on the left hand menu to Create \u0026gt; Import. Copy the content from $WORKDIR/kubernetes/monitoring-service/config/weblogic-server-dashboard-import.json and paste. Then click Load and Import. The dashboard should be displayed in the Dashboards panel.\n Cleanup To clean up a manual installation:\n Run the following commands:\n$ cd $WORKDIR/kubernetes/monitoring-service/manifests/ $ kubectl delete -f . Delete the deployments:\n$ cd $WORKDIR/kubernetes/monitoring-service/scripts/ $ kubectl cp undeploy-weblogic-monitoring-exporter.py \u0026lt;domain_namespace\u0026gt;/\u0026lt;domain_uid\u0026gt;-adminserver:/u01/oracle/wls-exporter-deploy $ kubectl exec -it -n \u0026lt;domain_namespace\u0026gt; \u0026lt;domain_uid\u0026gt;-adminserver -- /u01/oracle/oracle_common/common/bin/wlst.sh /u01/oracle/wls-exporter-deploy/undeploy-weblogic-monitoring-exporter.py -domainName \u0026lt;domain_uid\u0026gt; -adminServerName AdminServer -adminURL \u0026lt;domain_uid\u0026gt;-adminserver:7001 -username weblogic -password \u0026lt;password\u0026gt; -oamClusterName oam_cluster -wlsMonitoringExporterTooamCluster true -policyClusterName policy_cluster -wlsMonitoringExporterTopolicyCluster true Delete Prometheus:\n$ cd $WORKDIR/kubernetes/monitoring-service/kube-prometheus $ kubectl delete -f manifests " +}, +{ + "uri": "/fmw-kubernetes/23.4.1/idm-products/oam/patch-and-upgrade/upgrade-elk/", + "title": "d. 
Upgrade Elasticsearch and Kibana", + "tags": [], + "description": "Instructions on how to upgrade Elastic Search and Kibana.", + "content": "This section shows how to upgrade Elasticsearch and Kibana.\nTo determine if this step is required for the version you are upgrading to, refer to the Release Notes.\nUndeploy Elasticsearch and Kibana From October 22 (22.4.1) onwards, OAM logs should be stored on a centralized Elasticsearch and Kibana stack.\nDeployments prior to October 22 (22.4.1) used local deployments of Elasticsearch and Kibana.\nIf you are upgrading from July 22 (22.3.1) or earlier, to October 22 (22.4.1) or later, you must first undeploy Elasticsearch and Kibana using the steps below:\n Make sure you have downloaded the latest code repository as per Download the latest code repository\n Edit the $WORKDIR/kubernetes/elasticsearch-and-kibana/elasticsearch_and_kibana.yaml and change all instances of namespace to correspond to your deployment.\n Delete the Elasticsearch and Kibana resources using the following command:\n$ kubectl delete -f $WORKDIR/kubernetes/elasticsearch-and-kibana/elasticsearch_and_kibana.yaml Deploy Elasticsearch and Kibana in centralized stack Follow Install Elasticsearch stack and Kibana to deploy Elasticsearch and Kibana in a centralized stack. " +}, +{ + "uri": "/fmw-kubernetes/23.4.1/idm-products/oig/patch-and-upgrade/upgrade-elk/", + "title": "d. Upgrade Elasticsearch and Kibana", + "tags": [], + "description": "Instructions on how to upgrade Elastic Search and Kibana.", + "content": "This section shows how to upgrade Elasticsearch and Kibana.\nTo determine if this step is required for the version you are upgrading to, refer to the Release Notes.\nDownload the latest code repository Make sure you have downloaded the latest code as per Download the latest code repository. Undeploy Elasticsearch and Kibana From October 22 (22.4.1) onwards, OIG logs should be stored on a centralized Elasticsearch and Kibana stack.\nDeployments prior to October 22 (22.4.1) used local deployments of Elasticsearch and Kibana.\nIf you are upgrading from July 22 (22.3.1) or earlier, to October 22 (22.4.1) or later, you must first undeploy Elasticsearch and Kibana using the steps below:\n Edit the $WORKDIR/kubernetes/elasticsearch-and-kibana/elasticsearch_and_kibana.yaml and change all instances of namespace to correspond to your deployment.\n Delete the Elasticsearch and Kibana resources using the following command:\n$ kubectl delete -f $WORKDIR/kubernetes/elasticsearch-and-kibana/elasticsearch_and_kibana.yaml Deploy Elasticsearch and Kibana in centralized stack Follow Install Elasticsearch stack and Kibana to deploy Elasticsearch and Kibana in a centralized stack. " +}, +{ + "uri": "/fmw-kubernetes/23.4.1/idm-products/oam/manage-oam-domains/hpa/", + "title": "e. Kubernetes Horizontal Pod Autoscaler", + "tags": [], + "description": "Describes the steps for implementing the Horizontal Pod Autoscaler.", + "content": " Prerequisite configuration Deploy the Kubernetes Metrics Server Troubleshooting Deploy HPA Testing HPA Delete the HPA Other considerations Kubernetes Horizontal Pod Autoscaler (HPA) is supported from Weblogic Kubernetes Operator 4.0.X and later.\nHPA allows automatic scaling (up and down) of the OAM Managed Servers. If load increases then extra OAM Managed Servers will be started as required, up to the value configuredManagedServerCount defined when the domain was created (see Prepare the create domain script). 
Similarly, if load decreases, OAM Managed Servers will be automatically shutdown.\nFor more information on HPA, see Horizontal Pod Autoscaling.\nThe instructions below show you how to configure and run an HPA to scale an OAM cluster (accessdomain-oam-cluster) resource, based on CPU utilization or memory resource metrics. If required, you can also perform the following for the accessdomain-policy-cluster.\nNote: If you enable HPA and then decide you want to start/stop/scale OAM Managed servers manually as per Domain Life Cycle, it is recommended to delete HPA beforehand as per Delete the HPA.\nPrerequisite configuration In order to use HPA, the OAM domain must have been created with the required resources parameter as per Set the OAM server memory parameters. For example:\nserverPod: env: - name: USER_MEM_ARGS value: \u0026quot;-XX:+UseContainerSupport -Djava.security.egd=file:/dev/./urandom -Xms8192m -Xmx8192m\u0026quot; resources: limits: cpu: \u0026quot;2\u0026quot; memory: \u0026quot;8Gi\u0026quot; requests: cpu: \u0026quot;1000m\u0026quot; memory: \u0026quot;4Gi\u0026quot; If you created the OAM domain without setting these parameters, then you can update the domain using the following steps:\n Run the following command to edit the cluster:\n$ kubectl edit cluster accessdomain-oam-cluster -n oamns Note: This opens an edit session for the oam-cluster where parameters can be changed using standard vi commands.\n In the edit session, search for spec:, and then look for the replicas parameter under clusterName: oam_cluster. Change the entry so it looks as follows:\nspec: clusterName: oam_cluster replicas: 1 serverPod: env: - name: USER_MEM_ARGS value: -XX:+UseContainerSupport -Djava.security.egd=file:/dev/./urandom -Xms8192m -Xmx8192m resources: limits: cpu: \u0026quot;2\u0026quot; memory: 8Gi requests: cpu: 1000m memory: 4Gi serverService: precreateService: true ... Save the file and exit (:wq!)\nThe output will look similar to the following:\ncluster.weblogic.oracle/accessdomain-oam-cluster edited The OAM Managed Server pods will then automatically be restarted.\n Deploy the Kubernetes Metrics Server Before deploying HPA you must deploy the Kubernetes Metrics Server.\n Check to see if the Kubernetes Metrics Server is already deployed:\n$ kubectl get pods -n kube-system | grep metric If a row is returned as follows, then Kubernetes Metric Server is deployed and you can move to Deploy HPA.\nmetrics-server-d9694457-mf69d 1/1 Running 0 5m13s If no rows are returned by the previous command, then the Kubernetes Metric Server needs to be deployed. 
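Note: once the Metrics Server has been deployed using the steps below, an illustrative way (not part of the original procedure) to confirm that the Metrics API is actually serving data, beyond the pod reaching READY, is to query it directly, for example:\n$ kubectl top nodes $ kubectl top pods -n oamns If these commands return CPU and memory figures rather than an error, HPA will be able to read the metrics it needs.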
Run the following commands to get the components.yaml:\n$ mkdir $WORKDIR/kubernetes/hpa $ cd $WORKDIR/kubernetes/hpa $ wget https://github.com/kubernetes-sigs/metrics-server/releases/latest/download/components.yaml Deploy the Kubernetes Metrics Server by running the following command:\n$ kubectl apply -f components.yaml The output will look similar to the following:\nserviceaccount/metrics-server created clusterrole.rbac.authorization.k8s.io/system:aggregated-metrics-reader created clusterrole.rbac.authorization.k8s.io/system:metrics-server created rolebinding.rbac.authorization.k8s.io/metrics-server-auth-reader created clusterrolebinding.rbac.authorization.k8s.io/metrics-server:system:auth-delegator created clusterrolebinding.rbac.authorization.k8s.io/system:metrics-server created service/metrics-server created deployment.apps/metrics-server created apiservice.apiregistration.k8s.io/v1beta1.metrics.k8s.io created Run the following command to check the Kubernetes Metric Server is running:\n$ kubectl get pods -n kube-system | grep metric Make sure the pod has a READY status of 1/1:\nmetrics-server-d9694457-mf69d 1/1 Running 0 39s Troubleshooting If the Kubernetes Metric Server does not reach the READY 1/1 state, run the following commands:\n$ kubectl describe pod \u0026lt;metrics-server-pod\u0026gt; -n kube-system $ kubectl logs \u0026lt;metrics-server-pod\u0026gt; -n kube-system If you see errors such as:\nReadiness probe failed: HTTP probe failed with statuscode: 500 and:\nE0907 13:07:50.937308 1 scraper.go:140] \u0026quot;Failed to scrape node\u0026quot; err=\u0026quot;Get \\\u0026quot;https://100.105.18.113:10250/metrics/resource\\\u0026quot;: x509: cannot validate certificate for 100.105.18.113 because it doesn't contain any IP SANs\u0026quot; node=\u0026quot;worker-node1\u0026quot; then you may need to install a valid cluster certificate for your Kubernetes cluster.\nFor testing purposes, you can resolve this issue by:\n Delete the Kubernetes Metrics Server by running the following command:\n$ kubectl delete -f $WORKDIR/kubernetes/hpa/components.yaml Edit the $WORKDIR/kubernetes/hpa/components.yaml and locate the args: section. Add --kubelet-insecure-tls to the arguments. For example:\nspec: containers: - args: - --cert-dir=/tmp - --secure-port=4443 - --kubelet-preferred-address-types=InternalIP,ExternalIP,Hostname - --kubelet-use-node-status-port - --kubelet-insecure-tls - --metric-resolution=15s image: registry.k8s.io/metrics-server/metrics-server:v0.6.4 ... Deploy the Kubernetes Metrics Server using the command:\n$ kubectl apply -f components.yaml Run the following and make sure the READY status shows 1/1:\n$ kubectl get pods -n kube-system | grep metric The output should look similar to the following:\nmetrics-server-d9694457-mf69d 1/1 Running 0 40s Deploy HPA The steps below show how to configure and run an HPA to scale the accessdomain-oam-cluster, based on the CPU or memory utilization resource metrics.\nThe default OAM deployment creates the cluster accessdomain-oam-cluster which starts one OAM Managed Server (oam_server1). The deployment also creates, but doesn’t start, four extra OAM Managed Servers (oam-server2 to oam-server5).\nIn the following example an HPA resource is created, targeted at the cluster resource accessdomain-oam-cluster. This resource will autoscale OAM Managed Servers from a minimum of 1 cluster member up to 5 cluster members. Scaling up will occur when the average CPU is consistently over 70%.
Scaling down will occur when the average CPU is consistently below 70%.\n Navigate to the $WORKDIR/kubernetes/hpa and create an autoscalehpa.yaml file that contains the following.\n# apiVersion: autoscaling/v2 kind: HorizontalPodAutoscaler metadata: name: accessdomain-oam-cluster-hpa namespace: oamns spec: scaleTargetRef: apiVersion: weblogic.oracle/v1 kind: Cluster name: accessdomain-oam-cluster behavior: scaleDown: stabilizationWindowSeconds: 60 scaleUp: stabilizationWindowSeconds: 60 minReplicas: 1 maxReplicas: 5 metrics: - type: Resource resource: name: cpu target: type: Utilization averageUtilization: 70 Note : minReplicas and maxReplicas should match your current domain settings.\nNote: For setting HPA based on Memory Metrics, update the metrics block with the following content. Please note we recommend using only CPU or Memory, not both.\nmetrics: - type: Resource resource: name: memory target: type: Utilization averageUtilization: 70 Run the following command to create the autoscaler:\n$ kubectl apply -f autoscalehpa.yaml The output will look similar to the following:\nhorizontalpodautoscaler.autoscaling/accessdomain-oam-cluster-hpa created Verify the status of the autoscaler by running the following:\n$ kubectl get hpa -n oamns The output will look similar to the following:\nNAME REFERENCE TARGETS MINPODS MAXPODS REPLICAS AGE accessdomain-oam-cluster-hpa Cluster/accessdomain-oam-cluster 5%/70% 1 5 1 21s In the example above, this shows that CPU is currently running at 5% for the accessdomain-oam-cluster-hpa.\n Testing HPA Check the current status of the OAM Managed Servers:\n$ kubectl get pods -n oamns The output will look similar to the following:\nNAME READY STATUS RESTARTS AGE accessdomain-adminserver 0/1 Running 0 141m accessdomain-create-oam-infra-domain-job-6br2j 0/1 Completed 0 5h19m accessdomain-oam-policy-mgr1 0/1 Running 0 138m accessdomain-oam-server1 1/1 Running 0 138m helper 1/1 Running 0 21h nginx-ingress-ingress-nginx-controller-5f9bdf4c9-f5trt 1/1 Running 0 4h33m In the above, only accessdomain-oam-server1 is running.\n To test HPA can scale up the WebLogic cluster accessdomain-oam-cluster, run the following commands:\n$ kubectl exec --stdin --tty accessdomain-oam-server1 -n oamns -- /bin/bash This will take you inside a bash shell inside the oam_server1 pod:\n[oracle@accessdomain-oam-server1 oracle]$ Inside the bash shell, run the following command to increase the load on the CPU:\n[oracle@accessdomain-oam-server1 oracle]$ dd if=/dev/zero of=/dev/null This command will continue to run in the foreground.\n In a command window outside the bash shell, run the following command to view the current CPU usage:\n$ kubectl get hpa -n oamns The output will look similar to the following:\nNAME REFERENCE TARGETS MINPODS MAXPODS REPLICAS AGE accessdomain-oam-cluster-hpa Cluster/accessdomain-oam-cluster 470%/70% 1 5 1 21s In the above example the CPU has increased to 470%. 
As this is above the 70% limit, the autoscaler increases the replicas on the Cluster resource and the operator responds by starting additional cluster members.\n Run the following to see if any more OAM Managed Servers are started:\n$ kubectl get pods -n oamns The output will look similar to the following:\nNAME READY STATUS RESTARTS AGE accessdomain-adminserver 0/1 Running 143m accessdomain-create-oam-infra-domain-job-6br2j 0/1 Completed 0 5h21m accessdomain-oam-policy-mgr1 0/1 Running 0 140m accessdomain-oam-server1 1/1 Running 0 140m accessdomain-oam-server2 1/1 Running 0 3m20s accessdomain-oam-server3 1/1 Running 0 3m20s accessdomain-oam-server4 1/1 Running 0 3m19s accessdomain-oam-server5 1/1 Running 0 3m5s helper 1/1 Running 0 21h In the example above four more OAM Managed Servers have been started (oam-server2 - oam-server5).\nNote: It may take some time for the servers to appear and start. Once the servers are at READY status of 1/1, the servers are started.\n To stop the load on the CPU, in the bash shell, issue a Control C, and then exit the bash shell:\n[oracle@accessdomain-oam-server1 oracle]$ dd if=/dev/zero of=/dev/null ^C [oracle@accessdomain-oam-server1 oracle]$ exit Run the following command to view the current CPU usage:\n$ kubectl get hpa -n oamns The output will look similar to the following:\nNAME REFERENCE TARGETS MINPODS MAXPODS REPLICAS AGE accessdomain-oam-cluster-hpa Cluster/accessdomain-oam-cluster 19%/70% 1 5 5 19m In the above example CPU has dropped to 19%. As this is below the 70% threshold, you should see the autoscaler scale down the servers:\n$ kubectl get pods -n oamns The output will look similar to the following:\nNAME READY STATUS RESTARTS AGE accessdomain-adminserver 1/1 Running 0 152m accessdomain-create-oam-infra-domain-job-6br2j 0/1 Completed 0 5h30m accessdomain-oam-policy-mgr1 1/1 Running 0 149m accessdomain-oam-server1 1/1 Running 0 149m accessdomain-oam-server2 1/1 Running 0 14m accessdomain-oam-server3 0/1 Terminating 0 14m helper 1/1 Running 0 21h nginx-ingress-ingress-nginx-controller-5f9bdf4c9-f5trt 1/1 Running 0 4h45m Eventually, all the servers except oam-server1 will disappear:\nNAME READY STATUS RESTARTS AGE accessdomain-adminserver 1/1 Running 0 154m accessdomain-create-oam-infra-domain-job-6br2j 0/1 Completed 0 5h32m accessdomain-oam-policy-mgr1 1/1 Running 0 151m accessdomain-oam-server1 1/1 Running 0 151m helper 1/1 Running 0 21h nginx-ingress-ingress-nginx-controller-5f9bdf4c9-f5trt 1/1 Running 0 4h47m Delete the HPA If you need to delete the HPA, you can do so by running the following command:\n$ cd $WORKDIR/kubernetes/hpa $ kubectl delete -f autoscalehpa.yaml Other considerations If HPA is deployed and you need to upgrade the OAM image, then you must delete the HPA before upgrading. Once the upgrade is successful you can deploy HPA again. If you choose to start/stop an OAM Managed Server manually as per Domain Life Cycle, then it is recommended to delete the HPA before doing so. " +}, +{ + "uri": "/fmw-kubernetes/23.4.1/idm-products/oig/manage-oig-domains/monitoring-oim-domains/", + "title": "e. Monitoring an OIG domain", + "tags": [], + "description": "Describes the steps for Monitoring the OIG domain and Publishing the logs to Elasticsearch.", + "content": "After the OIG domain is set up you can monitor the OIG instance using Prometheus and Grafana. See Monitoring a domain.\nThe WebLogic Monitoring Exporter uses the WLS RESTful Management API to scrape runtime information and then exports Prometheus-compatible metrics. 
It is deployed as a web application in a WebLogic Server (WLS) instance, version 12.2.1 or later, typically, in the instance from which you want to get metrics.\nThere are two ways to setup monitoring and you should choose one method or the other:\n Setup automatically using setup-monitoring.sh Setup using manual configuration Setup automatically using setup-monitoring.sh The $WORKDIR/kubernetes/monitoring-service/setup-monitoring.sh sets up the monitoring for the OIG domain. It installs Prometheus, Grafana, WebLogic Monitoring Exporter and deploys the web applications to the OIG domain. It also deploys the WebLogic Server Grafana dashboard.\nFor usage details execute ./setup-monitoring.sh -h.\n Edit the $WORKDIR/kubernetes/monitoring-service/monitoring-inputs.yaml and change the domainUID, domainNamespace, and weblogicCredentialsSecretName to correspond to your deployment. Also change wlsMonitoringExporterTosoaCluster, wlsMonitoringExporterTooimCluster, exposeMonitoringNodePort to true. For example:\nversion: create-governancedomain-monitoring-inputs-v1 # Unique ID identifying your domain. # This ID must not contain an underscope (\u0026quot;_\u0026quot;), and must be lowercase and unique across all domains in a Kubernetes cluster. domainUID: governancedomain # Name of the domain namespace domainNamespace: oigns # Boolean value indicating whether to install kube-prometheus-stack setupKubePrometheusStack: true # Additional parameters for helm install kube-prometheus-stack # Refer https://github.com/prometheus-community/helm-charts/blob/main/charts/kube-prometheus-stack/values.yaml for additional parameters # Sample : # additionalParamForKubePrometheusStack: --set nodeExporter.enabled=false --set prometheusOperator.tls.enabled=false --set prometheusOperator.admissionWebhooks.enabled=false additionalParamForKubePrometheusStack: # Name of the monitoring namespace monitoringNamespace: monitoring # Name of the Admin Server adminServerName: AdminServer # # Port number for admin server adminServerPort: 7001 # Cluster name soaClusterName: soa_cluster # Port number for managed server soaManagedServerPort: 8001 # WebLogic Monitoring Exporter to Cluster wlsMonitoringExporterTosoaCluster: true # Cluster name oimClusterName: oim_cluster # Port number for managed server oimManagedServerPort: 14000 # WebLogic Monitoring Exporter to Cluster wlsMonitoringExporterTooimCluster: true # Boolean to indicate if the adminNodePort will be exposed exposeMonitoringNodePort: true # NodePort to expose Prometheus prometheusNodePort: 32101 # NodePort to expose Grafana grafanaNodePort: 32100 # NodePort to expose Alertmanager alertmanagerNodePort: 32102 # Name of the Kubernetes secret for the Admin Server's username and password weblogicCredentialsSecretName: oig-domain-credentials Note: If your cluster does not have access to the internet to pull external images, such as grafana or prometheus, you must load the images in a local container registry. 
You must then set additionalParamForKubePrometheusStack to set the location of the image in your local container registry, for example:\n# Additional parameters for helm install kube-prometheus-stack # Refer https://github.com/prometheus-community/helm-charts/blob/main/charts/kube-prometheus-stack/values.yaml for additional parameters # Sample : # additionalParamForKubePrometheusStack: --set nodeExporter.enabled=false --set prometheusOperator.tls.enabled=false --set prometheusOperator.admissionWebhooks.enabled=false additionalParamForKubePrometheusStack: --set grafana.image.repository=container-registry.example.com/grafana --set grafana.image.tag=8.3.4 Run the following command to setup monitoring:\n$ cd $WORKDIR/kubernetes/monitoring-service $ ./setup-monitoring.sh -i monitoring-inputs.yaml The output should be similar to the following:\nMonitoring setup in monitoring in progress node/worker-node1 not labeled node/worker-node2 not labeled node/master-node not labeled Setup prometheus-community/kube-prometheus-stack started \u0026quot;prometheus-community\u0026quot; already exists with the same configuration, skipping Hang tight while we grab the latest from your chart repositories... ...Successfully got an update from the \u0026quot;stable\u0026quot; chart repository ...Successfully got an update from the \u0026quot;prometheus\u0026quot; chart repository ...Successfully got an update from the \u0026quot;prometheus-community\u0026quot; chart repository Update Complete. ⎈Happy Helming!⎈ Setup prometheus-community/kube-prometheus-stack in progress NAME: monitoring LAST DEPLOYED: \u0026lt;DATE\u0026gt; NAMESPACE: monitoring STATUS: deployed REVISION: 1 NOTES: kube-prometheus-stack has been installed. Check its status by running: kubectl --namespace monitoring get pods -l \u0026quot;release=monitoring\u0026quot; Visit https://github.com/prometheus-operator/kube-prometheus for instructions on how to create \u0026amp; configure Alertmanager and Prometheus instances using the Operator. Setup prometheus-community/kube-prometheus-stack completed Deploy WebLogic Monitoring Exporter started Deploying WebLogic Monitoring Exporter with domainNamespace[oigns], domainUID[governancedomain], adminServerPodName[governancedomain-adminserver] % Total % Received % Xferd Average Speed Time Time Time Current Dload Upload Total Spent Left Speed 100 655 100 655 0 0 1159 0 --:--:-- --:--:-- --:--:-- 1159 100 2196k 100 2196k 0 0 1763k 0 0:00:01 0:00:01 --:--:-- 20.7M created $WORKDIR/kubernetes/monitoring-service/scripts/wls-exporter-deploy dir created /tmp/ci-GJSQsiXrFE /tmp/ci-GJSQsiXrFE $WORKDIR/kubernetes/monitoring-service in temp dir adding: WEB-INF/weblogic.xml (deflated 61%) adding: config.yml (deflated 60%) $WORKDIR/kubernetes/monitoring-service created /tmp/ci-KeyZrdouMD /tmp/ci-KeyZrdouMD $WORKDIR/kubernetes/monitoring-service in temp dir adding: WEB-INF/weblogic.xml (deflated 61%) adding: config.yml (deflated 60%) $WORKDIR/kubernetes/monitoring-service created /tmp/ci-QE9HawIIgT /tmp/ci-QE9HawIIgT $WORKDIR/kubernetes/monitoring-service in temp dir adding: WEB-INF/weblogic.xml (deflated 61%) adding: config.yml (deflated 60%) $WORKDIR/kubernetes/monitoring-service Initializing WebLogic Scripting Tool (WLST) ... Welcome to WebLogic Server Administration Scripting Shell Type help() for help on available commands Connecting to t3://governancedomain-adminserver:7001 with userid weblogic ... 
Successfully connected to Admin Server \u0026quot;AdminServer\u0026quot; that belongs to domain \u0026quot;governancedomain\u0026quot;. Warning: An insecure protocol was used to connect to the server. To ensure on-the-wire security, the SSL port or Admin port should be used instead. Deploying ......... Deploying application from /u01/oracle/wls-exporter-deploy/wls-exporter-adminserver.war to targets AdminServer (upload=true) ... \u0026lt;DATE\u0026gt; \u0026lt;Info\u0026gt; \u0026lt;J2EE Deployment SPI\u0026gt; \u0026lt;BEA-260121\u0026gt; \u0026lt;Initiating deploy operation for application, wls-exporter-adminserver [archive: /u01/oracle/wls-exporter-deploy/wls-exporter-adminserver.war], to AdminServer .\u0026gt; .Completed the deployment of Application with status completed Current Status of your Deployment: Deployment command type: deploy Deployment State : completed Deployment Message : no message Starting application wls-exporter-adminserver. \u0026lt;DATE\u0026gt; \u0026lt;Info\u0026gt; \u0026lt;J2EE Deployment SPI\u0026gt; \u0026lt;BEA-260121\u0026gt; \u0026lt;Initiating start operation for application, wls-exporter-adminserver [archive: null], to AdminServer .\u0026gt; .Completed the start of Application with status completed Current Status of your Deployment: Deployment command type: start Deployment State : completed Deployment Message : no message Deploying ......... Deploying application from /u01/oracle/wls-exporter-deploy/wls-exporter-soa.war to targets soa_cluster (upload=true) ... \u0026lt;DATE\u0026gt; \u0026lt;Info\u0026gt; \u0026lt;J2EE Deployment SPI\u0026gt; \u0026lt;BEA-260121\u0026gt; \u0026lt;Initiating deploy operation for application, wls-exporter-soa [archive: /u01/oracle/wls-exporter-deploy/wls-exporter-soa.war], to soa_cluster .\u0026gt; .Completed the deployment of Application with status completed Current Status of your Deployment: Deployment command type: deploy Deployment State : completed Deployment Message : no message Starting application wls-exporter-soa. \u0026lt;DATE\u0026gt; \u0026lt;Info\u0026gt; \u0026lt;J2EE Deployment SPI\u0026gt; \u0026lt;BEA-260121\u0026gt; \u0026lt;Initiating start operation for application, wls-exporter-soa [archive: null], to soa_cluster .\u0026gt; .Completed the start of Application with status completed Current Status of your Deployment: Deployment command type: start Deployment State : completed Deployment Message : no message Deploying ......... Deploying application from /u01/oracle/wls-exporter-deploy/wls-exporter-oim.war to targets oim_cluster (upload=true) ... \u0026lt;DATE\u0026gt; \u0026lt;Info\u0026gt; \u0026lt;J2EE Deployment SPI\u0026gt; \u0026lt;BEA-260121\u0026gt; \u0026lt;Initiating deploy operation for application, wls-exporter-oim [archive: /u01/oracle/wls-exporter-deploy/wls-exporter-oim.war], to oim_cluster .\u0026gt; .Completed the deployment of Application with status completed Current Status of your Deployment: Deployment command type: deploy Deployment State : completed Deployment Message : no message Starting application wls-exporter-oim. 
\u0026lt;DATE\u0026gt; \u0026lt;Info\u0026gt; \u0026lt;J2EE Deployment SPI\u0026gt; \u0026lt;BEA-260121\u0026gt; \u0026lt;Initiating start operation for application, wls-exporter-oim [archive: null], to oim_cluster .\u0026gt; .Completed the start of Application with status completed Current Status of your Deployment: Deployment command type: start Deployment State : completed Deployment Message : no message Disconnected from weblogic server: AdminServer Exiting WebLogic Scripting Tool. \u0026lt;DATE\u0026gt; \u0026lt;Warning\u0026gt; \u0026lt;JNDI\u0026gt; \u0026lt;BEA-050001\u0026gt; \u0026lt;WLContext.close() was called in a different thread than the one in which it was created.\u0026gt; Deploy WebLogic Monitoring Exporter completed secret/basic-auth created servicemonitor.monitoring.coreos.com/wls-exporter created Deploying WebLogic Server Grafana Dashboard.... {\u0026quot;id\u0026quot;:25,\u0026quot;slug\u0026quot;:\u0026quot;weblogic-server-dashboard\u0026quot;,\u0026quot;status\u0026quot;:\u0026quot;success\u0026quot;,\u0026quot;uid\u0026quot;:\u0026quot;5yUwzbZWz\u0026quot;,\u0026quot;url\u0026quot;:\u0026quot;/d/5yUwzbZWz/weblogic-server-dashboard\u0026quot;,\u0026quot;version\u0026quot;:1} Deployed WebLogic Server Grafana Dashboard successfully Grafana is available at NodePort: 32100 Prometheus is available at NodePort: 32101 Altermanager is available at NodePort: 32102 ============================================================== Prometheus service discovery After the ServiceMonitor is deployed, the wls-exporter should be discovered by Prometheus and be able to collect metrics.\n Access the following URL to view Prometheus service discovery: http://${MASTERNODE-HOSTNAME}:32101/service-discovery\n Click on serviceMonitor/oigns/wls-exporter/0 and then show more. Verify all the targets are mentioned.\n Note : It may take several minutes for serviceMonitor/oigns/wls-exporter/0 to appear, so refresh the page until it does.\nGrafana dashboard Access the Grafana dashboard with the following URL: http://${MASTERNODE-HOSTNAME}:32100 and login with admin/admin. Change your password when prompted.\n In the Dashboards panel, click on WebLogic Server Dashboard. The dashboard for your OIG domain should be displayed. If it is not displayed, click the Search icon in the left hand menu and search for WebLogic Server Dashboard.\n Cleanup To uninstall the Prometheus, Grafana, WebLogic Monitoring Exporter and the deployments, you can run the $WORKDIR/kubernetes/monitoring-service/delete-monitoring.sh script. For usage details execute ./delete-monitoring.sh -h\n To uninstall run the following command:\n$ cd $WORKDIR/kubernetes/monitoring-service $ ./delete-monitoring.sh -i monitoring-inputs.yaml Setup using manual configuration Install Prometheus, Grafana and WebLogic Monitoring Exporter manually. Create the web applications and deploy to the OIG domain.\nDeploy the Prometheus operator Kube-Prometheus requires all nodes to be labelled with kubernetes.io/os=linux.
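Note: in addition to the full label listing shown in the next step, an illustrative shortcut (not part of the original steps) is to list only the nodes that already carry the required label:\n$ kubectl get nodes -l kubernetes.io/os=linux Any node missing from this output does not yet have the kubernetes.io/os=linux label.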
To check if your nodes are labelled, run the following:\n$ kubectl get nodes --show-labels If the nodes are labelled, the output will look similar to the following:\nNAME STATUS ROLES AGE VERSION LABELS worker-node1 Ready \u0026lt;none\u0026gt; 42d v1.20.10 beta.kubernetes.io/arch=amd64,beta.kubernetes.io/os=linux,kubernetes.io/arch=amd64,kubernetes.io/hostname=worker-node1,kubernetes.io/os=linux worker-node2 Ready \u0026lt;none\u0026gt; 42d v1.20.10 beta.kubernetes.io/arch=amd64,beta.kubernetes.io/os=linux,kubernetes.io/arch=amd64,kubernetes.io/hostname=worker-node2,kubernetes.io/os=linux master-node Ready master 42d v1.20.10 beta.kubernetes.io/arch=amd64,beta.kubernetes.io/os=linux,kubernetes.io/arch=amd64,kubernetes.io/hostname=master-node,kubernetes.io/os=linux,node-role.kubernetes.io/master= If the nodes are not labelled, run the following command:\n$ kubectl label nodes --all kubernetes.io/os=linux Clone Prometheus by running the following commands:\n$ cd $WORKDIR/kubernetes/monitoring-service $ git clone https://github.com/coreos/kube-prometheus.git -b v0.7.0 Note: Please refer to the compatibility matrix of Kube Prometheus and download the release of the repository that matches the Kubernetes version of your cluster.\n If your cluster does not have access to the internet to pull external images, such as grafana, you must load the images in a local container registry.\nFor grafana, edit the $WORKDIR/kubernetes/monitoring-service/kube-prometheus/manifests/grafana-deployment.yaml and change image: grafana/grafana:7.3.4 to your local container registry image location, for example image: container-registry.example.com/grafana/grafana:8.3.4.\nFor any other images check the $WORKDIR/kubernetes/monitoring-service/kube-prometheus/manifests/*deployment.yaml files.\n Run the following command to create the namespace and custom resource definitions:\n$ cd $WORKDIR/kubernetes/monitoring-service/kube-prometheus $ kubectl create -f manifests/setup The output will look similar to the following:\nnamespace/monitoring created customresourcedefinition.apiextensions.k8s.io/alertmanagerconfigs.monitoring.coreos.com created customresourcedefinition.apiextensions.k8s.io/alertmanagers.monitoring.coreos.com created customresourcedefinition.apiextensions.k8s.io/podmonitors.monitoring.coreos.com created customresourcedefinition.apiextensions.k8s.io/probes.monitoring.coreos.com created customresourcedefinition.apiextensions.k8s.io/prometheuses.monitoring.coreos.com created customresourcedefinition.apiextensions.k8s.io/prometheusrules.monitoring.coreos.com created customresourcedefinition.apiextensions.k8s.io/servicemonitors.monitoring.coreos.com created customresourcedefinition.apiextensions.k8s.io/thanosrulers.monitoring.coreos.com created Warning: spec.template.spec.nodeSelector[beta.kubernetes.io/os]: deprecated since v1.14; use \u0026quot;kubernetes.io/os\u0026quot; instead clusterrole.rbac.authorization.k8s.io/prometheus-operator created clusterrolebinding.rbac.authorization.k8s.io/prometheus-operator created deployment.apps/prometheus-operator created service/prometheus-operator created serviceaccount/prometheus-operator created Run the following command to create the rest of the resources:\n$ kubectl create -f manifests/ The output will look similar to the following:\nalertmanager.monitoring.coreos.com/main created secret/alertmanager-main created service/alertmanager-main created serviceaccount/alertmanager-main created servicemonitor.monitoring.coreos.com/alertmanager created
secret/grafana-datasources created configmap/grafana-dashboard-apiserver created configmap/grafana-dashboard-cluster-total created configmap/grafana-dashboard-controller-manager created configmap/grafana-dashboard-k8s-resources-cluster created configmap/grafana-dashboard-k8s-resources-namespace created configmap/grafana-dashboard-k8s-resources-node created configmap/grafana-dashboard-k8s-resources-pod created configmap/grafana-dashboard-k8s-resources-workload created configmap/grafana-dashboard-k8s-resources-workloads-namespace created configmap/grafana-dashboard-kubelet created configmap/grafana-dashboard-namespace-by-pod created configmap/grafana-dashboard-namespace-by-workload created configmap/grafana-dashboard-node-cluster-rsrc-use created configmap/grafana-dashboard-node-rsrc-use created configmap/grafana-dashboard-nodes created configmap/grafana-dashboard-persistentvolumesusage created configmap/grafana-dashboard-pod-total created configmap/grafana-dashboard-prometheus-remote-write created configmap/grafana-dashboard-prometheus created configmap/grafana-dashboard-proxy created configmap/grafana-dashboard-scheduler created configmap/grafana-dashboard-statefulset created configmap/grafana-dashboard-workload-total created configmap/grafana-dashboards created Warning: spec.template.spec.nodeSelector[beta.kubernetes.io/os]: deprecated since v1.14; use \u0026quot;kubernetes.io/os\u0026quot; instead deployment.apps/grafana created service/grafana created serviceaccount/grafana created servicemonitor.monitoring.coreos.com/grafana created clusterrole.rbac.authorization.k8s.io/kube-state-metrics created clusterrolebinding.rbac.authorization.k8s.io/kube-state-metrics created deployment.apps/kube-state-metrics created service/kube-state-metrics created serviceaccount/kube-state-metrics created servicemonitor.monitoring.coreos.com/kube-state-metrics created clusterrole.rbac.authorization.k8s.io/node-exporter created clusterrolebinding.rbac.authorization.k8s.io/node-exporter created daemonset.apps/node-exporter created service/node-exporter created serviceaccount/node-exporter created servicemonitor.monitoring.coreos.com/node-exporter created apiservice.apiregistration.k8s.io/v1beta1.metrics.k8s.io created clusterrole.rbac.authorization.k8s.io/prometheus-adapter created clusterrole.rbac.authorization.k8s.io/system:aggregated-metrics-reader created clusterrolebinding.rbac.authorization.k8s.io/prometheus-adapter created clusterrolebinding.rbac.authorization.k8s.io/resource-metrics:system:auth-delegator created clusterrole.rbac.authorization.k8s.io/resource-metrics-server-resources created configmap/adapter-config created deployment.apps/prometheus-adapter created rolebinding.rbac.authorization.k8s.io/resource-metrics-auth-reader created service/prometheus-adapter created serviceaccount/prometheus-adapter created servicemonitor.monitoring.coreos.com/prometheus-adapter created clusterrole.rbac.authorization.k8s.io/prometheus-k8s created clusterrolebinding.rbac.authorization.k8s.io/prometheus-k8s created servicemonitor.monitoring.coreos.com/prometheus-operator created prometheus.monitoring.coreos.com/k8s created rolebinding.rbac.authorization.k8s.io/prometheus-k8s-config created rolebinding.rbac.authorization.k8s.io/prometheus-k8s created rolebinding.rbac.authorization.k8s.io/prometheus-k8s created rolebinding.rbac.authorization.k8s.io/prometheus-k8s created role.rbac.authorization.k8s.io/prometheus-k8s-config created role.rbac.authorization.k8s.io/prometheus-k8s created 
role.rbac.authorization.k8s.io/prometheus-k8s created role.rbac.authorization.k8s.io/prometheus-k8s created prometheusrule.monitoring.coreos.com/prometheus-k8s-rules created service/prometheus-k8s created serviceaccount/prometheus-k8s created servicemonitor.monitoring.coreos.com/prometheus created servicemonitor.monitoring.coreos.com/kube-apiserver created servicemonitor.monitoring.coreos.com/coredns created servicemonitor.monitoring.coreos.com/kube-controller-manager created servicemonitor.monitoring.coreos.com/kube-scheduler created servicemonitor.monitoring.coreos.com/kubelet created Provide external access for Grafana, Prometheus, and Alertmanager, by running the following commands:\n$ kubectl patch svc grafana -n monitoring --type=json -p \u0026#39;[{\u0026#34;op\u0026#34;: \u0026#34;replace\u0026#34;, \u0026#34;path\u0026#34;: \u0026#34;/spec/type\u0026#34;, \u0026#34;value\u0026#34;: \u0026#34;NodePort\u0026#34; },{\u0026#34;op\u0026#34;: \u0026#34;replace\u0026#34;, \u0026#34;path\u0026#34;: \u0026#34;/spec/ports/0/nodePort\u0026#34;, \u0026#34;value\u0026#34;: 32100 }]\u0026#39; $ kubectl patch svc prometheus-k8s -n monitoring --type=json -p \u0026#39;[{\u0026#34;op\u0026#34;: \u0026#34;replace\u0026#34;, \u0026#34;path\u0026#34;: \u0026#34;/spec/type\u0026#34;, \u0026#34;value\u0026#34;: \u0026#34;NodePort\u0026#34; },{\u0026#34;op\u0026#34;: \u0026#34;replace\u0026#34;, \u0026#34;path\u0026#34;: \u0026#34;/spec/ports/0/nodePort\u0026#34;, \u0026#34;value\u0026#34;: 32101 }]\u0026#39; $ kubectl patch svc alertmanager-main -n monitoring --type=json -p \u0026#39;[{\u0026#34;op\u0026#34;: \u0026#34;replace\u0026#34;, \u0026#34;path\u0026#34;: \u0026#34;/spec/type\u0026#34;, \u0026#34;value\u0026#34;: \u0026#34;NodePort\u0026#34; },{\u0026#34;op\u0026#34;: \u0026#34;replace\u0026#34;, \u0026#34;path\u0026#34;: \u0026#34;/spec/ports/0/nodePort\u0026#34;, \u0026#34;value\u0026#34;: 32102 }]\u0026#39; Note: This assigns port 32100 to Grafana, 32101 to Prometheus, and 32102 to Alertmanager.\nThe output will look similar to the following:\nservice/grafana patched service/prometheus-k8s patched service/alertmanager-main patched Verify that the Prometheus, Grafana, and Alertmanager pods are running in the monitoring namespace and the respective services have the exports configured correctly:\n$ kubectl get pods,services -o wide -n monitoring The output should look similar to the following:\npod/alertmanager-main-0 2/2 Running 0 40s 10.244.1.29 worker-node1 \u0026lt;none\u0026gt; \u0026lt;none\u0026gt; pod/alertmanager-main-1 2/2 Running 0 40s 10.244.2.68 worker-node2 \u0026lt;none\u0026gt; \u0026lt;none\u0026gt; pod/alertmanager-main-2 2/2 Running 0 40s 10.244.1.28 worker-node1 \u0026lt;none\u0026gt; \u0026lt;none\u0026gt; pod/grafana-f8cd57fcf-zpjh2 1/1 Running 0 40s 10.244.2.69 worker-node2 \u0026lt;none\u0026gt; \u0026lt;none\u0026gt; pod/kube-state-metrics-587bfd4f97-zw9zj 3/3 Running 0 38s 10.244.1.30 worker-node1 \u0026lt;none\u0026gt; \u0026lt;none\u0026gt; pod/node-exporter-2cgrm 2/2 Running 0 38s 10.196.54.36 master-node \u0026lt;none\u0026gt; \u0026lt;none\u0026gt; pod/node-exporter-fpl7f 2/2 Running 0 38s 10.247.95.26 worker-node1 \u0026lt;none\u0026gt; \u0026lt;none\u0026gt; pod/node-exporter-kvvnr 2/2 Running 0 38s 10.250.40.59 worker-node2 \u0026lt;none\u0026gt; \u0026lt;none\u0026gt; pod/prometheus-adapter-69b8496df6-9vfdp 1/1 Running 0 38s 10.244.2.70 worker-node2 \u0026lt;none\u0026gt; \u0026lt;none\u0026gt; pod/prometheus-k8s-0 2/2 Running 0 37s 10.244.2.71 worker-node2 
\u0026lt;none\u0026gt; \u0026lt;none\u0026gt; pod/prometheus-k8s-1 2/2 Running 0 37s 10.244.1.31 worker-node1 \u0026lt;none\u0026gt; \u0026lt;none\u0026gt; pod/prometheus-operator-7649c7454f-g5b4l 2/2 Running 0 47s 10.244.2.67 worker-node2 \u0026lt;none\u0026gt; \u0026lt;none\u0026gt; NAME TYPE CLUSTER-IP EXTERNAL-IP PORT(S) AGE SELECTOR service/alertmanager-main NodePort 10.105.76.223 \u0026lt;none\u0026gt; 9093:32102/TCP 41s alertmanager=main,app=alertmanager service/alertmanager-operated ClusterIP None \u0026lt;none\u0026gt; 9093/TCP,9094/TCP,9094/UDP 40s app=alertmanager service/grafana NodePort 10.107.86.157 \u0026lt;none\u0026gt; 3000:32100/TCP 40s app=grafana service/kube-state-metrics ClusterIP None \u0026lt;none\u0026gt; 8443/TCP,9443/TCP 40s app.kubernetes.io/name=kube-state-metrics service/node-exporter ClusterIP None \u0026lt;none\u0026gt; 9100/TCP 39s app.kubernetes.io/name=node-exporter service/prometheus-adapter ClusterIP 10.102.244.224 \u0026lt;none\u0026gt; 443/TCP 39s name=prometheus-adapter service/prometheus-k8s NodePort 10.100.241.34 \u0026lt;none\u0026gt; 9090:32101/TCP 39s app=prometheus,prometheus=k8s service/prometheus-operated ClusterIP None \u0026lt;none\u0026gt; 9090/TCP 39s app=prometheus service/prometheus-operator ClusterIP None \u0026lt;none\u0026gt; 8443/TCP 47s app.kubernetes.io/component=controller,app.kubernetes.io/name=prometheus-operator Deploy WebLogic Monitoring Exporter Generate the WebLogic Monitoring Exporter deployment package. The wls-exporter.war package need to be updated and created for each listening port (Administration Server and Managed Servers) in the domain.\n Set the below environment values and run the script get-wls-exporter.sh to generate the required WAR files at ${WORKDIR}/kubernetes/monitoring-service/scripts/wls-exporter-deploy:\n$ cd $WORKDIR/kubernetes/monitoring-service/scripts $ export adminServerPort=7001 $ export wlsMonitoringExporterTosoaCluster=true $ export soaManagedServerPort=8001 $ export wlsMonitoringExporterTooimCluster=true $ export oimManagedServerPort=14000 $ sh get-wls-exporter.sh The output will look similar to the following:\n % Total % Received % Xferd Average Speed Time Time Time Current Dload Upload Total Spent Left Speed 100 655 100 655 0 0 1159 0 --:--:-- --:--:-- --:--:-- 1159 100 2196k 100 2196k 0 0 1430k 0 0:00:01 0:00:01 --:--:-- 8479k created $WORKDIR/kubernetes/monitoring-service/scripts/wls-exporter-deploy dir domainNamespace is empty, setting to default oimcluster domainUID is empty, setting to default oimcluster weblogicCredentialsSecretName is empty, setting to default \u0026quot;oimcluster-domain-credentials\u0026quot; adminServerPort is empty, setting to default \u0026quot;7001\u0026quot; soaClusterName is empty, setting to default \u0026quot;soa_cluster\u0026quot; oimClusterName is empty, setting to default \u0026quot;oim_cluster\u0026quot; created /tmp/ci-NEZy7NOfoz /tmp/ci-NEZy7NOfoz $WORKDIR/kubernetes/monitoring-service/scripts in temp dir adding: WEB-INF/weblogic.xml (deflated 61%) adding: config.yml (deflated 60%) $WORKDIR/kubernetes/monitoring-service/scripts created /tmp/ci-J7QJ4Nc1lo /tmp/ci-J7QJ4Nc1lo $WORKDIR/kubernetes/monitoring-service/scripts in temp dir adding: WEB-INF/weblogic.xml (deflated 61%) adding: config.yml (deflated 60%) $WORKDIR/kubernetes/monitoring-service/scripts created /tmp/ci-f4GbaxM2aJ /tmp/ci-f4GbaxM2aJ $WORKDIR/kubernetes/monitoring-service/scripts in temp dir adding: WEB-INF/weblogic.xml (deflated 61%) adding: config.yml (deflated 60%) 
$WORKDIR/kubernetes/monitoring-service/scripts Deploy the WebLogic Monitoring Exporter WAR files into the Oracle Identity Governance domain:\n$ cd $WORKDIR/kubernetes/monitoring-service/scripts $ kubectl cp wls-exporter-deploy \u0026lt;domain_namespace\u0026gt;/\u0026lt;domain_uid\u0026gt;-adminserver:/u01/oracle $ kubectl cp deploy-weblogic-monitoring-exporter.py \u0026lt;domain_namespace\u0026gt;/\u0026lt;domain_uid\u0026gt;-adminserver:/u01/oracle/wls-exporter-deploy $ kubectl exec -it -n \u0026lt;domain_namespace\u0026gt; \u0026lt;domain_uid\u0026gt;-adminserver -- /u01/oracle/oracle_common/common/bin/wlst.sh /u01/oracle/wls-exporter-deploy/deploy-weblogic-monitoring-exporter.py -domainName \u0026lt;domain_uid\u0026gt; -adminServerName AdminServer -adminURL \u0026lt;domain_uid\u0026gt;-adminserver:7001 -username weblogic -password \u0026lt;password\u0026gt; -oimClusterName oim_cluster -wlsMonitoringExporterTooimCluster true -soaClusterName soa_cluster -wlsMonitoringExporterTosoaCluster true For example:\n$ cd $WORKDIR/kubernetes/monitoring-service/scripts $ kubectl cp wls-exporter-deploy oigns/governancedomain-adminserver:/u01/oracle $ kubectl cp deploy-weblogic-monitoring-exporter.py oigns/governancedomain-adminserver:/u01/oracle/wls-exporter-deploy $ kubectl exec -it -n oigns governancedomain-adminserver -- /u01/oracle/oracle_common/common/bin/wlst.sh /u01/oracle/wls-exporter-deploy/deploy-weblogic-monitoring-exporter.py -domainName governancedomain -adminServerName AdminServer -adminURL governancedomain-adminserver:7001 -username weblogic -password \u0026lt;password\u0026gt; -oimClusterName oim_cluster -wlsMonitoringExporterTooimCluster true -soaClusterName soa_cluster -wlsMonitoringExporterTosoaCluster true The output will look similar to the following:\nInitializing WebLogic Scripting Tool (WLST) ... Welcome to WebLogic Server Administration Scripting Shell Type help() for help on available commands Connecting to t3://governancedomain-adminserver:7001 with userid weblogic ... Successfully connected to Admin Server \u0026quot;AdminServer\u0026quot; that belongs to domain \u0026quot;governancedomaindomain\u0026quot;. Warning: An insecure protocol was used to connect to the server. To ensure on-the-wire security, the SSL port or Admin port should be used instead. Deploying ......... Deploying application from /u01/oracle/wls-exporter-deploy/wls-exporter-adminserver.war to targets AdminServer (upload=true) ... \u0026lt;DATE\u0026gt; \u0026lt;Info\u0026gt; \u0026lt;J2EE Deployment SPI\u0026gt; \u0026lt;BEA-260121\u0026gt; \u0026lt;Initiating deploy operation for application, wls-exporter-adminserver [archive: /u01/oracle/wls-exporter-deploy/wls-exporter-adminserver.war], to AdminServer .\u0026gt; .Completed the deployment of Application with status completed Current Status of your Deployment: Deployment command type: deploy Deployment State : completed Deployment Message : no message Starting application wls-exporter-adminserver. \u0026lt;DATE\u0026gt; \u0026lt;Info\u0026gt; \u0026lt;J2EE Deployment SPI\u0026gt; \u0026lt;BEA-260121\u0026gt; \u0026lt;Initiating start operation for application, wls-exporter-adminserver [archive: null], to AdminServer .\u0026gt; .Completed the start of Application with status completed Current Status of your Deployment: Deployment command type: start Deployment State : completed Deployment Message : no message Deploying ......... Deploying application from /u01/oracle/wls-exporter-deploy/wls-exporter-soa.war to targets soa_cluster (upload=true) ... 
\u0026lt;DATE\u0026gt; \u0026lt;Info\u0026gt; \u0026lt;J2EE Deployment SPI\u0026gt; \u0026lt;BEA-260121\u0026gt; \u0026lt;Initiating deploy operation for application, wls-exporter-soa [archive: /u01/oracle/wls-exporter-deploy/wls-exporter-soa.war], to soa_cluster .\u0026gt; ..Completed the deployment of Application with status completed Current Status of your Deployment: Deployment command type: deploy Deployment State : completed Deployment Message : no message Starting application wls-exporter-soa. \u0026lt;DATE\u0026gt; \u0026lt;Info\u0026gt; \u0026lt;J2EE Deployment SPI\u0026gt; \u0026lt;BEA-260121\u0026gt; \u0026lt;Initiating start operation for application, wls-exporter-soa [archive: null], to soa_cluster .\u0026gt; .Completed the start of Application with status completed Current Status of your Deployment: Deployment command type: start Deployment State : completed Deployment Message : no message Deploying ......... Deploying application from /u01/oracle/wls-exporter-deploy/wls-exporter-oim.war to targets oim_cluster (upload=true) ... \u0026lt;DATE\u0026gt; \u0026lt;Info\u0026gt; \u0026lt;J2EE Deployment SPI\u0026gt; \u0026lt;BEA-260121\u0026gt; \u0026lt;Initiating deploy operation for application, wls-exporter-oim [archive: /u01/oracle/wls-exporter-deploy/wls-exporter-oim.war], to oim_cluster .\u0026gt; .Completed the deployment of Application with status completed Current Status of your Deployment: Deployment command type: deploy Deployment State : completed Deployment Message : no message Starting application wls-exporter-oim. \u0026lt;DATE\u0026gt; \u0026lt;Info\u0026gt; \u0026lt;J2EE Deployment SPI\u0026gt; \u0026lt;BEA-260121\u0026gt; \u0026lt;Initiating start operation for application, wls-exporter-oim [archive: null], to oim_cluster .\u0026gt; .Completed the start of Application with status completed Current Status of your Deployment: Deployment command type: start Deployment State : completed Deployment Message : no message Disconnected from weblogic server: AdminServer Exiting WebLogic Scripting Tool. \u0026lt;DATE\u0026gt; \u0026lt;Warning\u0026gt; \u0026lt;JNDI\u0026gt; \u0026lt;BEA-050001\u0026gt; \u0026lt;WLContext.close() was called in a different thread than the one in which it was created.\u0026gt; Configure Prometheus Operator Prometheus enables you to collect metrics from the WebLogic Monitoring Exporter. The Prometheus Operator identifies the targets using service discovery. To get the WebLogic Monitoring Exporter end point discovered as a target, you must create a service monitor pointing to the service.\nThe exporting of metrics from wls-exporter requires basicAuth, so a Kubernetes Secret is created with the user name and password that are base64 encoded. This Secret is used in the ServiceMonitor deployment. The wls-exporter-ServiceMonitor.yaml has basicAuth with credentials as username: weblogic and password: \u0026lt;password\u0026gt; in base64 encoded.\n Run the following command to get the base64 encoded version of the weblogic password:\n$ echo -n \u0026#34;\u0026lt;password\u0026gt;\u0026#34; | base64 The output will look similar to the following:\nV2VsY29tZTE= Update the $WORKDIR/kubernetes/monitoring-service/manifests/wls-exporter-ServiceMonitor.yaml and change the password: value to the value returned above. Also change any reference to the namespace and weblogic.domainName: values to match your OIG namespace and domain name. 
For example:\napiVersion: v1 kind: Secret metadata: name: basic-auth namespace: oigns data: password: V2VsY29tZTE= user: d2VibG9naWM= type: Opaque --- apiVersion: monitoring.coreos.com/v1 kind: ServiceMonitor metadata: name: wls-exporter namespace: oigns labels: k8s-app: wls-exporter release: monitoring spec: namespaceSelector: matchNames: - oigns selector: matchLabels: weblogic.domainName: governancedomain endpoints: - basicAuth: password: name: basic-auth key: password username: name: basic-auth key: user port: default relabelings: - action: labelmap regex: __meta_kubernetes_service_label_(.+) interval: 10s honorLabels: true path: /wls-exporter/metrics Update the $WORKDIR/kubernetes/monitoring-service/manifests/prometheus-roleSpecific-domain-namespace.yaml and change the namespace to match your OIG namespace. For example:\napiVersion: rbac.authorization.k8s.io/v1 items: - apiVersion: rbac.authorization.k8s.io/v1 kind: Role metadata: name: prometheus-k8s namespace: oigns rules: - apiGroups: - \u0026quot;\u0026quot; resources: - services - endpoints - pods verbs: - get - list - watch kind: RoleList Update the $WORKDIR/kubernetes/monitoring-service/manifests/prometheus-roleBinding-domain-namespace.yaml and change the namespace to match your OIG namespace. For example:\napiVersion: rbac.authorization.k8s.io/v1 items: - apiVersion: rbac.authorization.k8s.io/v1 kind: RoleBinding metadata: name: prometheus-k8s namespace: oigns roleRef: apiGroup: rbac.authorization.k8s.io kind: Role name: prometheus-k8s subjects: - kind: ServiceAccount name: prometheus-k8s namespace: monitoring kind: RoleBindingList Run the following command to enable Prometheus:\n$ cd $WORKDIR/kubernetes/monitoring-service/manifests $ kubectl apply -f . The output will look similar to the following:\nrolebinding.rbac.authorization.k8s.io/prometheus-k8s created role.rbac.authorization.k8s.io/prometheus-k8s created secret/basic-auth created servicemonitor.monitoring.coreos.com/wls-exporter created Prometheus service discovery After the ServiceMonitor is deployed, the wls-exporter should be discovered by Prometheus and be able to collect metrics.\n Access the following URL to view Prometheus service discovery: http://${MASTERNODE-HOSTNAME}:32101/service-discovery\n Click on oigns/wls-exporter/0 and then show more. Verify all the targets are mentioned.\n Note: It may take several minutes for oigns/wls-exporter/0 to appear, so refresh the page until it does.\nGrafana dashboard Access the Grafana dashboard with the following URL: http://${MASTERNODE-HOSTNAME}:32100 and login with admin/admin. Change your password when prompted.\n Import the Grafana dashboard by navigating on the left hand menu to Create \u0026gt; Import. Copy the content from $WORKDIR/kubernetes/monitoring-service/config/weblogic-server-dashboard-import.json and paste. Then click Load and Import. The dashboard should be displayed.\n Cleanup To clean up a manual installation:\n Run the following commands:\n$ cd $WORKDIR/kubernetes/monitoring-service/manifests/ $ kubectl delete -f . 
Delete the deployments:\n$ cd $WORKDIR/kubernetes/monitoring-service/scripts/ $ kubectl cp undeploy-weblogic-monitoring-exporter.py \u0026lt;domain_namespace\u0026gt;/\u0026lt;domain_uid\u0026gt;-adminserver:/u01/oracle/wls-exporter-deploy $ kubectl exec -it -n \u0026lt;domain_namespace\u0026gt; \u0026lt;domain_uid\u0026gt;-adminserver -- /u01/oracle/oracle_common/common/bin/wlst.sh /u01/oracle/wls-exporter-deploy/undeploy-weblogic-monitoring-exporter.py -domainName \u0026lt;domain_uid\u0026gt; -adminServerName AdminServer -adminURL \u0026lt;domain_uid\u0026gt;-adminserver:7001 -username weblogic -password \u0026lt;password\u0026gt; -oimClusterName oim_cluster -wlsMonitoringExporterTooimCluster true -soaClusterName soa_cluster -wlsMonitoringExporterTosoaCluster true Delete Prometheus:\n$ cd $WORKDIR/kubernetes/monitoring-service/kube-prometheus $ kubectl delete -f manifests $ kubectl delete -f manifests/setup " +}, +{ + "uri": "/fmw-kubernetes/23.4.1/idm-products/enterprise-deployments/", + "title": "Enterprise Deployments", + "tags": [], + "description": "The complete Oracle Identity Management suite can be deployed in a production environment", + "content": "Enterprise Deployments of Oracle Identity Management The entire Oracle Identity and Access Management Suite can be deployed in a production environment. See the following sections:\n a. Enterprise Deployment Guide b. Enterprise Deployment Guide Automation Scripts " +}, +{ + "uri": "/fmw-kubernetes/23.4.1/idm-products/oam/manage-oam-domains/delete-domain-home/", + "title": "f. Delete the OAM domain home", + "tags": [], + "description": "Learn about the steps to cleanup the OAM domain home.", + "content": "Sometimes in production, but most likely in testing environments, you might want to remove the domain home that is generated using the create-domain.sh script.\n Run the following command to delete the domain:\n$ cd $WORKDIR/kubernetes/delete-domain $ ./delete-weblogic-domain-resources.sh -d \u0026lt;domain_uid\u0026gt; For example:\n$ cd $WORKDIR/kubernetes/delete-domain $ ./delete-weblogic-domain-resources.sh -d accessdomain Drop the RCU schemas as follows:\n$ kubectl exec -it helper -n \u0026lt;domain_namespace\u0026gt; -- /bin/bash [oracle@helper ~]$ [oracle@helper ~]$ export CONNECTION_STRING=\u0026lt;db_host.domain\u0026gt;:\u0026lt;db_port\u0026gt;/\u0026lt;service_name\u0026gt; [oracle@helper ~]$ export RCUPREFIX=\u0026lt;rcu_schema_prefix\u0026gt; /u01/oracle/oracle_common/bin/rcu -silent -dropRepository -databaseType ORACLE -connectString $CONNECTION_STRING \\ -dbUser sys -dbRole sysdba -selectDependentsForComponents true -schemaPrefix $RCUPREFIX \\ -component MDS -component IAU -component IAU_APPEND -component IAU_VIEWER -component OPSS \\ -component WLS -component STB -component OAM -f \u0026lt; /tmp/pwd.txt For example:\n$ kubectl exec -it helper -n oamns -- /bin/bash [oracle@helper ~]$ export CONNECTION_STRING=mydatabasehost.example.com:1521/orcl.example.com [oracle@helper ~]$ export RCUPREFIX=OAMK8S /u01/oracle/oracle_common/bin/rcu -silent -dropRepository -databaseType ORACLE -connectString $CONNECTION_STRING \\ -dbUser sys -dbRole sysdba -selectDependentsForComponents true -schemaPrefix $RCUPREFIX \\ -component MDS -component IAU -component IAU_APPEND -component IAU_VIEWER -component OPSS \\ -component WLS -component STB -component OAM -f \u0026lt; /tmp/pwd.txt Delete the contents of the persistent volume, for example:\n$ rm -rf \u0026lt;persistent_volume\u0026gt;/accessdomainpv/* For example:\n$ rm -rf 
/scratch/shared/accessdomainpv/* Delete the WebLogic Kubernetes Operator, by running the following command:\n$ helm delete weblogic-kubernetes-operator -n opns Delete the label from the OAM namespace:\n$ kubectl label namespaces \u0026lt;domain_namespace\u0026gt; weblogic-operator- For example:\n$ kubectl label namespaces oamns weblogic-operator- Delete the service account for the operator:\n$ kubectl delete serviceaccount \u0026lt;sample-kubernetes-operator-sa\u0026gt; -n \u0026lt;domain_namespace\u0026gt; For example:\n$ kubectl delete serviceaccount op-sa -n opns Delete the operator namespace:\n$ kubectl delete namespace \u0026lt;sample-kubernetes-operator-ns\u0026gt; For example:\n$ kubectl delete namespace opns To delete NGINX:\n$ helm delete oam-nginx -n \u0026lt;domain_namespace\u0026gt; For example:\n$ helm delete oam-nginx -n oamns Then run:\n$ helm delete nginx-ingress -n \u0026lt;domain_namespace\u0026gt; For example:\n$ helm delete nginx-ingress -n oamns Delete the OAM namespace:\n$ kubectl delete namespace \u0026lt;domain_namespace\u0026gt; For example:\n$ kubectl delete namespace oamns " +}, +{ + "uri": "/fmw-kubernetes/23.4.1/idm-products/oig/manage-oig-domains/hpa/", + "title": "f. Kubernetes Horizontal Pod Autoscaler", + "tags": [], + "description": "Describes the steps for implementing the Horizontal Pod Autoscaler.", + "content": " Prerequisite configuration Deploy the Kubernetes Metrics Server Troubleshooting Deploy HPA Testing HPA Delete the HPA Other considerations Kubernetes Horizontal Pod Autoscaler (HPA) is supported from Weblogic Kubernetes Operator 4.0.X and later.\nHPA allows automatic scaling (up and down) of the OIG Managed Servers. If load increases then extra OIG Managed Servers will be started as required, up to the value configuredManagedServerCount defined when the domain was created (see Prepare the create domain script). Similarly, if load decreases, OIG Managed Servers will be automatically shutdown.\nFor more information on HPA, see Horizontal Pod Autoscaling.\nThe instructions below show you how to configure and run an HPA to scale an OIG cluster (governancedomain-oim-cluster) resource, based on CPU utilization or memory resource metrics. If required, you can also perform the following for the governancedomain-soa-cluster.\nNote: If you enable HPA and then decide you want to start/stop/scale OIG Managed servers manually as per Domain Life Cycle, it is recommended to delete HPA beforehand as per Delete the HPA.\nPrerequisite configuration In order to use HPA, the OIG domain must have been created with the required resources parameter as per Set the OIM server memory parameters. For example:\nserverPod: env: - name: USER_MEM_ARGS value: \u0026quot;-XX:+UseContainerSupport -Djava.security.egd=file:/dev/./urandom -Xms8192m -Xmx8192m\u0026quot; resources: limits: cpu: \u0026quot;2\u0026quot; memory: \u0026quot;8Gi\u0026quot; requests: cpu: \u0026quot;1000m\u0026quot; memory: \u0026quot;4Gi\u0026quot; If you created the OIG domain without setting these parameters, then you can update the domain using the following steps:\n Run the following command to edit the cluster:\n$ kubectl edit cluster governancedomain-oim-cluster -n oigns Note: This opens an edit session for the governancedomain-oim-cluster where parameters can be changed using standard vi commands.\n In the edit session, search for spec:, and then look for the replicas parameter under clusterName: oim_cluster. 
Change the entry so it looks as follows:\nspec: clusterName: oim_cluster replicas: 1 serverPod: env: - name: USER_MEM_ARGS value: -XX:+UseContainerSupport -Djava.security.egd=file:/dev/./urandom -Xms8192m -Xmx8192m resources: limits: cpu: \u0026quot;2\u0026quot; memory: 8Gi requests: cpu: 1000m memory: 4Gi serverService: precreateService: true ... Save the file and exit (:wq!)\nThe output will look similar to the following:\ncluster.weblogic.oracle/governancedomain-oim-cluster edited The OIG Managed Server pods will then automatically be restarted.\n Deploy the Kubernetes Metrics Server Before deploying HPA you must deploy the Kubernetes Metrics Server.\n Check to see if the Kubernetes Metrics Server is already deployed:\n$ kubectl get pods -n kube-system | grep metric If a row is returned as follows, then the Kubernetes Metrics Server is deployed and you can move to Deploy HPA.\nmetrics-server-d9694457-mf69d 1/1 Running 0 5m13s If no rows are returned by the previous command, then the Kubernetes Metrics Server needs to be deployed. Run the following commands to get the components.yaml:\n$ mkdir $WORKDIR/kubernetes/hpa $ cd $WORKDIR/kubernetes/hpa $ wget https://github.com/kubernetes-sigs/metrics-server/releases/latest/download/components.yaml Deploy the Kubernetes Metrics Server by running the following command:\n$ kubectl apply -f components.yaml The output will look similar to the following:\nserviceaccount/metrics-server created clusterrole.rbac.authorization.k8s.io/system:aggregated-metrics-reader created clusterrole.rbac.authorization.k8s.io/system:metrics-server created rolebinding.rbac.authorization.k8s.io/metrics-server-auth-reader created clusterrolebinding.rbac.authorization.k8s.io/metrics-server:system:auth-delegator created clusterrolebinding.rbac.authorization.k8s.io/system:metrics-server created service/metrics-server created deployment.apps/metrics-server created apiservice.apiregistration.k8s.io/v1beta1.metrics.k8s.io created Run the following command to check the Kubernetes Metrics Server is running:\n$ kubectl get pods -n kube-system | grep metric Make sure the pod has a READY status of 1/1:\nmetrics-server-d9694457-mf69d 1/1 Running 0 39s Troubleshooting If the Kubernetes Metrics Server does not reach the READY 1/1 state, run the following commands:\n$ kubectl describe pod \u0026lt;metrics-server-pod\u0026gt; -n kube-system $ kubectl logs \u0026lt;metrics-server-pod\u0026gt; -n kube-system If you see errors such as:\nReadiness probe failed: HTTP probe failed with statuscode: 500 and:\nE0907 13:07:50.937308 1 scraper.go:140] \u0026quot;Failed to scrape node\u0026quot; err=\u0026quot;Get \\\u0026quot;https://100.105.18.113:10250/metrics/resource\\\u0026quot;: x509: cannot validate certificate for 100.105.18.113 because it doesn't contain any IP SANs\u0026quot; node=\u0026quot;worker-node1\u0026quot; then you may need to install a valid cluster certificate for your Kubernetes cluster.\nFor testing purposes, you can resolve this issue as follows:\n Delete the Kubernetes Metrics Server by running the following command:\n$ kubectl delete -f $WORKDIR/kubernetes/hpa/components.yaml Edit the $WORKDIR/kubernetes/hpa/components.yaml and locate the args: section. Add --kubelet-insecure-tls to the arguments. For example:\nspec: containers: - args: - --cert-dir=/tmp - --secure-port=4443 - --kubelet-preferred-address-types=InternalIP,ExternalIP,Hostname - --kubelet-use-node-status-port - --kubelet-insecure-tls - --metric-resolution=15s image: registry.k8s.io/metrics-server/metrics-server:v0.6.4 ... 
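Before reapplying, you can optionally confirm that the argument was added to the file (assuming you are still in the $WORKDIR/kubernetes/hpa directory):
$ grep -- '--kubelet-insecure-tls' components.yaml
- --kubelet-insecure-tls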
Deploy the Kubernetes Metrics Server using the command:\n$ kubectl apply -f components.yaml Run the following and make sure the READY status shows 1/1:\n$ kubectl get pods -n kube-system | grep metric The output should look similar to the following:\nmetrics-server-d9694457-mf69d 1/1 Running 0 40s Deploy HPA The steps below show how to configure and run an HPA to scale the governancedomain-oim-cluster, based on the CPU or memory utilization resource metrics.\nThe default OIG deployment creates the cluster governancedomain-oim-cluster which starts one OIG Managed Server (oim_server1). The deployment also creates, but doesn’t start, four extra OIG Managed Servers (oim-server2 to oim-server5).\nIn the following example an HPA resource is created, targeted at the cluster resource governancedomain-oim-cluster. This resource will autoscale OIG Managed Servers from a minimum of 1 cluster member up to 5 cluster members. Scaling up will occur when the average CPU is consistently over 70%. Scaling down will occur when the average CPU is consistently below 70%.\n Navigate to the $WORKDIR/kubernetes/hpa directory and create an autoscalehpa.yaml file that contains the following:\n# apiVersion: autoscaling/v2 kind: HorizontalPodAutoscaler metadata: name: governancedomain-oim-cluster-hpa namespace: oigns spec: scaleTargetRef: apiVersion: weblogic.oracle/v1 kind: Cluster name: governancedomain-oim-cluster behavior: scaleDown: stabilizationWindowSeconds: 60 scaleUp: stabilizationWindowSeconds: 60 minReplicas: 1 maxReplicas: 5 metrics: - type: Resource resource: name: cpu target: type: Utilization averageUtilization: 70 Note: minReplicas and maxReplicas should match your current domain settings.\nNote: For setting HPA based on Memory Metrics, update the metrics block with the following content. 
Please note we recommend using only CPU or Memory, not both.\nmetrics: - type: Resource resource: name: memory target: type: Utilization averageUtilization: 70 Run the following command to create the autoscaler:\n$ kubectl apply -f autoscalehpa.yaml The output will look similar to the following:\nhorizontalpodautoscaler.autoscaling/governancedomain-oim-cluster-hpa created Verify the status of the autoscaler by running the following:\n$ kubectl get hpa -n oigns The output will look similar to the following:\nNAME REFERENCE TARGETS MINPODS MAXPODS REPLICAS AGE governancedomain-oim-cluster-hpa Cluster/governancedomain-oim-cluster 16%/70% 1 5 1 20s In the example above, this shows that CPU is currently running at 16% for the governancedomain-oim-cluster-hpa.\n Testing HPA Check the current status of the OIG Managed Servers:\n$ kubectl get pods -n oigns The output will look similar to the following:\nNAME READY STATUS RESTARTS AGE governancedomain-adminserver 1/1 Running 0 20m governancedomain-create-fmw-infra-sample-domain-job-8wd2b 0/1 Completed 0 2d18h governancedomain-oim-server1 1/1 Running 0 17m governancedomain-soa-server1 1/1 Running 0 17m helper 1/1 Running 0 2d18h In the above only governancedomain-oim-server1 is running.\n To test HPA can scale up the WebLogic cluster governancedomain-oim-cluster, run the following commands:\n$ kubectl exec --stdin --tty governancedomain-oim-server1 -n oigns -- /bin/bash This will take you inside a bash shell inside the oim_server1 pod:\n[oracle@governancedomain-oim-server1 oracle]$ Inside the bash shell, run the following command to increase the load on the CPU:\n[oracle@governancedomain-oim-server1 oracle]$ dd if=/dev/zero of=/dev/null This command will continue to run in the foreground.\n In a command window outside the bash shell, run the following command to view the current CPU usage:\n$ kubectl get hpa -n oigns The output will look similar to the following:\nNAME REFERENCE TARGETS MINPODS MAXPODS REPLICAS AGE governancedomain-oim-cluster-hpa Cluster/governancedomain-oim-cluster 386%/70% 1 5 1 2m47s In the above example the CPU has increased to 386%. As this is above the 70% limit, the autoscaler increases the replicas on the Cluster resource and the operator responds by starting additional cluster members.\n Run the following to see if any more OIG Managed Servers are started:\n$ kubectl get pods -n oigns The output will look similar to the following:\nNAME READY STATUS RESTARTS AGE governancedomain-adminserver 1/1 Running 0 30m governancedomain-create-fmw-infra-sample-domain-job-8wd2b 0/1 Completed 0 2d18h governancedomain-oim-server1 1/1 Running 0 27m governancedomain-oim-server2 1/1 Running 0 10m governancedomain-oim-server3 1/1 Running 0 10m governancedomain-oim-server4 1/1 Running 0 10m governancedomain-oim-server5 1/1 Running 0 10m governancedomain-soa-server1 1/1 Running 0 27m helper 1/1 Running 0 2d18h In the example above four more OIG Managed Servers have been started (oim-server2 - oim-server5).\nNote: It may take some time for the servers to appear and start. 
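If you want to follow the progress as the new Managed Server pods are created, you can watch the namespace (optional; press Ctrl-C to stop watching):
$ kubectl get pods -n oigns -w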
Once the servers are at READY status of 1/1, the servers are started.\n To stop the load on the CPU, in the bash shell, issue a Control C, and then exit the bash shell:\n[oracle@governancedomain-oim-server1 oracle]$ dd if=/dev/zero of=/dev/null ^C [oracle@governancedomain-oim-server1 oracle]$ exit Run the following command to view the current CPU usage:\n$ kubectl get hpa -n oigns The output will look similar to the following:\nNAME REFERENCE TARGETS MINPODS MAXPODS REPLICAS AGE governancedomain-oim-cluster-hpa Cluster/governancedomain-oim-cluster 33%/70% 1 5 5 37m In the above example CPU has dropped to 33%. As this is below the 70% threshold, you should see the autoscaler scale down the servers:\n$ kubectl get pods -n oigns The output will look similar to the following:\nNAME READY STATUS RESTARTS AGE governancedomain-adminserver 1/1 Running 0 43m governancedomain-create-fmw-infra-sample-domain-job-8wd2b 0/1 Completed 0 2d18h governancedomain-oim-server1 1/1 Running 0 40m governancedomain-oim-server2 1/1 Running 0 13m governancedomain-oim-server3 1/1 Running 0 13m governancedomain-oim-server4 1/1 Running 0 13m governancedomain-oim-server5 0/1 Terminating 0 13m governancedomain-soa-server1 1/1 Running 0 40m helper 1/1 Running 0 2d19h Eventually, all the servers except oim-server1 will disappear:\nNAME READY STATUS RESTARTS AGE governancedomain-adminserver 1/1 Running 0 44m governancedomain-create-fmw-infra-sample-domain-job-8wd2b 0/1 Completed 0 2d18h governancedomain-oim-server1 1/1 Running 0 41m governancedomain-soa-server1 1/1 Running 0 41m helper 1/1 Running 0 2d20h Delete the HPA If you need to delete the HPA, you can do so by running the following command:\n$ cd $WORKDIR/kubernetes/hpa $ kubectl delete -f autoscalehpa.yaml Other considerations If HPA is deployed and you need to upgrade the OIG image, then you must delete the HPA before upgrading. Once the upgrade is successful you can deploy HPA again. If you choose to start/stop an OIG Managed Server manually as per Domain Life Cycle, then it is recommended to delete the HPA before doing so. " +}, +{ + "uri": "/fmw-kubernetes/23.4.1/idm-products/oig/manage-oig-domains/delete-domain-home/", + "title": "g. 
Delete the OIG domain home", + "tags": [], + "description": "Learn about the steps to cleanup the OIG domain home.", + "content": "Sometimes in production, but most likely in testing environments, you might want to remove the domain home that is generated using the create-domain.sh script.\n Run the following command to delete the domain:\n$ cd $WORKDIR/kubernetes/delete-domain $ ./delete-weblogic-domain-resources.sh -d \u0026lt;domain_uid\u0026gt; For example:\n$ cd $WORKDIR/kubernetes/delete-domain $ ./delete-weblogic-domain-resources.sh -d governancedomain Drop the RCU schemas as follows:\n$ kubectl exec -it helper -n \u0026lt;domain_namespace\u0026gt; -- /bin/bash [oracle@helper ~]$ [oracle@helper ~]$ export CONNECTION_STRING=\u0026lt;db_host.domain\u0026gt;:\u0026lt;db_port\u0026gt;/\u0026lt;service_name\u0026gt; [oracle@helper ~]$ export RCUPREFIX=\u0026lt;rcu_schema_prefix\u0026gt; /u01/oracle/oracle_common/bin/rcu -silent -dropRepository -databaseType ORACLE -connectString $CONNECTION_STRING \\ -dbUser sys -dbRole sysdba -selectDependentsForComponents true -schemaPrefix $RCUPREFIX \\ -component MDS -component IAU -component IAU_APPEND -component IAU_VIEWER -component OPSS \\ -component WLS -component STB -component OIM -component SOAINFRA -component UCSUMS -f \u0026lt; /tmp/pwd.txt For example:\n$ kubectl exec -it helper -n oigns -- /bin/bash [oracle@helper ~]$ export CONNECTION_STRING=mydatabasehost.example.com:1521/orcl.example.com [oracle@helper ~]$ export RCUPREFIX=OIGK8S /u01/oracle/oracle_common/bin/rcu -silent -dropRepository -databaseType ORACLE -connectString $CONNECTION_STRING \\ -dbUser sys -dbRole sysdba -selectDependentsForComponents true -schemaPrefix $RCUPREFIX \\ -component MDS -component IAU -component IAU_APPEND -component IAU_VIEWER -component OPSS \\ -component WLS -component STB -component OIM -component SOAINFRA -component UCSUMS -f \u0026lt; /tmp/pwd.txt Delete the contents of the persistent volume:\n$ rm -rf \u0026lt;persistent_volume\u0026gt;/governancedomainpv/* For example:\n$ rm -rf /scratch/shared/governancedomainpv/* Delete the WebLogic Kubernetes Operator, by running the following command:\n$ helm delete weblogic-kubernetes-operator -n opns Delete the label from the OIG namespace:\n$ kubectl label namespaces \u0026lt;domain_namespace\u0026gt; weblogic-operator- For example:\n$ kubectl label namespaces oigns weblogic-operator- Delete the service account for the operator:\n$ kubectl delete serviceaccount \u0026lt;sample-kubernetes-operator-sa\u0026gt; -n \u0026lt;domain_namespace\u0026gt; For example:\n$ kubectl delete serviceaccount op-sa -n opns Delete the operator namespace:\n$ kubectl delete namespace \u0026lt;sample-kubernetes-operator-ns\u0026gt; For example:\n$ kubectl delete namespace opns To delete NGINX:\n$ helm delete governancedomain-nginx-designconsole -n \u0026lt;domain_namespace\u0026gt; For example:\n$ helm delete governancedomain-nginx-designconsole -n oigns Then run:\n$ helm delete governancedomain-nginx -n \u0026lt;domain_namespace\u0026gt; For example:\n$ helm delete governancedomain-nginx -n oigns Then run:\n$ helm delete nginx-ingress -n \u0026lt;domain_namespace\u0026gt; For example:\n$ helm delete nginx-ingress -n nginxssl Then delete the NGINX namespace:\n$ kubectl delete namespace \u0026lt;namespace\u0026gt; For example:\n$ kubectl delete namespace nginxssl Delete the OIG namespace:\n$ kubectl delete namespace \u0026lt;domain_namespace\u0026gt; For example:\n$ kubectl delete namespace oigns " +}, +{ + "uri": 
"/fmw-kubernetes/23.4.1/idm-products/oam/", + "title": "Oracle Access Management", + "tags": [], + "description": "The WebLogic Kubernetes Operator supports deployment of Oracle Access Management (OAM).", + "content": "Oracle Access Management on Kubernetes Oracle supports the deployment of Oracle Access Management on Kubernetes. See the following sections:\n Introduction Release Notes Prerequisites Prepare your environment Create OAM domains Configure an Ingress for an OAM domain Validate Domain URLs Post Install Configuration Validate a Basic SSO Flow using WebGate Registration Manage OAM Domains Create or update an image Patch and Upgrade Troubleshooting " +}, +{ + "uri": "/fmw-kubernetes/23.4.1/idm-products/oig/", + "title": "Oracle Identity Governance", + "tags": [], + "description": "The WebLogic Kubernetes Operator supports deployment of Oracle Identity Governance (OIG).", + "content": "Oracle Identity Governance on Kubernetes Oracle supports the deployment of Oracle Identity Governance on Kubernetes. See the following sections:\n Introduction Release Notes Prerequisites Prepare your environment Create OIG domains Configure an ingress for an OIG domain Validate domain URLs Post install configuration Configure Design Console Manage OIG domains Create or update an image Patch and upgrade Troubleshooting " +}, +{ + "uri": "/fmw-kubernetes/23.4.1/idm-products/oid/", + "title": "Oracle Internet Directory", + "tags": [], + "description": "Oracle Internet Directory provides a comprehensive Directory Solution for robust Identity Management", + "content": "As of July 2022, container support has been removed for Oracle Internet Directory. Refer to document ID 2723908.1 on My Oracle Support for more details.\nDocumentation for earlier releases To view documentation for previous releases, see:\n Version 22.2.1 Version 21.4.2 " +}, +{ + "uri": "/fmw-kubernetes/23.4.1/idm-products/oud/", + "title": "Oracle Unified Directory", + "tags": [], + "description": "Oracle Unified Directory provides a comprehensive Directory Solution for robust Identity Management", + "content": "Oracle Unified Directory on Kubernetes Oracle supports the deployment of Oracle Unified Directory on Kubernetes. See the following sections:\n Introduction Release Notes Prerequisites Prepare Your Environment Create Oracle Unified Directory Instances Configure an Ingress for OUD Manage Oracle Unified Directory Containers Create or update an image Patch and Upgrade Troubleshooting " +}, +{ + "uri": "/fmw-kubernetes/23.4.1/idm-products/oudsm/", + "title": "Oracle Unified Directory Services Manager", + "tags": [], + "description": "Oracle Unified Directory Services Manager provides an interface for managing instances of Oracle Unified Directory", + "content": "Oracle Unified Directory Services Manager on Kubernetes Oracle supports the deployment of Oracle Unified Directory Services Manager on Kubernetes. 
See the following sections:\n Introduction Release Notes Prerequisites Prepare Your Environment Create Oracle Unified Directory Services Manager Instances Configure an Ingress for OUDSM Manage Oracle Unified Directory Services Manager Containers Create or update an image Patch and upgrade Troubleshooting " +}, +{ + "uri": "/fmw-kubernetes/23.4.1/tags/", + "title": "Tags", + "tags": [], + "description": "", + "content": "" +}] \ No newline at end of file diff --git a/docs/23.4.1/index.xml b/docs/23.4.1/index.xml new file mode 100644 index 000000000..ffac4a161 --- /dev/null +++ b/docs/23.4.1/index.xml @@ -0,0 +1,479 @@ + + + + Oracle Fusion Middleware on Kubernetes + /fmw-kubernetes/23.4.1/ + Recent content on Oracle Fusion Middleware on Kubernetes + Hugo -- gohugo.io + en-us + Thu, 18 Apr 2019 06:46:23 -0500 + + + + + + a. Using Design Console with NGINX(non-SSL) + /fmw-kubernetes/23.4.1/idm-products/oig/configure-design-console/using-the-design-console-with-nginx-non-ssl/ + Mon, 01 Jan 0001 00:00:00 +0000 + + /fmw-kubernetes/23.4.1/idm-products/oig/configure-design-console/using-the-design-console-with-nginx-non-ssl/ + Configure an NGINX ingress (non-SSL) to allow Design Console to connect to your Kubernetes cluster. + Prerequisites + Setup routing rules for the Design Console ingress + Create the ingress + Update the T3 channel + Restart the OIG Managed Server + Design Console client +a. Using an on-premises installed Design Console +b. Using a container image for Design Console + Login to the Design Console + + + + Release Notes + /fmw-kubernetes/23.4.1/idm-products/oid/release-notes/ + Mon, 01 Jan 0001 00:00:00 +0000 + + /fmw-kubernetes/23.4.1/idm-products/oid/release-notes/ + Review the latest changes and known issues for Oracle Internet Directory on Kubernetes. +Recent changes Date Version Change July, 2022 22.3.1 As of July 2022, Container support has been removed for Oracle Internet Directory. Refer to document ID 2723908.1 on My Oracle Support for more details. April, 2022 22.2.1 Updated for CRI-O support. October, 2021 21.4.1 Initial release of Oracle Identity Directory on Kubernetes. + + + + b. Using Design Console with NGINX(SSL) + /fmw-kubernetes/23.4.1/idm-products/oig/configure-design-console/using-the-design-console-with-nginx-ssl/ + Mon, 01 Jan 0001 00:00:00 +0000 + + /fmw-kubernetes/23.4.1/idm-products/oig/configure-design-console/using-the-design-console-with-nginx-ssl/ + Configure an NGINX ingress (SSL) to allow Design Console to connect to your Kubernetes cluster. + Prerequisites + Setup routing rules for the Design Console ingress + Create the ingress + Update the T3 channel + Restart the OIG Managed Server + Design Console client +a. Using an on-premises installed Design Console +b. Using a container image for Design Console + Login to the Design Console + + + + Patch and Upgrade + /fmw-kubernetes/23.4.1/idm-products/oud/patch-and-upgrade/ + Mon, 01 Jan 0001 00:00:00 +0000 + + /fmw-kubernetes/23.4.1/idm-products/oud/patch-and-upgrade/ + In this section you learn how to upgrade OUD from a previous version. Follow the section relevant to the version you are upgrading from. + Upgrading to October 23 (23.4.1) from April 23 (23.2.1) or later Upgrading to October 23 (23.4.1) from October 22 (22.4.1) or January 23 (23.1.1) Upgrading to October 23 (23.4.1) from July 22 (22.3.1) Upgrading to October 23 (23.4.1) from releases prior to July 22 (22.3.1) Upgrading Elasticsearch and Kibana Note: If on July 22 (22. 
+ + + + a) Scaling Up/Down OUD Pods + /fmw-kubernetes/23.4.1/idm-products/oud/manage-oud-containers/scaling-up-down/ + Mon, 01 Jan 0001 00:00:00 +0000 + + /fmw-kubernetes/23.4.1/idm-products/oud/manage-oud-containers/scaling-up-down/ + Introduction This section describes how to increase or decrease the number of OUD pods in the Kubernetes deployment. +Note: The instructions below are for scaling servers up or down manually. If you wish to use autoscaling, see Kubernetes Horizontal Pod Autoscaler. Please note, if you have enabled autoscaling, it is recommended to delete the autoscaler before running the commands below. +View existing OUD pods By default the oud-ds-rs helm chart deployment starts three pods: oud-ds-rs-0 and two replica pods oud-ds-rs-1 and oud-ds-rs-2. + + + + a) Scaling Up/Down OUDSM Pods + /fmw-kubernetes/23.4.1/idm-products/oudsm/manage-oudsm-containers/scaling-up-down/ + Mon, 01 Jan 0001 00:00:00 +0000 + + /fmw-kubernetes/23.4.1/idm-products/oudsm/manage-oudsm-containers/scaling-up-down/ + Introduction This section describes how to increase or decrease the number of OUDSM pods in the Kubernetes deployment. +View existing OUDSM pods By default the oudsm helm chart deployment starts one pod: oudsm-1. +The number of pods started is determined by the replicaCount, which is set to 1 by default. A value of 1 starts the pod above. +To scale up or down the number of OUDSM pods, set replicaCount accordingly. + + + + a. Domain Life Cycle + /fmw-kubernetes/23.4.1/idm-products/oam/manage-oam-domains/domain-lifecycle/ + Mon, 01 Jan 0001 00:00:00 +0000 + + /fmw-kubernetes/23.4.1/idm-products/oam/manage-oam-domains/domain-lifecycle/ + View existing OAM servers Starting/Scaling up OAM Managed servers Stopping/Scaling down OAM Managed servers Starting/Scaling up OAM Policy Managed servers Stopping/Scaling down OAM Policy Managed servers Stopping and starting the Administration Server and Managed Servers Domain lifecycle sample scripts As OAM domains use the WebLogic Kubernetes Operator, domain lifecyle operations are managed using the WebLogic Kubernetes Operator itself. +This document shows the basic operations for starting, stopping and scaling servers in the OAM domain. + + + + a. Domain life cycle + /fmw-kubernetes/23.4.1/idm-products/oig/manage-oig-domains/domain-lifecycle/ + Mon, 01 Jan 0001 00:00:00 +0000 + + /fmw-kubernetes/23.4.1/idm-products/oig/manage-oig-domains/domain-lifecycle/ + View existing OIG servers Starting/Scaling up OIG Managed servers Stopping/Scaling down OIG Managed servers Stopping and starting the Administration Server and Managed Servers Domain lifecycle sample scripts As OIG domains use the WebLogic Kubernetes Operator, domain lifecyle operations are managed using the WebLogic Kubernetes Operator itself. +This document shows the basic operations for starting, stopping and scaling servers in the OIG domain. +For more detailed information refer to Domain Life Cycle in the WebLogic Kubernetes Operator documentation. + + + + a. Patch an image + /fmw-kubernetes/23.4.1/idm-products/oudsm/patch-and-upgrade/patch-an-oudsm-image/ + Mon, 01 Jan 0001 00:00:00 +0000 + + /fmw-kubernetes/23.4.1/idm-products/oudsm/patch-and-upgrade/patch-an-oudsm-image/ + Introduction In this section the Oracle Unified Directory Services Manager (OUDSM) deployment is updated with a new OUDSM container image. +Note: If you are not using Oracle Container Registry or your own container registry, then you must first load the new container image on all nodes in your Kubernetes cluster. 
+You can update the deployment with a new OUDSM container image using one of the following methods: + Using a YAML file Using --set argument Using a YAML file Navigate to the $WORKDIR/kubernetes/helm directory: + + + + a. Post Install Tasks + /fmw-kubernetes/23.4.1/idm-products/oig/post-install-config/set_oimfronendurl_using_mbeans/ + Mon, 01 Jan 0001 00:00:00 +0000 + + /fmw-kubernetes/23.4.1/idm-products/oig/post-install-config/set_oimfronendurl_using_mbeans/ + Follow these post install configuration steps. + Create a Server Overrides File Set OIMFrontendURL using MBeans Create a Server Overrides File Navigate to the following directory: +cd $WORKDIR/kubernetes/create-oim-domain/domain-home-on-pv/output/weblogic-domains/governancedomain Create a setUserOverrides.sh with the following contents: +DERBY_FLAG=false JAVA_OPTIONS=&quot;${JAVA_OPTIONS} -Djava.net.preferIPv4Stack=true&quot; MEM_ARGS=&quot;-Xms8192m -Xmx8192m&quot; Copy the setUserOverrides.sh file to the Administration Server pod: +$ chmod 755 setUserOverrides.sh $ kubectl cp setUserOverrides.sh oigns/governancedomain-adminserver:/u01/oracle/user_projects/domains/governancedomain/bin/setUserOverrides.sh Where oigns is the OIG namespace and governancedomain is the domain_UID. + + + + a. Upgrade an operator release + /fmw-kubernetes/23.4.1/idm-products/oam/patch-and-upgrade/upgrade-an-operator-release/ + Mon, 01 Jan 0001 00:00:00 +0000 + + /fmw-kubernetes/23.4.1/idm-products/oam/patch-and-upgrade/upgrade-an-operator-release/ + These instructions apply to upgrading operators from 3.X.X to 4.X, or from within the 4.X release family as additional versions are released. + On the master node, download the new WebLogic Kubernetes Operator source code from the operator github project: +$ mkdir &lt;workdir&gt;/weblogic-kubernetes-operator-4.X.X $ cd &lt;workdir&gt;/weblogic-kubernetes-operator-4.X.X $ git clone https://github.com/oracle/weblogic-kubernetes-operator.git --branch v4.X.X For example: +$ mkdir /scratch/OAMK8S/weblogic-kubernetes-operator-4.X.X $ cd /scratch/OAMK8S/weblogic-kubernetes-operator-4.X.X $ git clone https://github.com/oracle/weblogic-kubernetes-operator.git --branch v4.X.X This will create the directory &lt;workdir&gt;/weblogic-kubernetes-operator-4. + + + + a. Upgrade an operator release + /fmw-kubernetes/23.4.1/idm-products/oig/patch-and-upgrade/upgrade-an-operator-release/ + Mon, 01 Jan 0001 00:00:00 +0000 + + /fmw-kubernetes/23.4.1/idm-products/oig/patch-and-upgrade/upgrade-an-operator-release/ + These instructions apply to upgrading operators from 3.X.X to 4.X, or from within the 4.x release family as additional versions are released. + On the master node, download the new WebLogic Kubernetes Operator source code from the operator github project: +$ mkdir &lt;workdir&gt;/weblogic-kubernetes-operator-4.X.X $ cd &lt;workdir&gt;/weblogic-kubernetes-operator-4.X.X $ git clone https://github.com/oracle/weblogic-kubernetes-operator.git --branch v4.X.X For example: +$ mkdir /scratch/OIGK8S/weblogic-kubernetes-operator-4.X.X $ cd /scratch/OIGK8S/weblogic-kubernetes-operator-4.X.X $ git clone https://github.com/oracle/weblogic-kubernetes-operator.git --branch v4.X.X This will create the directory &lt;workdir&gt;/weblogic-kubernetes-operator-4. + + + + a. 
Using an Ingress with NGINX (non-SSL) + /fmw-kubernetes/23.4.1/idm-products/oig/configure-ingress/ingress-nginx-setup-for-oig-domain-setup-on-k8s/ + Mon, 01 Jan 0001 00:00:00 +0000 + + /fmw-kubernetes/23.4.1/idm-products/oig/configure-ingress/ingress-nginx-setup-for-oig-domain-setup-on-k8s/ + Setting up an ingress for NGINX for the OIG domain on Kubernetes (non-SSL) The instructions below explain how to set up NGINX as an ingress for the OIG domain with non-SSL termination. +Note: All the steps below should be performed on the master node. + Install NGINX +a. Configure the repository +b. Create a namespace +c. Install NGINX using helm +d. Setup routing rules for the domain + Create an ingress for the domain + + + + b) Logging and Visualization for Helm Chart oud-ds-rs Deployment + /fmw-kubernetes/23.4.1/idm-products/oud/manage-oud-containers/logging-and-visualization/ + Mon, 01 Jan 0001 00:00:00 +0000 + + /fmw-kubernetes/23.4.1/idm-products/oud/manage-oud-containers/logging-and-visualization/ + Introduction Install Elasticsearch and Kibana Create a Kubernetes secret Enable Logstash Upgrade OUD deployment with ELK configuration Verify the pods Verify and access the Kibana console Introduction This section describes how to install and configure logging and visualization for the oud-ds-rs Helm chart deployment. +The ELK stack consists of Elasticsearch, Logstash, and Kibana. Using ELK you can gain insights in real-time from the log data from your applications. + + + + b) Logging and Visualization for Helm Chart oudsm Deployment + /fmw-kubernetes/23.4.1/idm-products/oudsm/manage-oudsm-containers/logging-and-visualization/ + Mon, 01 Jan 0001 00:00:00 +0000 + + /fmw-kubernetes/23.4.1/idm-products/oudsm/manage-oudsm-containers/logging-and-visualization/ + Introduction This section describes how to install and configure logging and visualization for the oudsm Helm chart deployment. +The ELK stack consists of Elasticsearch, Logstash, and Kibana. Using ELK you can gain insights in real-time from the log data from your applications. + Elasticsearch is a distributed, RESTful search and analytics engine capable of solving a growing number of use cases. As the heart of the Elastic Stack, it centrally stores your data so you can discover the expected and uncover the unexpected. + + + + b. Install and configure connectors + /fmw-kubernetes/23.4.1/idm-products/oig/post-install-config/install_and_configure_connectors/ + Mon, 01 Jan 0001 00:00:00 +0000 + + /fmw-kubernetes/23.4.1/idm-products/oig/post-install-config/install_and_configure_connectors/ + Download the connector Download the Connector you are interested in from Oracle Identity Manager Connector Downloads. + Copy the connector zip file to a staging directory on the master node e.g. &lt;workdir&gt;/stage and unzip it: +$ cp $HOME/Downloads/&lt;connector&gt;.zip &lt;workdir&gt;/&lt;stage&gt;/ $ cd &lt;workdir&gt;/&lt;stage&gt; $ unzip &lt;connector&gt;.zip $ chmod -R 755 * For example: +$ cp $HOME/Downloads/Exchange-12.2.1.3.0.zip /scratch/OIGK8S/stage/ $ cd /scratch/OIGK8S/stage/ $ unzip exchange-12.2.1.3.0.zip $ chmod -R 755 * Copy OIG connectors There are two options to copy OIG Connectors to your Kubernetes cluster: + + + + b. 
Patch an image + /fmw-kubernetes/23.4.1/idm-products/oam/patch-and-upgrade/patch-an-image/ + Mon, 01 Jan 0001 00:00:00 +0000 + + /fmw-kubernetes/23.4.1/idm-products/oam/patch-and-upgrade/patch-an-image/ + Choose one of the following options to update your OAM kubernetes cluster to use the new image: + Run the kubectl edit domain command Run the kubectl patch domain command In all of the above cases, the WebLogic Kubernetes Operator will restart the Administration Server pod first and then perform a rolling restart on the OAM Managed Servers. +Note: If you are not using Oracle Container Registry or your own container registry, then you must first load the new container image on all nodes in your Kubernetes cluster. + + + + b. Patch an image + /fmw-kubernetes/23.4.1/idm-products/oig/patch-and-upgrade/patch-an-image/ + Mon, 01 Jan 0001 00:00:00 +0000 + + /fmw-kubernetes/23.4.1/idm-products/oig/patch-and-upgrade/patch-an-image/ + Introduction The OIG domain patching script automatically performs the update of your OIG Kubernetes cluster with a new OIG container image. +Note: Before following the steps below, you must have upgraded to WebLogic Kubernetes Operator 4.1.2. +The script executes the following steps sequentially: + Checks if the helper pod exists in the given namespace. If yes, then it deletes the helper pod. Brings up a new helper pod with the new image. + + + + b. Upgrade Elasticsearch and Kibana + /fmw-kubernetes/23.4.1/idm-products/oudsm/patch-and-upgrade/upgrade-elk/ + Mon, 01 Jan 0001 00:00:00 +0000 + + /fmw-kubernetes/23.4.1/idm-products/oudsm/patch-and-upgrade/upgrade-elk/ + This section shows how to upgrade Elasticsearch and Kibana. +To determine if this step is required for the version you are upgrading from, refer to the Release Notes. +Download the latest code repository Download the latest code repository as follows: + Create a working directory to setup the source code. +$ mkdir &lt;workdir&gt; For example: +$ mkdir /scratch/OUDSMK8SOctober23 Download the latest OUDSM deployment scripts from the OUDSM repository. + + + + b. Using an Ingress with NGINX (SSL) + /fmw-kubernetes/23.4.1/idm-products/oig/configure-ingress/ingress-nginx-setup-for-oig-domain-setup-on-k8s-ssl/ + Mon, 01 Jan 0001 00:00:00 +0000 + + /fmw-kubernetes/23.4.1/idm-products/oig/configure-ingress/ingress-nginx-setup-for-oig-domain-setup-on-k8s-ssl/ + Setting up an ingress for NGINX for the OIG domain on Kubernetes The instructions below explain how to set up NGINX as an ingress for the OIG domain with SSL termination. +Note: All the steps below should be performed on the master node. + Create a SSL certificate +a. Generate SSL certificate +b. Create a Kubernetes secret for SSL + Install NGINX +a. Configure the repository +b. Create a namespace + + + + b. WLST Administration Operations + /fmw-kubernetes/23.4.1/idm-products/oam/manage-oam-domains/wlst-admin-operations/ + Mon, 01 Jan 0001 00:00:00 +0000 + + /fmw-kubernetes/23.4.1/idm-products/oam/manage-oam-domains/wlst-admin-operations/ + To use WLST to administer the OAM domain, use the helper pod in the same Kubernetes cluster as the OAM Domain. + Check to see if the helper pod exists by running: +$ kubectl get pods -n &lt;domain_namespace&gt; | grep helper For example: +$ kubectl get pods -n oamns | grep helper The output should look similar to the following: +helper 1/1 Running 0 26h If the helper pod doesn&rsquo;t exist then see Step 1 in Prepare your environment to create it. + + + + b. 
WLST administration operations + /fmw-kubernetes/23.4.1/idm-products/oig/manage-oig-domains/wlst-admin-operations/ + Mon, 01 Jan 0001 00:00:00 +0000 + + /fmw-kubernetes/23.4.1/idm-products/oig/manage-oig-domains/wlst-admin-operations/ + Invoke WLST and access Administration Server To use WLST to administer the OIG domain, use a helper pod in the same Kubernetes cluster as the OIG Domain. + Check to see if the helper pod exists by running: +$ kubectl get pods -n &lt;domain_namespace&gt; | grep helper For example: +$ kubectl get pods -n oigns | grep helper The output should look similar to the following: +helper 1/1 Running 0 26h If the helper pod doesn&rsquo;t exist then see Step 1 in Prepare your environment to create it. + + + + c) Monitoring an Oracle Unified Directory Instance + /fmw-kubernetes/23.4.1/idm-products/oud/manage-oud-containers/monitoring-oud-instance/ + Mon, 01 Jan 0001 00:00:00 +0000 + + /fmw-kubernetes/23.4.1/idm-products/oud/manage-oud-containers/monitoring-oud-instance/ + Introduction Install Prometheus and Grafana Create a Kubernetes namespace Add Prometheus and Grafana Helm repositories Install the Prometheus operator View Prometheus and Grafana objects created Add the NodePort Verify using Grafana GUI Introduction After the Oracle Unified Directory instance (OUD) is set up you can monitor it using Prometheus and Grafana. +Install Prometheus and Grafana Create a Kubernetes namespace Create a Kubernetes namespace to provide a scope for Prometheus and Grafana objects such as pods and services that you create in the environment. + + + + c) Monitoring an Oracle Unified Directory Services Manager Instance + /fmw-kubernetes/23.4.1/idm-products/oudsm/manage-oudsm-containers/monitoring-oudsm-instance/ + Mon, 01 Jan 0001 00:00:00 +0000 + + /fmw-kubernetes/23.4.1/idm-products/oudsm/manage-oudsm-containers/monitoring-oudsm-instance/ + Introduction Install Prometheus and Grafana Create a Kubernetes namespace Add Prometheus and Grafana Helm repositories Install the Prometheus operator View Prometheus and Grafana Objects Created Add the NodePort Verify Using Grafana GUI Introduction After the Oracle Unified Directory Services Manager instance is set up you can monitor it using Prometheus and Grafana. +Install Prometheus and Grafana Create a Kubernetes namespace Create a Kubernetes namespace to provide a scope for Prometheus and Grafana objects such as pods and services that you create in the environment. + + + + c. Logging and Visualization + /fmw-kubernetes/23.4.1/idm-products/oam/manage-oam-domains/logging-and-visualization/ + Mon, 01 Jan 0001 00:00:00 +0000 + + /fmw-kubernetes/23.4.1/idm-products/oam/manage-oam-domains/logging-and-visualization/ + After the OAM domain is set up you can publish operator and WebLogic Server logs into Elasticsearch and interact with them in Kibana. +Install Elasticsearch stack and Kibana If you do not already have a centralized Elasticsearch (ELK) stack then you must configure this first. For details on how to configure the ELK stack, follow Installing Elasticsearch (ELK) Stack and Kibana +Create the logstash pod Variables used in this chapter In order to create the logstash pod, you must create several files. + + + + c. Runnning OIG utilities + /fmw-kubernetes/23.4.1/idm-products/oig/manage-oig-domains/running-oig-utilities/ + Mon, 01 Jan 0001 00:00:00 +0000 + + /fmw-kubernetes/23.4.1/idm-products/oig/manage-oig-domains/running-oig-utilities/ + Run OIG utlities inside the OIG Kubernetes cluster. 
+Run utilities in an interactive bash shell Access a bash shell inside the &lt;domain_uid&gt;-oim-server1 pod: +$ kubectl -n oigns exec -it &lt;domain_uid&gt;-oim-server1 -- bash For example: +$ kubectl -n oigns exec -it governancedomain-oim-server1 -- bash This will take you into a bash shell in the running &lt;domain_uid&gt;-oim-server1 pod: +[oracle@governancedomain-oim-server1 oracle]$ Navigate to the /u01/oracle/idm/server/bin directory and execute the utility as required. + + + + c. Upgrade Ingress + /fmw-kubernetes/23.4.1/idm-products/oam/patch-and-upgrade/upgrade-an-ingress/ + Mon, 01 Jan 0001 00:00:00 +0000 + + /fmw-kubernetes/23.4.1/idm-products/oam/patch-and-upgrade/upgrade-an-ingress/ + This section shows how to upgrade the ingress. +To determine if this step is required for the version you are upgrading to, refer to the Release Notes. +Download the latest code repository Download the latest code repository as follows: + Create a working directory to setup the source code. +$ mkdir &lt;workdir&gt; For example: +$ mkdir /scratch/OAMK8Slatest Download the latest OAM deployment scripts from the OAM repository. + + + + c. Upgrade Ingress + /fmw-kubernetes/23.4.1/idm-products/oig/patch-and-upgrade/upgrade-an-ingress/ + Mon, 01 Jan 0001 00:00:00 +0000 + + /fmw-kubernetes/23.4.1/idm-products/oig/patch-and-upgrade/upgrade-an-ingress/ + This section shows how to upgrade the ingress. +To determine if this step is required for the version you are upgrading to, refer to the Release Notes. +Upgrading the ingress To upgrade the existing ingress rules, follow the steps below: + List the existing ingress: +$ helm list -n &lt;domain_namespace&gt; For example: +$ helm list -n oigns The output will look similar to the following: +NAME NAMESPACE REVISION UPDATED STATUS CHART APP VERSION governancedomain-nginx oigns 1 &lt;DATE&gt; deployed ingress-per-domain-0. + + + + d. Kubernetes Horizontal Pod Autoscaler + /fmw-kubernetes/23.4.1/idm-products/oud/manage-oud-containers/hpa/ + Mon, 01 Jan 0001 00:00:00 +0000 + + /fmw-kubernetes/23.4.1/idm-products/oud/manage-oud-containers/hpa/ + Prerequisite configuration Deploy the Kubernetes Metrics Server Troubleshooting Deploy HPA Testing HPA Delete the HPA Other considerations Kubernetes Horizontal Pod Autoscaler (HPA) allows automatic scaling (up and down) of the OUD servers. If load increases then extra OUD servers will be started as required. Similarly, if load decreases, OUD servers will be automatically shutdown. +For more information on HPA, see Horizontal Pod Autoscaling. +The instructions below show you how to configure and run an HPA to scale OUD servers, based on CPU utilization or memory resource metrics. + + + + d. Logging and visualization + /fmw-kubernetes/23.4.1/idm-products/oig/manage-oig-domains/logging-and-visualization/ + Mon, 01 Jan 0001 00:00:00 +0000 + + /fmw-kubernetes/23.4.1/idm-products/oig/manage-oig-domains/logging-and-visualization/ + After the OIG domain is set up you can publish operator and WebLogic Server logs into Elasticsearch and interact with them in Kibana. +Install Elasticsearch and Kibana If you do not already have a centralized Elasticsearch (ELK) stack then you must configure this first. For details on how to configure the ELK stack, follow Installing Elasticsearch (ELK) Stack and Kibana +Create the logstash pod Variables used in this chapter In order to create the logstash pod, you must create several files. + + + + d. 
Monitoring an OAM domain + /fmw-kubernetes/23.4.1/idm-products/oam/manage-oam-domains/monitoring-oam-domains/ + Mon, 01 Jan 0001 00:00:00 +0000 + + /fmw-kubernetes/23.4.1/idm-products/oam/manage-oam-domains/monitoring-oam-domains/ + After the OAM domain is set up you can monitor the OAM instance using Prometheus and Grafana. See Monitoring a domain. +The WebLogic Monitoring Exporter uses the WLS RESTful Management API to scrape runtime information and then exports Prometheus-compatible metrics. It is deployed as a web application in a WebLogic Server (WLS) instance, version 12.2.1 or later, typically, in the instance from which you want to get metrics. +There are two ways to setup monitoring and you should choose one method or the other: + + + + d. Upgrade Elasticsearch and Kibana + /fmw-kubernetes/23.4.1/idm-products/oam/patch-and-upgrade/upgrade-elk/ + Mon, 01 Jan 0001 00:00:00 +0000 + + /fmw-kubernetes/23.4.1/idm-products/oam/patch-and-upgrade/upgrade-elk/ + This section shows how to upgrade Elasticsearch and Kibana. +To determine if this step is required for the version you are upgrading to, refer to the Release Notes. +Undeploy Elasticsearch and Kibana From October 22 (22.4.1) onwards, OAM logs should be stored on a centralized Elasticsearch and Kibana stack. +Deployments prior to October 22 (22.4.1) used local deployments of Elasticsearch and Kibana. +If you are upgrading from July 22 (22.3.1) or earlier, to October 22 (22. + + + + d. Upgrade Elasticsearch and Kibana + /fmw-kubernetes/23.4.1/idm-products/oig/patch-and-upgrade/upgrade-elk/ + Mon, 01 Jan 0001 00:00:00 +0000 + + /fmw-kubernetes/23.4.1/idm-products/oig/patch-and-upgrade/upgrade-elk/ + This section shows how to upgrade Elasticsearch and Kibana. +To determine if this step is required for the version you are upgrading to, refer to the Release Notes. +Download the latest code repository Make sure you have downloaded the latest code as per Download the latest code repository. Undeploy Elasticsearch and Kibana From October 22 (22.4.1) onwards, OIG logs should be stored on a centralized Elasticsearch and Kibana stack. + + + + e. Kubernetes Horizontal Pod Autoscaler + /fmw-kubernetes/23.4.1/idm-products/oam/manage-oam-domains/hpa/ + Mon, 01 Jan 0001 00:00:00 +0000 + + /fmw-kubernetes/23.4.1/idm-products/oam/manage-oam-domains/hpa/ + Prerequisite configuration Deploy the Kubernetes Metrics Server Troubleshooting Deploy HPA Testing HPA Delete the HPA Other considerations Kubernetes Horizontal Pod Autoscaler (HPA) is supported from Weblogic Kubernetes Operator 4.0.X and later. +HPA allows automatic scaling (up and down) of the OAM Managed Servers. If load increases then extra OAM Managed Servers will be started as required, up to the value configuredManagedServerCount defined when the domain was created (see Prepare the create domain script). + + + + e. Monitoring an OIG domain + /fmw-kubernetes/23.4.1/idm-products/oig/manage-oig-domains/monitoring-oim-domains/ + Mon, 01 Jan 0001 00:00:00 +0000 + + /fmw-kubernetes/23.4.1/idm-products/oig/manage-oig-domains/monitoring-oim-domains/ + After the OIG domain is set up you can monitor the OIG instance using Prometheus and Grafana. See Monitoring a domain. +The WebLogic Monitoring Exporter uses the WLS RESTful Management API to scrape runtime information and then exports Prometheus-compatible metrics. It is deployed as a web application in a WebLogic Server (WLS) instance, version 12.2.1 or later, typically, in the instance from which you want to get metrics. 
+There are two ways to setup monitoring and you should choose one method or the other: + + + + f. Delete the OAM domain home + /fmw-kubernetes/23.4.1/idm-products/oam/manage-oam-domains/delete-domain-home/ + Mon, 01 Jan 0001 00:00:00 +0000 + + /fmw-kubernetes/23.4.1/idm-products/oam/manage-oam-domains/delete-domain-home/ + Sometimes in production, but most likely in testing environments, you might want to remove the domain home that is generated using the create-domain.sh script. + Run the following command to delete the domain: +$ cd $WORKDIR/kubernetes/delete-domain $ ./delete-weblogic-domain-resources.sh -d &lt;domain_uid&gt; For example: +$ cd $WORKDIR/kubernetes/delete-domain $ ./delete-weblogic-domain-resources.sh -d accessdomain Drop the RCU schemas as follows: +$ kubectl exec -it helper -n &lt;domain_namespace&gt; -- /bin/bash [oracle@helper ~]$ [oracle@helper ~]$ export CONNECTION_STRING=&lt;db_host. + + + + f. Kubernetes Horizontal Pod Autoscaler + /fmw-kubernetes/23.4.1/idm-products/oig/manage-oig-domains/hpa/ + Mon, 01 Jan 0001 00:00:00 +0000 + + /fmw-kubernetes/23.4.1/idm-products/oig/manage-oig-domains/hpa/ + Prerequisite configuration Deploy the Kubernetes Metrics Server Troubleshooting Deploy HPA Testing HPA Delete the HPA Other considerations Kubernetes Horizontal Pod Autoscaler (HPA) is supported from Weblogic Kubernetes Operator 4.0.X and later. +HPA allows automatic scaling (up and down) of the OIG Managed Servers. If load increases then extra OIG Managed Servers will be started as required, up to the value configuredManagedServerCount defined when the domain was created (see Prepare the create domain script). + + + + g. Delete the OIG domain home + /fmw-kubernetes/23.4.1/idm-products/oig/manage-oig-domains/delete-domain-home/ + Mon, 01 Jan 0001 00:00:00 +0000 + + /fmw-kubernetes/23.4.1/idm-products/oig/manage-oig-domains/delete-domain-home/ + Sometimes in production, but most likely in testing environments, you might want to remove the domain home that is generated using the create-domain.sh script. + Run the following command to delete the domain: +$ cd $WORKDIR/kubernetes/delete-domain $ ./delete-weblogic-domain-resources.sh -d &lt;domain_uid&gt; For example: +$ cd $WORKDIR/kubernetes/delete-domain $ ./delete-weblogic-domain-resources.sh -d governancedomain Drop the RCU schemas as follows: +$ kubectl exec -it helper -n &lt;domain_namespace&gt; -- /bin/bash [oracle@helper ~]$ [oracle@helper ~]$ export CONNECTION_STRING=&lt;db_host. + + + + \ No newline at end of file diff --git a/docs/23.4.1/js/auto-complete.js b/docs/23.4.1/js/auto-complete.js new file mode 100644 index 000000000..7fbde995e --- /dev/null +++ b/docs/23.4.1/js/auto-complete.js @@ -0,0 +1,223 @@ +/* + JavaScript autoComplete v1.0.4 + Copyright (c) 2014 Simon Steinberger / Pixabay + GitHub: https://github.com/Pixabay/JavaScript-autoComplete + License: http://www.opensource.org/licenses/mit-license.php +*/ + +var autoComplete = (function(){ + // "use strict"; + function autoComplete(options){ + if (!document.querySelector) return; + + // helpers + function hasClass(el, className){ return el.classList ? 
el.classList.contains(className) : new RegExp('\\b'+ className+'\\b').test(el.className); } + + function addEvent(el, type, handler){ + if (el.attachEvent) el.attachEvent('on'+type, handler); else el.addEventListener(type, handler); + } + function removeEvent(el, type, handler){ + // if (el.removeEventListener) not working in IE11 + if (el.detachEvent) el.detachEvent('on'+type, handler); else el.removeEventListener(type, handler); + } + function live(elClass, event, cb, context){ + addEvent(context || document, event, function(e){ + var found, el = e.target || e.srcElement; + while (el && !(found = hasClass(el, elClass))) el = el.parentElement; + if (found) cb.call(el, e); + }); + } + + var o = { + selector: 0, + source: 0, + minChars: 3, + delay: 150, + offsetLeft: 0, + offsetTop: 1, + cache: 1, + menuClass: '', + renderItem: function (item, search){ + // escape special characters + search = search.replace(/[-\/\\^$*+?.()|[\]{}]/g, '\\$&'); + var re = new RegExp("(" + search.split(' ').join('|') + ")", "gi"); + return '
' + item.replace(re, "$1") + '
'; + }, + onSelect: function(e, term, item){} + }; + for (var k in options) { if (options.hasOwnProperty(k)) o[k] = options[k]; } + + // init + var elems = typeof o.selector == 'object' ? [o.selector] : document.querySelectorAll(o.selector); + for (var i=0; i 0) + that.sc.scrollTop = selTop + that.sc.suggestionHeight + scrTop - that.sc.maxHeight; + else if (selTop < 0) + that.sc.scrollTop = selTop + scrTop; + } + } + } + addEvent(window, 'resize', that.updateSC); + document.body.appendChild(that.sc); + + live('autocomplete-suggestion', 'mouseleave', function(e){ + var sel = that.sc.querySelector('.autocomplete-suggestion.selected'); + if (sel) setTimeout(function(){ sel.className = sel.className.replace('selected', ''); }, 20); + }, that.sc); + + live('autocomplete-suggestion', 'mouseover', function(e){ + var sel = that.sc.querySelector('.autocomplete-suggestion.selected'); + if (sel) sel.className = sel.className.replace('selected', ''); + this.className += ' selected'; + }, that.sc); + + live('autocomplete-suggestion', 'mousedown', function(e){ + if (hasClass(this, 'autocomplete-suggestion')) { // else outside click + var v = this.getAttribute('data-val'); + that.value = v; + o.onSelect(e, v, this); + that.sc.style.display = 'none'; + } + }, that.sc); + + that.blurHandler = function(){ + try { var over_sb = document.querySelector('.autocomplete-suggestions:hover'); } catch(e){ var over_sb = 0; } + if (!over_sb) { + that.last_val = that.value; + that.sc.style.display = 'none'; + setTimeout(function(){ that.sc.style.display = 'none'; }, 350); // hide suggestions on fast input + } else if (that !== document.activeElement) setTimeout(function(){ that.focus(); }, 20); + }; + addEvent(that, 'blur', that.blurHandler); + + var suggest = function(data){ + var val = that.value; + that.cache[val] = data; + if (data.length && val.length >= o.minChars) { + var s = ''; + for (var i=0;i 40) && key != 13 && key != 27) { + var val = that.value; + if (val.length >= o.minChars) { + if (val != that.last_val) { + that.last_val = val; + clearTimeout(that.timer); + if (o.cache) { + if (val in that.cache) { suggest(that.cache[val]); return; } + // no requests if previous suggestions were empty + for (var i=1; i https://github.com/noelboss/featherlight/issues/317 +!function(u){"use strict";if(void 0!==u)if(u.fn.jquery.match(/-ajax/))"console"in window&&window.console.info("Featherlight needs regular jQuery, not the slim version.");else{var r=[],i=function(t){return r=u.grep(r,function(e){return e!==t&&0','
','",'
'+n.loading+"
","
",""].join("")),o="."+n.namespace+"-close"+(n.otherClose?","+n.otherClose:"");return n.$instance=i.clone().addClass(n.variant),n.$instance.on(n.closeTrigger+"."+n.namespace,function(e){if(!e.isDefaultPrevented()){var t=u(e.target);("background"===n.closeOnClick&&t.is("."+n.namespace)||"anywhere"===n.closeOnClick||t.closest(o).length)&&(n.close(e),e.preventDefault())}}),this},getContent:function(){if(!1!==this.persist&&this.$content)return this.$content;var t=this,e=this.constructor.contentFilters,n=function(e){return t.$currentTarget&&t.$currentTarget.attr(e)},r=n(t.targetAttr),i=t.target||r||"",o=e[t.type];if(!o&&i in e&&(o=e[i],i=t.target&&r),i=i||n("href")||"",!o)for(var a in e)t[a]&&(o=e[a],i=t[a]);if(!o){var s=i;if(i=null,u.each(t.contentFilters,function(){return(o=e[this]).test&&(i=o.test(s)),!i&&o.regex&&s.match&&s.match(o.regex)&&(i=s),!i}),!i)return"console"in window&&window.console.error("Featherlight: no content filter found "+(s?' for "'+s+'"':" (no target specified)")),!1}return o.process.call(t,i)},setContent:function(e){return this.$instance.removeClass(this.namespace+"-loading"),this.$instance.toggleClass(this.namespace+"-iframe",e.is("iframe")),this.$instance.find("."+this.namespace+"-inner").not(e).slice(1).remove().end().replaceWith(u.contains(this.$instance[0],e[0])?"":e),this.$content=e.addClass(this.namespace+"-inner"),this},open:function(t){var n=this;if(n.$instance.hide().appendTo(n.root),!(t&&t.isDefaultPrevented()||!1===n.beforeOpen(t))){t&&t.preventDefault();var e=n.getContent();if(e)return r.push(n),s(!0),n.$instance.fadeIn(n.openSpeed),n.beforeContent(t),u.when(e).always(function(e){n.setContent(e),n.afterContent(t)}).then(n.$instance.promise()).done(function(){n.afterOpen(t)})}return n.$instance.detach(),u.Deferred().reject().promise()},close:function(e){var t=this,n=u.Deferred();return!1===t.beforeClose(e)?n.reject():(0===i(t).length&&s(!1),t.$instance.fadeOut(t.closeSpeed,function(){t.$instance.detach(),t.afterClose(e),n.resolve()})),n.promise()},resize:function(e,t){if(e&&t&&(this.$content.css("width","").css("height",""),this.$content.parent().width()');return n.onload=function(){r.naturalWidth=n.width,r.naturalHeight=n.height,t.resolve(r)},n.onerror=function(){t.reject(r)},n.src=e,t.promise()}},html:{regex:/^\s*<[\w!][^<]*>/,process:function(e){return u(e)}},ajax:{regex:/./,process:function(e){var n=u.Deferred(),r=u("
").load(e,function(e,t){"error"!==t&&n.resolve(r.contents()),n.fail()});return n.promise()}},iframe:{process:function(e){var t=new u.Deferred,n=u("